text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
import npc, random
from neuron import network
#CONSTANTS
POPSIZE = 100
NUM_GENERATIONS = 100
NUM_BEST_PARENTS = 5
LOCAL_OPT = 20
bestArr=[]
stuckCount=0
#init empty population list
pop = []
#for range popsize create random individual
for x in range(POPSIZE):
pop.append(network())
#initialize best to empty
best = pop[0]
#repeat
for gen in range(NUM_GENERATIONS):
#for each indiviual
for x in range(len(pop)):
pop[x].getFitness(False)
oldbest = best
pop.sort(key = lambda x: x.fitness, reverse = True)
if best == None or best.fitness < pop[0].fitness:
best = pop[0]
if(best == oldbest):
stuckCount+=1
if gen%100 == 0 and gen != 0:
print 'play'
best.play(True)
#initialize empty list to hold next generation
nextGen = []
pop = pop[0:NUM_BEST_PARENTS]
#pop = pop[0:POPSIZE/5]
#for half the population size
#for x in range (NUM_BEST_PARENTS):
if(stuckCount > LOCAL_OPT):
print "new random children"
best.play(True)
bestArr.append(best)
best = None
stuckCount=0
for x in range(POPSIZE):
nextGen.append(network())
else:
for x in range(POPSIZE/2):
"""
for y in range(POPSIZE/(NUM_BEST_PARENTS)):
nextGen.append(pop[x].mutate())
"""
#pick two parents
p1 = random.choice(pop)
p2 = random.choice(pop)
#create two children from crossover
c1 = p1.crossover(p2)
c2 = p2.crossover(p1)
#mutate the children
c1 = c1.mutate()
c2 = c2.mutate()
nextGen.append(c1)
nextGen.append(c2)
#for x in range (POPSIZE/2):
# nextGen.append(network())
#set the parentlist to the next generation list
pop = nextGen
if(best != None):
print gen, best.fitness, best.toString()
else:
print gen, "reset"
bestArr.sort(key = lambda x: x.fitness, reverse = True)
for x in range(len(bestArr)):
bestArr[x].play(True)
print gen, bestArr[x].fitness, bestArr[x].toString()
for x in range(len(bestArr)):
print gen, bestArr[x].toString()
best.play(True)
| Josh-C-Montgomery/Evolving-Boss-Battles | GA.py | Python | apache-2.0 | 2,294 | [
"NEURON"
] | 273cde59bd7010b34c6f837a11321057ea39f9adf0096e52433a9769776f7c1d |
# -*- coding: utf-8 -*-
u"""PyTest for :mod:`sirepo.template.srw.py`
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import requests
import sirepo.crystal
def _skip():
    """Return True when the X0H server is unreachable (connect timeout).

    Any failure other than a connect timeout is treated as a test error
    and raised as AssertionError.
    """
    try:
        requests.get(sirepo.crystal.X0H_SERVER, timeout=2)
        return False
    except requests.exceptions.ConnectTimeout:
        return True
    except Exception as e:
        raise AssertionError(
            'exception={} trying to reach uri={}'.format(e, sirepo.crystal.X0H_SERVER),
        )


# skips test_srw_calc_bragg_angle too, when the server is unavailable.
pytestmark = pytest.mark.skipif(_skip(), reason='Unable to reach ' + sirepo.crystal.X0H_SERVER)
def test_srw_calc_bragg_angle():
    """Check calc_bragg_angle against precomputed reference values."""
    from sirepo import crystal
    for case in (
        ((3.135531576941939, 20368, 1), (0.06087205076590731, 0.09722123437454372, 5.570366408713557)),
        ((3.1355, 20368, 1), (0.06087205076590731, 0.09722221656509854, 5.570422684087026)),
    ):
        angle_data = crystal.calc_bragg_angle(
            d=case[0][0],
            energy_eV=case[0][1],
            n=case[0][2],
        )
        # exact float equality is intentional: these are regression pins
        assert angle_data['lamda'] == case[1][0]
        assert angle_data['bragg_angle'] == case[1][1]
        assert angle_data['bragg_angle_deg'] == case[1][2]
def test_srw_get_crystal_parameters():
    # NOTE(review): this early return disables the whole test; kept to
    # preserve current behavior -- consider pytest.skip('reason') so the
    # report shows it as skipped rather than passed.
    return
    from sirepo import crystal
    expected = (
        (5.4309, 3.135531576941939, 3.135531576941939, 3.1355, -2.3353e-06, 8.6843e-09, 1.2299e-06, 6.0601e-09, 5.5704),
        (5.4309, 3.135531576941939, 3.135531576941939, 3.1355, -6.0335e-06, 5.7615e-08, 3.1821e-06, 4.0182e-08, 8.9561),
    )
    for case in (
        (('Silicon', 20368, 1, 1, 1), expected[0]),
        (('Silicon', 20368, '1', '1', '1'), expected[0]),
        (('Silicon', '20368', 1, 1, 1), expected[0]),
        (('Silicon', 12700, 1, 1, 1), expected[1]),
    ):
        crystal_parameters = crystal.get_crystal_parameters(
            material=case[0][0],
            energy_eV=case[0][1],
            h=case[0][2],
            k=case[0][3],
            l=case[0][4],
        )
        assert crystal_parameters['a1'] == case[1][0]
        assert crystal_parameters['d'] == case[1][1]
        assert crystal_parameters['d_calculated'] == case[1][2]
        assert crystal_parameters['d_server'] == case[1][3]
        assert crystal_parameters['xr0'] == case[1][4]
        assert crystal_parameters['xi0'] == case[1][5]
        assert crystal_parameters['xrh'] == case[1][6]
        assert crystal_parameters['xih'] == case[1][7]
        assert crystal_parameters['bragg_angle_deg'] == case[1][8]
def test_srw_get_crystal_parameters_str():
    """Invalid input must raise AssertionError (presumably 'Si' is not an
    accepted material name -- verify against crystal.get_crystal_parameters).
    """
    from sirepo import crystal
    with pytest.raises(AssertionError):
        _ = crystal.get_crystal_parameters(
            material='Si',
            energy_eV='20368',
            h=1,
            k=1,
            l=1,
        )
| radiasoft/sirepo | tests/crystal_test.py | Python | apache-2.0 | 3,031 | [
"CRYSTAL"
] | 166d7a08e4916c1fcce9917720ed54e6dd56f7d1dd761698c40b6f458cb2df33 |
from ovito import *
from ovito.io import *
from ovito.modifiers import *
import numpy

# Regression check: import a LAMMPS data file with bonds, wrap atoms back
# into the periodic cell, and print the resulting bond PBC shift vectors.
node = import_file("../../files/LAMMPS/bonds.data.gz", atom_style = 'bond')
node.modifiers.append(WrapPeriodicImagesModifier())
print(node.compute().bonds.pbc_vectors)
| srinath-chakravarthy/ovito | tests/scripts/test_suite/bonds_pbc_vectors.py | Python | gpl-3.0 | 255 | [
"LAMMPS",
"OVITO"
] | e9244c6c4c02b079d4d53e592b66d3a8d8da95b0333c4141f5d0b5081c1eb9c3 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Auxiliary Functions and resources to PSF0X.
Created on Tue Jan 30 16:31:00 2018
:author: Ruyman Azzollini
"""
# IMPORT STUFF
from pdb import set_trace as stop
import numpy as np
import os
from collections import OrderedDict
import string as st
import copy
from matplotlib import cm
from vison.datamodel import cdp
from vison.plot import figclasses
from vison.plot import trends
from vison.point import Paux
# END IMPORT
def get_check_offsets_dict(test):
    """Return the figure-spec dict for the offset-vs-time check plot.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(stats=['offset_pre', 'offset_ove'],
                trendaxis='time',
                figname='%s_offset_vs_time.png' % (test,),
                caption='%s: offset vs. time.' % (ntest,),
                meta=dict(doLegend=True,
                          doNiceXDate=True,
                          suptitle='%s-checks: offsets' % ntest,
                          ylim=trends.offset_lims))
def get_check_deltaoff_dict(test):
    """Return the figure-spec dict for the delta-offset-vs-time check plot.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(
        stats=['deltaoff_pre', 'deltaoff_ove'],
        trendaxis='time',
        figname='%s_deltaoff_vs_time.png' % (test,),
        caption='%s: $\\delta$offset vs. time. Offset value in each frame minus the average value.' % (ntest,),
        meta=dict(doLegend=True,
                  doNiceXDate=True,
                  suptitle='%s-checks: delta-offsets' % ntest,
                  ylim=[-10., 10.]))
def get_check_std_dict(test):
    """Return the figure-spec dict for the std-vs-time (readout noise) check.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(stats=['std_pre', 'std_ove'],
                trendaxis='time',
                figname='%s_std_vs_time.png' % test,
                caption='%s: std vs. time.' % ntest,
                meta=dict(doLegend=True,
                          doNiceXDate=True,
                          suptitle='%s-checks: std' % ntest,
                          ylim=trends.RON_lims))
def get_check_bgd_dict(test):
    """Return the figure-spec dict for the background-vs-time check plot.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(stats=['bgd_img'],
                trendaxis='time',
                figname='%s_bgd_vs_time.png' % test,
                caption='%s: Background vs. time.' % ntest,
                meta=dict(doLegend=False,
                          doNiceXDate=True,
                          suptitle='%s-checks: BGD' % ntest))
def get_check_flu_dict(test):
    """Return the figure-spec dict for the fluence-vs-exposure-time check.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(stats=['chk_fluence'],
                trendaxis='exptime',
                figname='%s_flu_vs_exptime.png' % test,
                caption='%s: Fluence vs. Exposure Time.' % ntest,
                meta=dict(doLegend=True,
                          doNiceXDate=False,
                          suptitle='%s-checks: Fluence' % ntest,
                          xlabel='seconds',
                          ylabel='Flu.[ADU]'))
def get_check_fwhmx_dict(test):
    """Return the figure-spec dict for the FWHM(x)-vs-exposure-time check.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(stats=['chk_fwhmx'],
                trendaxis='exptime',
                figname='%s_fwhmx_vs_exptime.png' % test,
                caption='%s: FWHM(x) vs. Exposure Time.' % ntest,
                meta=dict(doLegend=True,
                          doNiceXDate=False,
                          suptitle='%s-checks: FWHM(x)' % ntest,
                          xlabel='seconds',
                          ylabel='FWHMx [pix]'))
def get_check_fwhmy_dict(test):
    """Return the figure-spec dict for the FWHM(y)-vs-exposure-time check.

    :param test: test name; underscores are escaped for LaTeX captions.
    """
    ntest = test.replace('_', '\\_')
    return dict(stats=['chk_fwhmy'],
                trendaxis='exptime',
                figname='%s_fwhmy_vs_exptime.png' % test,
                caption='%s: FWHM(y) vs. Exposure Time.' % ntest,
                meta=dict(doLegend=True,
                          doNiceXDate=False,
                          suptitle='%s-checks: FWHM(y)' % ntest,
                          xlabel='seconds',
                          ylabel='FWHMy [pix]'))
def get_crosstalk_dict(test, figtype):
    """Return the figure-spec dict for a cross-talk matrix figure.

    :param figtype: units label embedded in figure name/caption
        (e.g. 'ADU' or 'RATIO').
    """
    tcaption = '%s: Cross-Talk [%s]. Green means positive cross-talk, red means negative cross-talk' +\
        ' (does not mean compliance/non-compliance). Pale colours mean less accurate results.'
    ntest = test.replace('_', '\\_')
    crosstalk_dict = dict(
        figname='%s_crosstalk_%s.png' % (test, figtype),
        caption=tcaption % (ntest, figtype),
        meta=dict(),
        data=None
    )
    return crosstalk_dict
def get_spotsposter_dict(test, BFE=True):
    """Return the figure-spec dict for the spots-poster image.

    NOTE(review): the flag semantics look inverted -- ``BFE=True`` selects
    the 'noBFE' (uncorrected) figure -- but the callers in get_PSF0Xfigs
    rely on exactly this mapping, so it is preserved as-is.
    """
    if BFE:
        figtype = 'noBFE'
        tcaption = '%s: Spots Poster, BFE not corrected. Log scale.'
    else:
        figtype = 'withBFE'
        tcaption = '%s: Spots Poster, BFE corrected using G+15. Log scale.'
    ntest = test.replace('_', '\\_')
    sp_dict = dict(
        figname='%s_spotsposter_%s.png' % (test, figtype),
        caption=tcaption % (ntest,),
        meta=dict(doColorbar=True,
                  corekwargs=dict(cmap=cm.gray,
                                  aspect='auto',
                                  # norm=None,
                                  origin='lower left')),
        data=None
    )
    return sp_dict
def get_FWHM_v_flu_dict(test, fwhmkey):
    """Return the figure-spec dict for a FWHM-vs-peak-fluence plot.

    :param fwhmkey: 'fwhmx' or 'fwhmy' -- which gaussian-fit width to plot.
    """
    ntest = test.replace('_', '\\_')
    fdict = dict(
        figname='%s_%s_v_flu.png' % (test, fwhmkey),
        caption='%s: Gaussian-fit %s vs. Peak Fluence.' %
        (ntest, fwhmkey.upper()),
        meta=dict(doLegend=True,
                  ylabel='%s, [pix]' % fwhmkey,
                  xlabel=r'$I_{0}\ [10\ kADU]$',
                  ylim=[0.75, 2.5],
                  xlim=[0., 6.5],
                  corekwargs=dict(
                      noBFE=dict(marker='', linestyle='--', color='b'),
                      BFE=dict(marker='', linestyle='-', color='r'),
                      ideal=dict(marker='', linestyle=':', color='k')),
                  suptitle='%s: gaussian-fit %s in pixels vs. Fluence' %
                  (ntest, fwhmkey))
    )
    return fdict
def get_skew_dict(test, vswhat, direction):
    """Return the figure-spec dict for skewness diagnostics.

    :param vswhat: 'position' or 'fluence' (quantity on the x-axis).
    :param direction: 'x' or 'y' (axis of the skewness statistic).
    """
    ntest = test.replace('_', '\\_')
    if vswhat == 'position':
        xlabel = 'CCD-%s Pos. [pix]' % direction
        suptitle = '%s: gaussian-fit res. %s-skew vs. %s-%s' %\
            (ntest, direction, direction, vswhat)
        caption = '%s: Gaussian-fit residuals %s-skewness vs. %s-%s.' % \
            (ntest, direction, direction, vswhat)
    elif vswhat == 'fluence':
        xlabel = 'Fluence [kADU]'
        suptitle = '%s: gaussian-fit res. %s-skew vs. %s' %\
            (ntest, direction, vswhat)
        caption = '%s: Gaussian-fit residuals %s-skewness vs. %s.' % \
            (ntest, direction, vswhat)
    fdict = dict(
        figname='%s_skew_%s_vs_%s.png' % (test, direction, vswhat),
        caption=caption,
        meta=dict(doLegend=True,
                  doYErrbars=True,
                  ylabel='%s-skewness [adim.]' % direction,
                  xlabel=xlabel,
                  corekwargs=dict(
                      noBFE=dict(marker='.', linestyle='', color='b'),
                      BFE=dict(marker='.', linestyle='', color='r')),
                  suptitle=suptitle)
    )
    return fdict
def get_PSF0Xfigs(test):
    """Assemble the {figure-key: [FigureClass, spec-dict]} map for a
    PSF0X test report."""
    PSF0Xfigs = dict()
    PSF0Xfigs['PSF0Xchecks_offsets'] = [
        trends.Fig_Basic_Checkstat, get_check_offsets_dict(test)]
    PSF0Xfigs['PSF0Xchecks_deltaoff'] = [
        trends.Fig_Basic_Checkstat, get_check_deltaoff_dict(test)]
    PSF0Xfigs['PSF0Xchecks_stds'] = [
        trends.Fig_Basic_Checkstat, get_check_std_dict(test)]
    PSF0Xfigs['PSF0Xchecks_bgd'] = [
        trends.Fig_Basic_Checkstat, get_check_bgd_dict(test)]
    PSF0Xfigs['PSF0Xchecks_fluence'] = [
        trends.Fig_Basic_Checkstat, get_check_flu_dict(test)]
    PSF0Xfigs['PSF0Xchecks_fwhmx'] = [
        trends.Fig_Basic_Checkstat, get_check_fwhmx_dict(test)]
    PSF0Xfigs['PSF0Xchecks_fwhmy'] = [
        trends.Fig_Basic_Checkstat, get_check_fwhmy_dict(test)]
    PSF0Xfigs['PSF0X_crosstalk_ADU'] = [
        figclasses.Fig_Husk, get_crosstalk_dict(test, 'ADU')]
    PSF0Xfigs['PSF0X_crosstalk_RATIO'] = [
        figclasses.Fig_Husk, get_crosstalk_dict(test, 'RATIO')]
    # NOTE(review): BFE flag mapping is inverted-looking but deliberate --
    # see get_spotsposter_dict.
    PSF0Xfigs['SpotsPoster'] = [
        figclasses.Fig_ImgShow, get_spotsposter_dict(test, BFE=False)]
    PSF0Xfigs['SpotsPosterNOBFE'] = [
        figclasses.Fig_ImgShow, get_spotsposter_dict(test, BFE=True)]
    PSF0Xfigs['PSF0X_fwhmx_v_flu'] = [
        figclasses.Fig_Beam2DPlot, get_FWHM_v_flu_dict(test, fwhmkey='fwhmx')]
    PSF0Xfigs['PSF0X_fwhmy_v_flu'] = [
        figclasses.Fig_Beam2DPlot, get_FWHM_v_flu_dict(test, fwhmkey='fwhmy')]
    PSF0Xfigs['PSF0X_skew_dirx_vs_fluence'] = [
        figclasses.Fig_Beam2DPlot, get_skew_dict(test, 'fluence', 'x')]
    PSF0Xfigs['PSF0X_skew_diry_vs_fluence'] = [
        figclasses.Fig_Beam2DPlot, get_skew_dict(test, 'fluence', 'y')]
    PSF0Xfigs['PSF0X_skew_dirx_vs_pos'] = [
        figclasses.Fig_Beam2DPlot, get_skew_dict(test, 'position', 'x')]
    PSF0Xfigs['PSF0X_skew_diry_vs_pos'] = [
        figclasses.Fig_Beam2DPlot, get_skew_dict(test, 'position', 'y')]
    PSF0Xfigs['BlueScreen'] = [figclasses.BlueScreen, dict()]
    return PSF0Xfigs
def get_PSF01_PANCHRO_figs():
    """Return the figure map for PSF01_PANCHRO (currently empty)."""
    PSF01_PANCHRO_figs = dict()
    return PSF01_PANCHRO_figs
def get_CDP_lib(test):
    """Build the library of calibration data products (CDPs) for a PSF0X
    test, merged with the common point-source CDPs from Paux.

    NOTE(review): ``test`` is currently unused; kept for interface parity
    with the other aux modules.
    """
    CDP_lib = OrderedDict()
    CDP_lib['SPOTS'] = cdp.CDP()
    CDP_lib['SPOTS'].rootname = 'Spots'
    CDP_lib['SPOTS_NOBFE'] = cdp.CDP()
    CDP_lib['SPOTS_NOBFE'].rootname = 'Spots_nobfe'
    CDP_lib['RAW_CTALK'] = cdp.CDP()
    CDP_lib['RAW_CTALK'].rootname = 'Raw_crosstalk'
    CDP_lib['CTALK'] = cdp.CDP()
    CDP_lib['CTALK'].rootname = 'crosstalk'
    CDP_lib.update(Paux.get_CDP_lib())
    return CDP_lib
def _f_xy_bin(x, y, Nbins=3):
    """Bin (x, y) samples into ``Nbins`` clusters along x (via KMeans) and
    return per-bin statistics, sorted by increasing bin centre.

    :returns: tuple (xbin, ybin, ybinsig, vNbin) -- per-bin mean of x,
        mean of y, std of y, and sample count.
    """
    from sklearn.cluster import KMeans
    xresh = x.reshape(len(x), -1)
    kmeansRes = KMeans(n_clusters=Nbins, verbose=0).fit(xresh)
    xbin = np.zeros(Nbins, dtype='float32')
    ybin = np.zeros(Nbins, dtype='float32')
    ybinsig = np.zeros(Nbins, dtype='float32')
    vNbin = np.zeros(Nbins, dtype='int32')
    for i in range(Nbins):
        ixsel = np.where(kmeansRes.labels_ == i)
        xbin[i] = np.mean(x[ixsel])
        ybin[i] = np.mean(y[ixsel])
        # BUGFIX: the y-dispersion was previously computed from x
        ybinsig[i] = np.std(y[ixsel])
        vNbin[i] = len(ixsel[0])
    ixorder = np.argsort(xbin)
    xbin = xbin[ixorder]
    ybin = ybin[ixorder]
    ybinsig = ybinsig[ixorder]
    vNbin = vNbin[ixorder]
    return (xbin, ybin, ybinsig, vNbin)
| ruymanengithub/vison | vison/point/PSF0Xaux.py | Python | gpl-3.0 | 10,426 | [
"Gaussian"
] | 31dd76fd5f03168fbc69312e79dbf4e4c26b025fe5c6f2043ef3ed60c9462f75 |
# -*- coding: utf-8 -*-
# This file is part of MOOSE simulator: http://moose.ncbs.res.in.
# MOOSE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MOOSE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MOOSE. If not, see <http://www.gnu.org/licenses/>.
"""moose_constants.py:
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh, NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
## for Ca Pool
# FARADAY = 96154.0
# Coulombs
# from cadecay.mod : 1/(2*96154.0) = 5.2e-6 which is the Book of Genesis / readcell value
FARADAY = 96485.3415 # Coulombs/mol # from Wikipedia
## Table step_mode values (how a moose Table consumes/produces values)
# table acts as lookup - default mode
TAB_IO = 0
# table outputs value until it reaches the end and then stays at the last value
TAB_ONCE = 2
# table acts as a buffer: succesive entries at each time step
TAB_BUF = 3
# table acts as a buffer for spike times. Threshold stored in the pymoose 'stepSize' field.
TAB_SPIKE = 4
## Table fill modes (interpolation used when filling a table)
BSplineFill = 0 # B-spline fill (default)
CSplineFill = 1 # C_Spline fill (not yet implemented)
LinearFill = 2 # Linear fill
## Clock assignments (which moose clock tick drives which component):
## clock 0 is for init & hsolve
## The ee method uses clocks 1, 2.
## hsolve & ee use clock 3 for Ca/ion pools.
## clocks 4 and 5 are for biochemical simulations.
## clock 6 for lookup tables, clock 7 for stimuli
## clocks 8 and 9 for tables for plots.
INITCLOCK = 0
ELECCLOCK = 1
CHANCLOCK = 2
POOLCLOCK = 3
LOOKUPCLOCK = 6
STIMCLOCK = 7
PLOTCLOCK = 8
| dharmasam9/moose-core | python/moose/moose_constants.py | Python | gpl-3.0 | 2,148 | [
"MOOSE"
] | 7001b6785acac3571825b419e99bb290478449877e07bc9a44d108e74b98798d |
"""
Tools for different procedure estimations
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, \
Pedro V. Amaral pedro.amaral@asu.edu, \
David C. Folch david.folch@asu.edu, \
Daniel Arribas-Bel darribas@asu.edu,\
Levi Wolf levi.john.wolf@gmail.com"
import numpy as np
from scipy import sparse as SP
from scipy.sparse import linalg as SPla
import scipy.optimize as op
import numpy.linalg as la
from pysal import lag_spatial
from sputils import *
import copy
class RegressionPropsY(object):
    """
    Helper class that adds common regression properties to any regression
    class that inherits it. It takes no parameters. See BaseOLS for example
    usage.

    Attributes
    ----------
    mean_y : float
              Mean of the dependent variable (lazily computed from self.y)
    std_y  : float
              Standard deviation of the dependent variable (ddof=1)

    Results are memoized in ``self._cache``; the setters allow derived
    classes to override the cached values.
    """

    @property
    def mean_y(self):
        try:
            return self._cache['mean_y']
        except AttributeError:
            # first property access on this instance: create the cache
            self._cache = {}
            self._cache['mean_y'] = np.mean(self.y)
        except KeyError:
            self._cache['mean_y'] = np.mean(self.y)
        return self._cache['mean_y']

    @mean_y.setter
    def mean_y(self, val):
        try:
            self._cache['mean_y'] = val
        except AttributeError:
            self._cache = {}
            self._cache['mean_y'] = val
        except KeyError:
            self._cache['mean_y'] = val

    @property
    def std_y(self):
        try:
            return self._cache['std_y']
        except AttributeError:
            self._cache = {}
            self._cache['std_y'] = np.std(self.y, ddof=1)
        except KeyError:
            self._cache['std_y'] = np.std(self.y, ddof=1)
        return self._cache['std_y']

    @std_y.setter
    def std_y(self, val):
        try:
            self._cache['std_y'] = val
        except AttributeError:
            self._cache = {}
            self._cache['std_y'] = val
        except KeyError:
            self._cache['std_y'] = val
class RegressionPropsVM(object):
    """
    Helper class that adds common regression properties to any regression
    class that inherits it. It takes no parameters. See BaseOLS for example
    usage.

    Attributes
    ----------
    utu     : float
              Sum of the squared residuals (from self.u)
    sig2n   : float
              Sigma squared with n in the denominator
    sig2n_k : float
              Sigma squared with n-k in the denominator
    vm      : array
              Variance-covariance matrix (kxk), sig2 * (X'X)^-1

    Results are memoized in ``self._cache``; the setters allow derived
    classes to override the cached values.
    """

    @property
    def utu(self):
        try:
            return self._cache['utu']
        except AttributeError:
            # first property access on this instance: create the cache
            self._cache = {}
            self._cache['utu'] = np.sum(self.u ** 2)
        except KeyError:
            self._cache['utu'] = np.sum(self.u ** 2)
        return self._cache['utu']

    @utu.setter
    def utu(self, val):
        try:
            self._cache['utu'] = val
        except AttributeError:
            self._cache = {}
            self._cache['utu'] = val
        except KeyError:
            self._cache['utu'] = val

    @property
    def sig2n(self):
        try:
            return self._cache['sig2n']
        except AttributeError:
            self._cache = {}
            self._cache['sig2n'] = self.utu / self.n
        except KeyError:
            self._cache['sig2n'] = self.utu / self.n
        return self._cache['sig2n']

    @sig2n.setter
    def sig2n(self, val):
        try:
            self._cache['sig2n'] = val
        except AttributeError:
            self._cache = {}
            self._cache['sig2n'] = val
        except KeyError:
            self._cache['sig2n'] = val

    @property
    def sig2n_k(self):
        try:
            return self._cache['sig2n_k']
        except AttributeError:
            self._cache = {}
            self._cache['sig2n_k'] = self.utu / (self.n - self.k)
        except KeyError:
            self._cache['sig2n_k'] = self.utu / (self.n - self.k)
        return self._cache['sig2n_k']

    @sig2n_k.setter
    def sig2n_k(self, val):
        try:
            self._cache['sig2n_k'] = val
        except AttributeError:
            self._cache = {}
            self._cache['sig2n_k'] = val
        except KeyError:
            self._cache['sig2n_k'] = val

    @property
    def vm(self):
        try:
            return self._cache['vm']
        except AttributeError:
            self._cache = {}
            self._cache['vm'] = np.dot(self.sig2, self.xtxi)
        except KeyError:
            self._cache['vm'] = np.dot(self.sig2, self.xtxi)
        finally:
            # NOTE: the finally-return runs on every path (it also masks
            # the early return above); kept to preserve original behavior.
            return self._cache['vm']

    @vm.setter
    def vm(self, val):
        try:
            self._cache['vm'] = val
        except AttributeError:
            self._cache = {}
            self._cache['vm'] = val
        except KeyError:
            self._cache['vm'] = val
def get_A1_het(S):
    """
    Builds A1 as in Arraiz et al [Arraiz2010]_

    .. math::

        A_1 = W' W - diag(w'_{.i} w_{.i})

    Parameters
    ----------
    S : csr_matrix
        PySAL W object converted into Scipy sparse matrix

    Returns
    -------
    csr_matrix
        A1 matrix in scipy sparse format
    """
    StS = S.T * S
    # subtract the diagonal of W'W, leaving only the off-diagonal terms
    d = SP.spdiags([StS.diagonal()], [0], S.get_shape()[0], S.get_shape()[1])
    d = d.asformat('csr')
    return StS - d
def get_A1_hom(s, scalarKP=False):
    """
    Builds A1 for the spatial error GM estimation with homoscedasticity as
    in Drukker et al. [Drukker2011]_ (p. 9).

    .. math::

        A_1 = \\{1 + [n^{-1} tr(W'W)]^2\\}^{-1} [W'W - n^{-1} tr(W'W) I]

    Parameters
    ----------
    s        : csr_matrix
               PySAL W object converted into Scipy sparse matrix
    scalarKP : boolean
               Flag to include scalar corresponding to the first moment
               condition as in Drukker et al. (Defaults to False)

    Returns
    -------
    csr_matrix
        A1 matrix in scipy sparse format
    """
    # BUGFIX: dimensions must be ints -- the original passed float(n) to
    # SP.eye, which modern scipy rejects.
    n = s.shape[0]
    wpw = s.T * s
    twpw = np.sum(wpw.diagonal())
    # identity scaled by tr(W'W)/n
    e = SP.eye(n, n, format='csr')
    e.data = np.ones(n) * (twpw / n)
    num = wpw - e
    if not scalarKP:
        return num
    den = 1. + (twpw / n) ** 2.
    return num / den
def get_A2_hom(s):
    """
    Builds A2 for the spatial error GM estimation with homoscedasticity as
    in Anselin (2011) [Anselin2011]_

    .. math::

        A_2 = (W + W') / 2

    Parameters
    ----------
    s : csr_matrix
        PySAL W object converted into Scipy sparse matrix

    Returns
    -------
    csr_matrix
        A2 matrix in scipy sparse format
    """
    return (s + s.T) / 2.
def _moments2eqs(A1, s, u):
'''
Helper to compute G and g in a system of two equations as in
the heteroskedastic error models from Drukker et al. [Drukker2011]_
...
Parameters
----------
A1 : scipy.sparse.csr
A1 matrix as in the paper, different deppending on whether
it's homocedastic or heteroskedastic model
s : W.sparse
Sparse representation of spatial weights instance
u : array
Residuals. nx1 array assumed to be aligned with w
Attributes
----------
moments : list
List of two arrays corresponding to the matrices 'G' and
'g', respectively.
'''
n = float(s.shape[0])
A1u = A1 * u
wu = s * u
g1 = np.dot(u.T, A1u)
g2 = np.dot(u.T, wu)
g = np.array([[g1][0][0], [g2][0][0]]) / n
G11 = np.dot(u.T, ((A1 + A1.T) * wu))
G12 = -np.dot((wu.T * A1), wu)
G21 = np.dot(u.T, ((s + s.T) * wu))
G22 = -np.dot(wu.T, (s * wu))
G = np.array([[G11[0][0], G12[0][0]], [G21[0][0], G22[0][0]]]) / n
return [G, g]
def optim_moments(moments_in, vcX=np.array([0])):
    """
    Optimization of moments: find the spatial parameter minimizing the
    squared residuals of g - G [par, par**2(, sig2)]'.

    Parameters
    ----------
    moments_in : list
                 [G, g] moment matrices (from e.g. _moments2eqs)
    vcX        : array
                 Optional. 2x2 variance-covariance matrix used as weights
                 in the optimization (applies Cholesky decomposition).
                 Empty by default.

    Returns
    -------
    float
        Position of the minimum (estimated spatial parameter).
    """
    moments = copy.deepcopy(moments_in)
    if vcX.any():
        # weight the moment system by the Cholesky factor of inv(vcX)
        Ec = np.transpose(la.cholesky(la.inv(vcX)))
        moments[0] = np.dot(Ec, moments_in[0])
        moments[1] = np.dot(Ec, moments_in[1])
    # rescale for numerical stability of the optimizer
    scale = np.min([[np.min(moments[0]), np.min(moments[1])]])
    moments[0], moments[1] = moments[0] / scale, moments[1] / scale
    if moments[0].shape[0] == 2:
        # two-equation system: parameter vector is [lambda, lambda**2]
        optim_par = lambda par: foptim_par(
            np.array([[float(par[0]), float(par[0]) ** 2.]]).T, moments)
        start = [0.0]
        bounds = [(-1.0, 1.0)]
    if moments[0].shape[0] == 3:
        # three-equation system: adds sig2 as a free parameter
        optim_par = lambda par: foptim_par(
            np.array([[float(par[0]), float(par[0]) ** 2., float(par[1])]]).T, moments)
        start = [0.0, 0.0]
        bounds = [(-1.0, 1.0), (0.0, None)]
    lambdaX = op.fmin_l_bfgs_b(
        optim_par, start, approx_grad=True, bounds=bounds)
    return lambdaX[0][0]
def foptim_par(par, moments):
    """
    Objective for the moments minimization.

    Parameters
    ----------
    par     : array
              Column vector of candidate parameter values
    moments : list
              [G, g] moment matrices

    Returns
    -------
    float-like
        Sum of squared residuals of g - G @ par.
    """
    vv = np.dot(moments[0], par)
    vv2 = moments[1] - vv
    return sum(vv2 ** 2)
def get_spFilter(w, lamb, sf):
    '''
    Compute the spatially filtered variable: sf - lamb * (W @ sf).

    Parameters
    ----------
    w    : weights
           PySAL weights instance (uses w.sparse), or any matrix
           supporting multiplication with sf
    lamb : float
           spatial autoregressive parameter
    sf   : array
           the variable to filter

    Returns
    -------
    array
        spatially filtered variable
    '''
    try:
        result = sf - lamb * (w.sparse * sf)
    except AttributeError:
        # w is already a (sparse/dense) matrix rather than a W object;
        # a bare except here previously masked unrelated errors
        result = sf - lamb * (w * sf)
    return result
def get_lags(w, x, w_lags):
    '''
    Calculates a given order of spatial lags and all the smaller orders.

    Parameters
    ----------
    w      : weights
             PySAL weights instance
    x      : array
             nxk array with the variables to be lagged
    w_lags : integer
             Maximum order of spatial lag

    Returns
    -------
    array
        nxk*w_lags array with the spatially lagged variables, lowest
        order first.
    '''
    lag = lag_spatial(w, x)
    spat_lags = lag
    for i in range(w_lags - 1):
        # lag the previous lag to obtain the next order
        lag = lag_spatial(w, lag)
        spat_lags = sphstack(spat_lags, lag)
    return spat_lags
def inverse_prod(w, data, scalar, post_multiply=False, inv_method="power_exp",
                 threshold=0.0000000001, max_iterations=None):
    """
    Compute inv(I - scalar*W) @ data (or data @ inv(...) when
    post_multiply is True).

    Parameters
    ----------
    w              : Pysal W object or matrix
                     nxn spatial weights
    data           : Numpy array
                     nx1 vector of data
    scalar         : float
                     Scalar value (typically rho or lambda)
    post_multiply  : boolean
                     If True then post-multiplies the data vector by the
                     inverse of the spatial filter; if False pre-multiplies.
    inv_method     : string
                     "true_inv" uses the true (dense) inverse of W (slow);
                     "power_exp" uses the power expansion method (default)
    threshold      : float
                     Convergence test for the power expansion (norm of the
                     per-iteration increment).
    max_iterations : integer
                     Maximum number of iterations for the expansion.
    """
    if inv_method == "power_exp":
        inv_prod = power_expansion(
            w, data, scalar, post_multiply=post_multiply,
            threshold=threshold, max_iterations=max_iterations)
    elif inv_method == "true_inv":
        try:
            # PySAL W object: use .n and the dense .full() form
            matrix = la.inv(np.eye(w.n) - (scalar * w.full()[0]))
        except AttributeError:
            # plain (sparse/dense) matrix
            matrix = la.inv(np.eye(w.shape[0]) - (scalar * w))
        if post_multiply:
            inv_prod = spdot(data.T, matrix)
        else:
            inv_prod = spdot(matrix, data)
    else:
        # Python 3 raise syntax (was: raise Exception, "...")
        raise Exception("Invalid method selected for inversion.")
    return inv_prod
def power_expansion(w, data, scalar, post_multiply=False, threshold=0.0000000001, max_iterations=None):
    """
    Compute the inverse of a matrix using the power expansion (Leontief
    expansion). General form is:

    .. math::

        x = (I - rho W)^{-1}v = [I + rho W + rho^2 WW + ...]v
          = v + rho Wv + rho^2 WWv + ...

    Iterates until the norm of the increment falls below ``threshold`` or
    ``max_iterations`` is reached; raises if the series diverges.
    """
    try:
        ws = w.sparse
    except AttributeError:
        # w is already a matrix
        ws = w
    if post_multiply:
        data = data.T
    running_total = copy.copy(data)
    increment = copy.copy(data)
    count = 1
    test = 10000000
    if max_iterations is None:
        max_iterations = 10000000
    while test > threshold and count <= max_iterations:
        if post_multiply:
            increment = increment * ws * scalar
        else:
            increment = ws * increment * scalar
        running_total += increment
        test_old = test
        test = la.norm(increment)
        if test > test_old:
            # growing increments mean |scalar * W| >= 1: series diverges
            raise Exception(
                "power expansion will not converge, check model "
                "specification and that weight are less than 1")
        count += 1
    return running_total
def set_endog(y, x, w, yend, q, w_lags, lag_q):
    """Build the endogenous-variable and instrument matrices for a spatial
    lag model: appends the spatial lag of y to yend and spatially lagged
    (exogenous, and optionally q) variables to the instruments q.

    NOTE: an unreachable duplicate of get_lags' body used to follow the
    return statement here; it has been removed (dead code).
    """
    # Create spatial lag of y
    yl = lag_spatial(w, y)
    # spatial and non-spatial instruments
    if issubclass(type(yend), np.ndarray):
        if lag_q:
            lag_vars = sphstack(x, q)
        else:
            lag_vars = x
        spatial_inst = get_lags(w, lag_vars, w_lags)
        q = sphstack(q, spatial_inst)
        yend = sphstack(yend, yl)
    elif yend is None:  # spatial instruments only
        q = get_lags(w, x, w_lags)
        yend = yl
    else:
        # Python 3 raise syntax (was: raise Exception, "...")
        raise Exception("invalid value passed to yend")
    return yend, q
def set_endog_sparse(y, x, w, yend, q, w_lags, lag_q):
    """
    Same as set_endog, but with a sparse matrix passed as weights instead
    of a W object.
    """
    yl = w * y
    # spatial and non-spatial instruments
    if issubclass(type(yend), np.ndarray):
        if lag_q:
            lag_vars = sphstack(x, q)
        else:
            lag_vars = x
        spatial_inst = w * lag_vars
        for i in range(w_lags - 1):
            # NOTE(review): this doubles the instrument block each pass
            # (w*spatial_inst is stacked onto the growing spatial_inst),
            # unlike set_endog which lags incrementally -- confirm intended.
            spatial_inst = sphstack(spatial_inst, w * spatial_inst)
        q = sphstack(q, spatial_inst)
        yend = sphstack(yend, yl)
    elif yend is None:  # spatial instruments only
        q = w * x
        for i in range(w_lags - 1):
            q = sphstack(q, w * q)
        yend = yl
    else:
        # Python 3 raise syntax (was: raise Exception, "...")
        raise Exception("invalid value passed to yend")
    return yend, q
def iter_msg(iteration, max_iter):
    """Return a human-readable stopping reason for an iterative estimator."""
    if iteration == max_iter:
        iter_stop = "Maximum number of iterations reached."
    else:
        iter_stop = "Convergence threshold (epsilon) reached."
    return iter_stop
def sp_att(w, y, predy, w_y, rho):
    """Compute spatially-corrected predicted values and residuals for the
    spatial lag model.

    Returns (predy_sp, resid_sp, warn); when |rho| >= 1 the correction is
    not computed (zeros returned) and a warning string is set.
    """
    xb = predy - rho * w_y
    if np.abs(rho) < 1:
        predy_sp = inverse_prod(w, xb, rho)
        warn = None
        # Note 1: Here if omitting pseudo-R2; If not, see Note 2.
        resid_sp = y - predy_sp
    else:
        #warn = "Warning: Estimate for rho is outside the boundary (-1, 1). Computation of true inverse of W was required (slow)."
        #predy_sp = inverse_prod(w, xb, rho, inv_method="true_inv")
        warn = "*** WARNING: Estimate for spatial lag coefficient is outside the boundary (-1, 1). ***"
        predy_sp = np.zeros(y.shape, float)
        resid_sp = np.zeros(y.shape, float)
        # resid_sp = y - predy_sp #Note 2: Here if computing true inverse; If not,
        # see Note 1.
    return predy_sp, resid_sp, warn
def set_warn(reg, warn):
    '''Append a warning message to ``reg.warning`` for printout, creating
    the attribute on first use; a falsy ``warn`` is a no-op.'''
    if warn:
        try:
            reg.warning += "Warning: " + warn + "\n"
        except AttributeError:
            # first warning on this regression object
            reg.warning = "Warning: " + warn + "\n"
def RegressionProps_basic(reg, betas=None, predy=None, u=None, sig2=None, sig2n_k=None, vm=None):
    '''Set basic regression properties on ``reg`` from the arguments
    passed, deriving any that are omitted (predy from betas, u from predy,
    sig2 from u).'''
    if betas is not None:
        reg.betas = betas
    if predy is not None:
        reg.predy = predy
    else:
        try:
            # models with endogenous variables carry the full design in .z
            reg.predy = spdot(reg.z, reg.betas)
        except AttributeError:
            reg.predy = spdot(reg.x, reg.betas)
    if u is not None:
        reg.u = u
    else:
        reg.u = reg.y - reg.predy
    if sig2 is not None:
        reg.sig2 = sig2
    elif sig2n_k:
        reg.sig2 = np.sum(reg.u ** 2) / (reg.n - reg.k)
    else:
        reg.sig2 = np.sum(reg.u ** 2) / reg.n
    if vm is not None:
        reg.vm = vm
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()


if __name__ == '__main__':
    _test()
| ljwolf/pysal | pysal/spreg/utils.py | Python | bsd-3-clause | 20,482 | [
"COLUMBUS"
] | 91a402273d71a4f12451b117c6492dd6104564bfb4184c43aff15d3793c3c1b5 |
import logging
from subprocess import CalledProcessError
from django.contrib.auth.decorators import login_required
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from core.utils import BLAST, get_context
log = logging.getLogger(__name__)
@login_required
def index(request: HttpRequest, voucher_code: str, gene_code: str) -> HttpResponse:
    """Runs a local blast for voucher code and gene code.

    Rebuilds the local BLAST database if stale, then shows the results
    to the user in a table (or an error when the query has no valid codons).
    """
    blast = BLAST('local', voucher_code, gene_code)
    blast.save_seqs_to_file()

    if not blast.is_blast_db_up_to_date():
        try:
            blast.create_blast_db()
        except CalledProcessError:
            # makeblastdb fails when there are no sequences for this gene;
            # proceed -- the query step below will surface the problem
            log.warning("there are no sequences for gene %s", gene_code)

    was_sequence_saved = blast.save_query_to_file()
    if was_sequence_saved:
        blast.do_blast()
        result = blast.parse_blast_output()
        blast.delete_query_output_files()
    else:
        result = {
            "error": "Query sequence has no valid codons, only question marks",
        }
    context = get_context(request)
    context["result"] = result
    return render(request, 'blast_local/index.html', context)
| carlosp420/VoSeq | blast_local/views.py | Python | bsd-3-clause | 1,206 | [
"BLAST"
] | 635f2d5be7b159b5852c226427ea25e6d909c79d71b819bfd5dc243940145e27 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from filebrowser.sites import site
# Project-wide URL routing table (Django 1.x ``urlpatterns`` style).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Enabling the Django Markdown:
    url(r'^markdown/', include('django_markdown.urls')),

    # Django Admin (filebrowser must be registered before the admin catch-all)
    url(r'^admin/filebrowser/', include(site.urls)),
    url(r'^grappelli/', include('grappelli.urls')),  # Grappelli URLS
    url(r'^admin/', include(admin.site.urls)),  # Admin URLS

    # REST Framework URLs
    url(r'^api-auth/',
        include('rest_framework.urls', namespace='rest_framework')),

    # User management
    # NOTE: this is a cookiecutter template file; the placeholder below is
    # substituted with the generated project's package name.
    url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string view references are a deprecated Django idiom —
    # presumably fine for the Django version this template targets; verify.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| wldcordeiro/cookiecutter-django-essentials | {{cookiecutter.repo_name}}/config/urls.py | Python | bsd-3-clause | 1,636 | [
"VisIt"
] | 10d76602a3c32f1f03c3606e270951e780c67e1bb71ec9e39363bb87b505d560 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
    # Print command-line usage help for this script (Python 2 print syntax).
    print """Usage: python checklicenses.py [--root <root>] [tocheck]
  --root Specifies the repository root. This defaults to "../.." relative
         to the script file. This will be correct given the normal location
         of the script in "<root>/tools/checklicenses".
  --ignore-suppressions Ignores path-specific license whitelist. Useful when
                        trying to remove a suppression/whitelist entry.
  tocheck Specifies the directory, relative to root, to check. This defaults
          to "." so it checks everything.
Examples:
  python checklicenses.py
  python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'APSL (v2) BSD (4 clause)',
'APSL (v2)',
'Anti-Grain Geometry',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) BSD-like',
'Apache (v2.0) GPL (v2)',
'Apache (v2.0)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (2 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (4 clause)',
'BSD',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD MIT/X11 (BSD like)',
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'BSL (v1) LGPL (v2.1 or later)',
'FreeType (BSD like) with patent clause',
'FreeType (BSD like)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v3 or later) LGPL (v2.1 or later) with Bison parser exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'ISC',
'Independent JPEG Group License',
'LGPL (unversioned/unknown version)',
'LGPL (v2 or later)',
'LGPL (v2)',
'LGPL (v2.1 or later)',
'LGPL (v2.1)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MIT/X11 (BSD like)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v1.1)',
'MPL (v2.0)',
'Ms-PL',
'Public domain BSD (3 clause)',
'Public domain BSD',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'Public domain',
'SGI Free Software License B',
'SunSoft (BSD like)',
'libpng',
'zlib/libpng',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
'base/third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'buildtools/third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=25980
'UNKNOWN',
],
# http://llvm.org/bugs/show_bug.cgi?id=25976
'buildtools/third_party/libc++/trunk/src/include/atomic_support.h': [
'UNKNOWN'
],
'buildtools/third_party/libc++/trunk/utils/gen_link_script': [ 'UNKNOWN' ],
'buildtools/third_party/libc++/trunk/utils/not': [ 'UNKNOWN' ],
'buildtools/third_party/libc++/trunk/utils/sym_check': [ 'UNKNOWN' ],
'buildtools/third_party/libc++abi/trunk/test': [ 'UNKNOWN' ],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere from the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'courgette/third_party/qsufsort.h': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSD (4 clause) ISC',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
'MPL (v1.1) LGPL (unversioned/unknown version)',
],
# The project is BSD-licensed but the individual files do not have
# consistent license headers. Also, this is just used in a utility
# and not shipped. https://github.com/waylan/Python-Markdown/issues/435
'third_party/Python-Markdown': [
'UNKNOWN',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/catapult/firefighter/default/tracing/third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/catapult/tracing/third_party/devscripts': [
'GPL (v2 or later)',
],
# https://github.com/shazow/apiclient/issues/8
# MIT license.
'third_party/catapult/third_party/apiclient': [
'UNKNOWN',
],
# https://bugs.launchpad.net/beautifulsoup/+bug/1481316
# MIT license.
'third_party/catapult/third_party/beautifulsoup': [
'UNKNOWN'
],
# https://bitbucket.org/ned/coveragepy/issue/313/add-license-file-containing-2-3-or-4
# Apache (v2.0) license, not shipped
'third_party/catapult/third_party/coverage': [
'UNKNOWN'
],
# https://code.google.com/p/graphy/issues/detail?id=6
# Apache (v2.0)
'third_party/catapult/third_party/graphy': [
'UNKNOWN',
],
# https://github.com/GoogleCloudPlatform/gsutil/issues/305
('third_party/catapult/third_party/gsutil/gslib/third_party/'
'storage_apitools'): [
'UNKNOWN',
],
# https://github.com/google/apitools/issues/63
'third_party/catapult/third_party/gsutil/third_party/apitools': [
'UNKNOWN',
],
# https://github.com/boto/boto/issues/3373
'third_party/catapult/third_party/gsutil/third_party/boto': [
'UNKNOWN',
],
# https://bitbucket.org/cmcqueen1975/crcmod/issues/1/please-add-per-file-licenses
# Includes third_party/catapult/third_party/gsutil/third_party/crcmod_osx.
'third_party/catapult/third_party/gsutil/third_party/crcmod': [
'UNKNOWN',
],
# https://github.com/jcgregorio/httplib2/issues/307
'third_party/catapult/third_party/gsutil/third_party/httplib2': [
'UNKNOWN',
],
# https://github.com/google/oauth2client/issues/331
'third_party/catapult/third_party/gsutil/third_party/oauth2client': [
'UNKNOWN',
],
# https://github.com/google/protorpc/issues/14
'third_party/catapult/third_party/gsutil/third_party/protorpc': [
'UNKNOWN',
],
# https://sourceforge.net/p/pyasn1/tickets/4/
# Includes
# third_party/catapult/third_party/gsutil/third_party/pyasn1-modules.
'third_party/catapult/third_party/gsutil/third_party/pyasn1': [
'UNKNOWN',
],
# https://github.com/pnpnpn/retry-decorator/issues/4
'third_party/catapult/third_party/gsutil/third_party/retry-decorator': [
'UNKNOWN',
],
# https://bitbucket.org/sybren/python-rsa/issues/28/please-add-per-file-licenses
'third_party/catapult/third_party/gsutil/third_party/rsa': [
'UNKNOWN',
],
# https://bitbucket.org/gutworth/six/issues/137/please-add-per-file-licenses
# Already fixed upstream. https://crbug.com/573341
'third_party/catapult/third_party/gsutil/third_party/six': [
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# MIT license.
'third_party/catapult/third_party/html5lib-python': [
'UNKNOWN',
],
# https://github.com/GoogleCloudPlatform/appengine-mapreduce/issues/71
# Apache (v2.0)
'third_party/catapult/third_party/mapreduce': [
'UNKNOWN',
],
# https://code.google.com/p/webapp-improved/issues/detail?id=103
# Apache (v2.0).
'third_party/catapult/third_party/webapp2': [
'UNKNOWN',
],
# https://github.com/Pylons/webob/issues/211
# MIT license.
'third_party/catapult/third_party/WebOb': [
'UNKNOWN',
],
# https://github.com/Pylons/webtest/issues/141
# MIT license.
'third_party/catapult/third_party/webtest': [
'UNKNOWN',
],
# https://bitbucket.org/ianb/paste/issues/12/add-license-headers-to-source-files
# MIT license.
'third_party/catapult/third_party/Paste': [
'UNKNOWN',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/freetype-android': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jmake': [ # Used only at build time.
'GPL (v2)',
],
'third_party/jsoncpp/source': [
# https://github.com/open-source-parsers/jsoncpp/issues/234
'UNKNOWN',
],
'third_party/junit/src': [
# Pulled in via DEPS for Android only.
# Eclipse Public License / not shipped.
# Bug filed but upstream prefers not to fix.
# https://github.com/junit-team/junit/issues/1132
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
# Many liblouis files are mirrored but not used in the NaCl module.
# They are not excluded from the mirror because of lack of infrastructure
# support. Getting license headers added to the files where missing is
# tracked in https://github.com/liblouis/liblouis/issues/22.
'third_party/liblouis/src': [
'GPL (v3 or later)',
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
# Missing license headers in openh264 sources: https://github.com/cisco/openh264/issues/2233
'third_party/openh264/src': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/opus/src/autogen.sh' : [ # https://trac.xiph.org/ticket/2253#ticket
'UNKNOWN',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# https://bitbucket.org/ned/coveragepy/issue/313/add-license-file-containing-2-3-or-4
# BSD 2-clause license.
'third_party/pycoverage': [
'UNKNOWN',
],
'third_party/pyelftools': [ # http://crbug.com/222831
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/sfntly/src/java': [ # Apache 2.0, not shipped.
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# New BSD license. http://crbug.com/98455
'tools/swarming_client/third_party/google': [
'UNKNOWN',
],
# Apache v2.0.
'tools/swarming_client/third_party/googleapiclient': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# http://crbug.com/471372
# BSD
'tools/swarming_client/third_party/pyasn1': [
'UNKNOWN',
],
# http://crbug.com/471372
# Apache v2.0.
'tools/swarming_client/third_party/rsa': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
# BSD License. http://bugzilla.maptools.org/show_bug.cgi?id=2532
'third_party/pdfium/third_party/libtiff/tif_ojpeg.c': [
'UNKNOWN',
],
'third_party/pdfium/third_party/libtiff/tiffvers.h': [
'UNKNOWN',
],
'third_party/pdfium/third_party/libtiff/uvcode.h': [
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
# MIT license but some files contain no licensing info. e.g. autogen.sh.
# Files missing licensing info are not shipped.
'third_party/wayland': [ # http://crbug.com/553573
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
# Not shipped, MIT license but the header files contain no licensing info.
'tools/telemetry/third_party/altgraph': [
'UNKNOWN',
],
# Not shipped, MIT license but the header files contain no licensing info.
'tools/telemetry/third_party/modulegraph': [
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
# Not shipped, MIT license but the header files contain no licensing info.
'third_party/catapult/telemetry/third_party/altgraph': [
'UNKNOWN',
],
# Not shipped, MIT license but the header files contain no licensing info.
'third_party/catapult/telemetry/third_party/modulegraph': [
'UNKNOWN',
],
'third_party/catapult/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
}
# Paths skipped entirely by the license scan (matched as prefixes of paths
# relative to the repository root).
EXCLUDED_PATHS = [
    # Don't check generated files
    'out/',

    # Don't check downloaded goma client binaries
    'build/goma/client',

    # Don't check sysroot directories
    'build/linux/debian_wheezy_amd64-sysroot',
    'build/linux/debian_wheezy_arm-sysroot',
    'build/linux/debian_wheezy_i386-sysroot',
    'build/linux/debian_wheezy_mips-sysroot',
]
def check_licenses(options, args):
    """Run licensecheck.pl over the tree and report non-whitelisted licenses.

    Returns the process exit code: 0 on success, 1 on failure (usage error,
    licensecheck failure, or any file carrying a non-whitelisted license).
    """
    # Figure out which directory we have to check.
    if len(args) == 0:
        # No directory to check specified, use the repository root.
        start_dir = options.base_directory
    elif len(args) == 1:
        # Directory specified. Start here. It's supposed to be relative to the
        # base directory.
        start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
    else:
        # More than one argument, we don't handle this.
        PrintUsage()
        return 1

    print "Using base directory:", options.base_directory
    print "Checking:", start_dir
    print

    # licensecheck.pl ships in the tree under third_party/devscripts.
    licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
                                                     'third_party',
                                                     'devscripts',
                                                     'licensecheck.pl'))

    # -l 100: scan the first 100 lines of each file; -r: recurse.
    licensecheck = subprocess.Popen([licensecheck_path,
                                     '-l', '100',
                                     '-r', start_dir],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
    stdout, stderr = licensecheck.communicate()
    if options.verbose:
        print '----------- licensecheck stdout -----------'
        print stdout
        print '--------- end licensecheck stdout ---------'
    # Any stderr output is treated as fatal, not just a non-zero exit code.
    if licensecheck.returncode != 0 or stderr:
        print '----------- licensecheck stderr -----------'
        print stderr
        print '--------- end licensecheck stderr ---------'
        print "\nFAILED\n"
        return 1

    used_suppressions = set()
    errors = []

    # Each output line has the form "<filename>: <license text>".
    for line in stdout.splitlines():
        filename, license = line.split(':', 1)
        filename = os.path.relpath(filename.strip(), options.base_directory)

        # Check if the file belongs to one of the excluded paths.
        if any((filename.startswith(path) for path in EXCLUDED_PATHS)):
            continue

        # For now we're just interested in the license.
        license = license.replace('*No copyright*', '').strip()

        # Skip generated files.
        if 'GENERATED FILE' in license:
            continue

        if license in WHITELISTED_LICENSES:
            continue

        if not options.ignore_suppressions:
            matched_prefixes = [
                prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
                if filename.startswith(prefix) and
                license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
            if matched_prefixes:
                used_suppressions.update(set(matched_prefixes))
                continue

        errors.append({'filename': filename, 'license': license})

    # Optional machine-readable report of the failures.
    if options.json:
        with open(options.json, 'w') as f:
            json.dump(errors, f)

    if errors:
        for error in errors:
            print "'%s' has non-whitelisted license '%s'" % (
                error['filename'], error['license'])
        print "\nFAILED\n"
        print "Please read",
        print "http://www.chromium.org/developers/adding-3rd-party-libraries"
        print "for more info how to handle the failure."
        print
        print "Please respect OWNERS of checklicenses.py. Changes violating"
        print "this requirement may be reverted."

        # Do not print unused suppressions so that above message is clearly
        # visible and gets proper attention. Too much unrelated output
        # would be distracting and make the important points easier to miss.

        return 1

    print "\nSUCCESS\n"

    # Only report stale suppressions on a full-tree run; a partial run
    # legitimately leaves most suppressions unused.
    if not len(args):
        unused_suppressions = set(
            PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
                used_suppressions)
        if unused_suppressions:
            print "\nNOTE: unused suppressions detected:\n"
            print '\n'.join(unused_suppressions)

    return 0
def main():
    """Parse command-line options and run the license check.

    Returns the process exit code (0 = success, 1 = failure).
    """
    # Default root is two directories up from this script, i.e. the
    # repository root when the script lives in <root>/tools/checklicenses.
    default_root = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..'))
    option_parser = optparse.OptionParser()
    option_parser.add_option('--root', default=default_root,
                             dest='base_directory',
                             help='Specifies the repository root. This defaults '
                             'to "../.." relative to the script file, which '
                             'will normally be the repository root.')
    option_parser.add_option('-v', '--verbose', action='store_true',
                             default=False, help='Print debug logging')
    option_parser.add_option('--ignore-suppressions',
                             action='store_true',
                             default=False,
                             help='Ignore path-specific license whitelist.')
    option_parser.add_option('--json', help='Path to JSON output file')
    options, args = option_parser.parse_args()
    return check_licenses(options, args)


if '__main__' == __name__:
    sys.exit(main())
| highweb-project/highweb-webcl-html5spec | tools/checklicenses/checklicenses.py | Python | bsd-3-clause | 23,436 | [
"Galaxy"
] | 53bd60a698e8c93931c9d7e359cdccf19f8d87e4b80636f096d6a0f06cff140f |
"""
The B{0install remove-feed} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
syntax = "[INTERFACE] FEED"
from zeroinstall import SafeException, _
from zeroinstall.injector import model, writer
from zeroinstall.cmd import add_feed, UsageError
add_options = add_feed.add_options
def handle(config, options, args):
    """Handle ``0install remove-feed [INTERFACE] FEED``.

    With two arguments, removes FEED from INTERFACE's extra feeds and saves
    the interface.  With one argument, delegates to add-feed's shared logic
    in remove-only mode.  Anything else is a usage error.
    """
    if len(args) == 1:
        # Only the feed given: reuse add-feed's interactive handling,
        # restricted to removal.
        add_feed.handle(config, options, args, add_ok = False, remove_ok = True)
    elif len(args) == 2:
        iface_uri, feed_url = args
        iface = config.iface_cache.get_interface(model.canonical_iface_uri(iface_uri))

        feed_import = add_feed.find_feed_import(iface, feed_url)
        if not feed_import:
            raise SafeException(_('Interface %(interface)s has no feed %(feed)s') %
                                {'interface': iface.uri, 'feed': feed_url})

        iface.extra_feeds.remove(feed_import)
        writer.save_interface(iface)
    else:
        raise UsageError()
| timdiels/zeroinstall | zeroinstall/cmd/remove_feed.py | Python | lgpl-2.1 | 925 | [
"VisIt"
] | 359d4c43124f8df8695d4297bcd2b5d84752eb024649ad9b040b76aeca1cc794 |
"""
Methods for calculating Mutual Information in an embarrassingly parallel way.
Author: Daniel Homola <dani.homola@gmail.com>
License: BSD 3 clause
"""
import numpy as np
from scipy.special import gamma, psi
from sklearn.neighbors import NearestNeighbors
from sklearn.externals.joblib import Parallel, delayed
def get_mi_vector(MI_FS, F, s):
    """Return the mutual information of each candidate feature in F with
    the target, conditioned on the previously selected feature s.

    Used when |S| > 1.  The per-feature estimates are independent, so the
    work is farmed out to joblib workers (embarrassingly parallel).
    """
    return Parallel(n_jobs=MI_FS.n_jobs)(
        delayed(_get_mi)(candidate, s, MI_FS) for candidate in F)
def _get_mi(f, s, MI_FS):
    """Estimate the MI score for candidate feature ``f`` given selected
    feature ``s``, according to the selection method configured on MI_FS.

    Returns NaN for non-positive (invalid) estimates.
    """
    n_samples = MI_FS.X.shape[0]
    if MI_FS.method in ['JMI', 'JMIM']:
        # JMI / JMIM: MI between the joint (s, f) distribution and y.
        joint = MI_FS.X[:, (s, f)]
        if MI_FS.categorical:
            mi = _mi_dc(joint, MI_FS.y, MI_FS.k)
        else:
            mi = _mi_cc((joint, MI_FS.y), MI_FS.k)
    else:
        # MRMR: MI between the two continuous feature columns themselves.
        col_s = MI_FS.X[:, s].reshape(n_samples, 1)
        col_f = MI_FS.X[:, f].reshape(n_samples, 1)
        mi = _mi_cc((col_s, col_f), MI_FS.k)
    # MI must be non-negative; NaN also falls through to the else branch.
    return mi if mi > 0 else np.nan
def get_first_mi_vector(MI_FS, k):
    """Calculates the Mutual Information between each feature in X and y.

    This function is for when |S| = 0. We select the first feature in S.
    """
    n, p = MI_FS.X.shape
    # range() instead of the Python-2-only xrange() so the module also runs
    # on Python 3; on Python 2 the materialized list over p features is
    # negligible.
    MIs = Parallel(n_jobs=MI_FS.n_jobs)(delayed(_get_first_mi)(i, k, MI_FS)
                                        for i in range(p))
    return MIs
def _get_first_mi(i, k, MI_FS):
    """MI estimate between feature column ``i`` of X and the target y.

    Returns NaN for non-positive (invalid) estimates.
    """
    n_samples = MI_FS.X.shape[0]
    column = MI_FS.X[:, i].reshape((n_samples, 1))
    if MI_FS.categorical:
        mi = _mi_dc(column, MI_FS.y, k)
    else:
        mi = _mi_cc((column, MI_FS.y), k)
    # MI must be non-negative; NaN also falls through to the else branch.
    return mi if mi > 0 else np.nan
def _mi_dc(x, y, k):
    """
    Calculates the mutual information between a continuous vector x and a
    discrete class vector y.

    This implementation can calculate the MI between the joint distribution of
    one or more continuous variables (X[:, 1:3]) with a discrete variable (y).

    Thanks to Adam Pocock, the author of the FEAST package for the idea.

    Brian C. Ross, 2014, PLOS ONE
    Mutual Information between Discrete and Continuous Data Sets
    """
    y = y.flatten()
    n = x.shape[0]
    classes = np.unique(y)
    knn = NearestNeighbors(n_neighbors=k)
    # distance to kth in-class neighbour
    d2k = np.empty(n)
    # number of points within each point's class
    Nx = []
    for yi in y:
        Nx.append(np.sum(y == yi))

    # find the distance of the kth in-class point
    for c in classes:
        mask = np.where(y == c)[0]
        knn.fit(x[mask, :])
        # kneighbors() without arguments excludes each query point itself,
        # so column -1 is the true kth in-class neighbour distance.
        d2k[mask] = knn.kneighbors()[0][:, -1]

    # find the number of points within the distance of the kth in-class point
    knn.fit(x)
    m = knn.radius_neighbors(radius=d2k, return_distance=False)
    m = [i.shape[0] for i in m]

    # calculate MI based on Equation 2 in Ross 2014
    MI = psi(n) - np.mean(psi(Nx)) + psi(k) - np.mean(psi(m))
    return MI
def _mi_cc(variables, k=1):
    """Mutual information between any number of continuous variables,
    estimated as the sum of marginal entropies minus the joint entropy.

    Here it is used to estimate MI between continuous X(s) and y.

    Written by Gael Varoquaux:
    https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429
    """
    joint = np.hstack(variables)
    marginal_entropy_sum = sum(_entropy(v, k=k) for v in variables)
    return marginal_entropy_sum - _entropy(joint, k=k)
def _nearest_distances(X, k=1):
    """Distance from every point in X to its kth nearest neighbour,
    under the Chebyshev (max-coordinate) metric.
    """
    nn = NearestNeighbors(n_neighbors=k, metric='chebyshev')
    nn.fit(X)
    # Querying the training data itself: the nearest hit is the point,
    # so the last column holds the kth-neighbour distance.
    distances, _ = nn.kneighbors(X)
    return distances[:, -1]
def _entropy(X, k=1):
    """Kozachenko-Leonenko k-nearest-neighbour estimate of the differential
    entropy of X.

    Written by Gael Varoquaux:
    https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data the entropy of which is computed
    k : int, optional
        number of nearest neighbors for density estimation

    References
    ----------
    Kozachenko, L. F. & Leonenko, N. N. 1987 Sample estimate of entropy
    of a random vector. Probl. Inf. Transm. 23, 95-101.
    See also: Evans, D. 2008 A computationally efficient estimator for
    mutual information, Proc. R. Soc. A 464 (2093), 1203-1215.
    and:
    Kraskov A, Stogbauer H, Grassberger P. (2004). Estimating mutual
    information. Phys Rev E 69(6 Pt 2):066138.
    F. Perez-Cruz, (2008). Estimation of Information Theoretic Measures
    for Continuous Random Variables. Advances in Neural Information
    Processing Systems 21 (NIPS). Vancouver (Canada), December.

    return d*mean(log(r))+log(volume_unit_ball)+log(n-1)-log(k)
    """
    # Distance to each point's kth nearest neighbour.
    r = _nearest_distances(X, k)
    n, d = X.shape
    # Volume of the d-dimensional unit ball.
    unit_ball_volume = (np.pi ** (.5 * d)) / gamma(.5 * d + 1)
    # eps guards against log(0) for coincident points.
    mean_log_dist = np.mean(np.log(r + np.finfo(X.dtype).eps))
    return (d * mean_log_dist + np.log(unit_ball_volume) + psi(n) - psi(k))
| xrafael/readmission | src/mifs/mi.py | Python | gpl-3.0 | 5,353 | [
"Brian"
] | 596a39510da0b0934969b1cded68c3791ea4e211bf7f1cb7d44a591efc463470 |
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
import mox
import netaddr
from oslo.config import cfg
from oslo import messaging
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as vif_obj
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import quota
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_floating_ip
from nova.tests.objects import test_network
from nova.tests.objects import test_service
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
    """Tests for network_manager.FlatManager.

    The DB layer is exercised through mox stubs primed with the
    module-level ``networks``/``fixed_ips``/``vifs`` fixtures, so no real
    database is touched.  NOTE(review): mox is record/replay based — every
    expectation must be recorded in call order before ``ReplayAll()``.
    """
    def setUp(self):
        """Build a FlatManager bound to HOST plus a non-admin context."""
        super(FlatNetworkTestCase, self).setUp()
        # Redirect log output into a per-test temp dir.
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatManager(host=HOST)
        # Empty DNS domain so instance DNS entries land in the default zone.
        self.network.instance_dns_domain = ''
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
    def test_get_instance_nw_info(self):
        """Each VIF in the fake nw_info must expose the expected fields."""
        fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
        # Zero networks -> empty (falsy) network info.
        nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
        self.assertFalse(nw_info)
        nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
        for i, vif in enumerate(nw_info):
            # Fake networks are numbered starting at 1.
            nid = i + 1
            check = {'bridge': 'fake_br%d' % nid,
                     'cidr': '192.168.%s.0/24' % nid,
                     'cidr_v6': '2001:db8:0:%x::/64' % nid,
                     'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
                     'multi_host': False,
                     'injected': False,
                     'bridge_interface': None,
                     'vlan': None,
                     'broadcast': '192.168.%d.255' % nid,
                     'dhcp_server': '192.168.1.1',
                     'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
                     'gateway': '192.168.%d.1' % nid,
                     'gateway_v6': '2001:db8:0:1::1',
                     'label': 'test%d' % nid,
                     'mac': 'DE:AD:BE:EF:00:%02x' % nid,
                     'rxtx_cap': 30,
                     'vif_type': net_model.VIF_TYPE_BRIDGE,
                     'vif_devname': None,
                     'vif_uuid':
                        '00000000-0000-0000-0000-00000000000000%02d' % nid,
                     'ovs_interfaceid': None,
                     'qbh_params': None,
                     'qbg_params': None,
                     'should_create_vlan': False,
                     'should_create_bridge': False,
                     'ip': '192.168.%d.%03d' % (nid, nid + 99),
                     'ip_v6': '2001:db8:0:1::%x' % nid,
                     'netmask': '255.255.255.0',
                     'netmask_v6': 64,
                     'physical_network': None,
                     }
            # Flatten the VIF model (network + v4/v6 subnets) into a plain
            # dict so it can be compared against ``check`` in one shot.
            network = vif['network']
            net_v4 = vif['network']['subnets'][0]
            net_v6 = vif['network']['subnets'][1]
            vif_dict = dict(bridge=network['bridge'],
                            cidr=net_v4['cidr'],
                            cidr_v6=net_v6['cidr'],
                            id=vif['id'],
                            multi_host=network.get_meta('multi_host', False),
                            injected=network.get_meta('injected', False),
                            bridge_interface=
                                network.get_meta('bridge_interface'),
                            vlan=network.get_meta('vlan'),
                            broadcast=str(net_v4.as_netaddr().broadcast),
                            dhcp_server=network.get_meta('dhcp_server',
                                net_v4['gateway']['address']),
                            dns=[ip['address'] for ip in net_v4['dns']],
                            gateway=net_v4['gateway']['address'],
                            gateway_v6=net_v6['gateway']['address'],
                            label=network['label'],
                            mac=vif['address'],
                            rxtx_cap=vif.get_meta('rxtx_cap'),
                            vif_type=vif['type'],
                            vif_devname=vif.get('devname'),
                            vif_uuid=vif['id'],
                            ovs_interfaceid=vif.get('ovs_interfaceid'),
                            qbh_params=vif.get('qbh_params'),
                            qbg_params=vif.get('qbg_params'),
                            should_create_vlan=
                                network.get_meta('should_create_vlan', False),
                            should_create_bridge=
                                network.get_meta('should_create_bridge',
                                                 False),
                            ip=net_v4['ips'][i]['address'],
                            ip_v6=net_v6['ips'][i]['address'],
                            netmask=str(net_v4.as_netaddr().netmask),
                            netmask_v6=net_v6.as_netaddr()._prefixlen,
                            physical_network=
                                network.get_meta('physical_network', None))
            self.assertThat(vif_dict, matchers.DictMatches(check))
    def test_validate_networks(self):
        """Valid requested (uuid, fixed-IPv4) pairs must pass validation."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                [dict(test_network.fake_network, **net)
                 for net in networks])
        # Expect one fixed-IP lookup per requested network, in request
        # order; instance_uuid=None marks the address as unallocated.
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
        ip['network'] = dict(test_network.fake_network,
                             **networks[0])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_valid_fixed_ipv6(self):
        """A requested IPv6 fixed address must also validate cleanly."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '2001:db9:0:1::10')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **networks[1])])
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_reserved(self):
        """create_networks must reserve 3 IPs (net, gateway, broadcast)."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                       '192.168.0.0/24', False, 1,
                                       256, None, None, None, None, None)
        self.assertEqual(1, len(nets))
        network = nets[0]
        self.assertEqual(3, db.network_count_reserved_ips(context_admin,
                        network['id']))
    def test_validate_networks_none_requested_networks(self):
        """Passing None for requested networks is a no-op, not an error."""
        self.network.validate_networks(self.context, None)
    def test_validate_networks_empty_requested_networks(self):
        """An empty request list must validate without touching the DB."""
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_invalid_fixed_ip(self):
        """Malformed fixed-IP strings must raise FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                [dict(test_network.fake_network, **net)
                 for net in networks])
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)
    def test_validate_networks_empty_fixed_ip(self):
        """Empty-string fixed IPs must raise FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                [dict(test_network.fake_network, **net)
                 for net in networks])
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)
    def test_validate_networks_none_fixed_ip(self):
        """None fixed IPs mean 'any address' and must validate fine."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                [dict(test_network.fake_network, **net)
                 for net in networks])
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
        """Adding a fixed IP by numeric network id must reserve quota."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
        """Adding a fixed IP by network uuid must reserve quota too."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)
        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)
    def test_mini_dns_driver(self):
        """Exercise create/delete/modify/query on the minidns driver."""
        zone1 = "example.org"
        zone2 = "example.com"
        driver = self.network.instance_dns_manager
        driver.create_entry("hostone", "10.0.0.1", "A", zone1)
        driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
        driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
        driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
        driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
        driver.delete_entry("hostone", zone1)
        # Re-point two names at 10.0.0.1; lookups by address must see both.
        driver.modify_address("hostfour", "10.0.0.1", zone1)
        driver.modify_address("hostthree", "10.0.0.1", zone1)
        names = driver.get_entries_by_address("10.0.0.1", zone1)
        self.assertEqual(len(names), 2)
        self.assertIn('hostthree', names)
        self.assertIn('hostfour', names)
        names = driver.get_entries_by_address("10.0.0.5", zone2)
        self.assertEqual(len(names), 1)
        self.assertIn('hostfive', names)
        addresses = driver.get_entries_by_name("hosttwo", zone1)
        self.assertEqual(len(addresses), 1)
        self.assertIn('10.0.0.2', addresses)
        # Unknown record types must be rejected.
        self.assertRaises(exception.InvalidInput,
                driver.create_entry,
                "hostname",
                "10.10.10.10",
                "invalidtype",
                zone1)
    def test_mini_dns_driver_with_mixed_case(self):
        """DNS names must round-trip case-insensitively."""
        zone1 = "example.org"
        driver = self.network.instance_dns_manager
        driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 1)
        for n in addresses:
            driver.delete_entry(n, zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 0)
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_instance_dns(self, reserve):
        """Allocating a fixed IP must register DNS by name and by uuid."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        fixedip = dict(test_fixed_ip.fake_fixed_ip,
                       address='192.168.0.101')
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None
                                   ).AndReturn(fixedip)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)
        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])
        instance_manager = self.network.instance_dns_manager
        addresses = instance_manager.get_entries_by_name(HOST,
                                             self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip['address'])
        addresses = instance_manager.get_entries_by_name(FAKEUUID,
                                              self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip['address'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)
    # Floating-IP operations are no-ops (return None) on FlatManager.
    def test_allocate_floating_ip(self):
        """FlatManager has no floating IPs: allocate returns None."""
        self.assertIsNone(self.network.allocate_floating_ip(self.context,
                                                            1, None))
    def test_deallocate_floating_ip(self):
        """FlatManager has no floating IPs: deallocate returns None."""
        self.assertIsNone(self.network.deallocate_floating_ip(self.context,
                                                              1, None))
    def test_associate_floating_ip(self):
        """FlatManager has no floating IPs: associate returns None."""
        self.assertIsNone(self.network.associate_floating_ip(self.context,
                                                             None, None))
    def test_disassociate_floating_ip(self):
        """FlatManager has no floating IPs: disassociate returns None."""
        self.assertIsNone(self.network.disassociate_floating_ip(self.context,
                                                                None, None))
    def test_get_networks_by_uuids_ordering(self):
        """Results must come back in requested-uuid order, not DB order."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                [dict(test_network.fake_network, **net)
                 for net in networks])
        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)
        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_allocate_calculates_quota_auth(self, util_method, reserve,
                                            get_by_uuid):
        """Quota over-limit on allocate must map to FixedIpLimitExceeded."""
        inst = objects.Instance()
        inst['uuid'] = 'nosuch'
        get_by_uuid.return_value = inst
        reserve.side_effect = exception.OverQuota(overs='testing')
        util_method.return_value = ('foo', 'bar')
        self.assertRaises(exception.FixedIpLimitExceeded,
                          self.network.allocate_fixed_ip,
                          self.context, 123, {'uuid': 'nosuch'})
        util_method.assert_called_once_with(self.context, inst)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_deallocate_calculates_quota_auth(self, util_method, reserve,
                                              get_by_address):
        """Deallocate must resolve quota ownership from the instance."""
        inst = objects.Instance(uuid='fake-uuid')
        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              virtual_interface_id=1)
        get_by_address.return_value = fip
        util_method.return_value = ('foo', 'bar')
        # This will fail right after the reserve call when it tries
        # to look up the fake instance we created above
        self.assertRaises(exception.InstanceNotFound,
                          self.network.deallocate_fixed_ip,
                          self.context, '1.2.3.4', instance=inst)
        util_method.assert_called_once_with(self.context, inst)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress argument must be stringified before the DB."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                          address=netaddr.IPAddress('1.2.3.4'))
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.virtual_interface.VirtualInterface'
                '.get_by_instance_and_network')
    @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.save')
    def test_allocate_fixed_ip_cleanup(self,
                                       mock_fixedip_save,
                                       mock_fixedip_associate,
                                       mock_fixedip_disassociate,
                                       mock_vif_get,
                                       mock_instance_get):
        """A failure mid-allocation must roll back DNS and the fixed IP."""
        address = netaddr.IPAddress('1.2.3.4')
        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              address=address,
                              virtual_interface_id=1)
        mock_fixedip_associate.return_value = fip
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_instance_get.return_value = instance
        mock_vif_get.return_value = vif_obj.VirtualInterface(
            instance_uuid='fake-uuid', id=1)
        with contextlib.nested(
            mock.patch.object(self.network, '_setup_network_on_host'),
            mock.patch.object(self.network, 'instance_dns_manager'),
            mock.patch.object(self.network,
                '_do_trigger_security_group_members_refresh_for_instance')
        ) as (mock_setup_network, mock_dns_manager, mock_ignored):
            # Force the failure after the fixed IP was associated.
            mock_setup_network.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              self.network.allocate_fixed_ip,
                              self.context, instance.uuid,
                              {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                              address=address)
            mock_dns_manager.delete_entry.assert_has_calls([
                mock.call(instance.display_name, ''),
                mock.call(instance.uuid, '')
            ])
        mock_fixedip_disassociate.assert_called_once_with(self.context)
class FlatDHCPNetworkTestCase(test.TestCase):
    """Tests for network_manager.FlatDHCPManager."""
    def setUp(self):
        """Build a FlatDHCPManager over the sample-network fixture."""
        super(FlatDHCPNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatDHCPManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host must defer iptables work into one _apply() call."""
        def lookup_fixed_ip(context, fixed_ip_id, **kwargs):
            # Map the two known ids to fixed IPs on a shared test network;
            # any other id falls through to None, as the original stub did.
            bridge_net = objects.Network(bridge='testbridge',
                                         cidr='192.168.1.0/24')
            addresses = {1: '192.168.1.4', 2: '192.168.1.5'}
            if fixed_ip_id in addresses:
                return objects.FixedIP(address=addresses[fixed_ip_id],
                                       network=bridge_net)
        def counting_apply():
            counting_apply.count += 1
        counting_apply.count = 0
        admin_ctxt = context.RequestContext('testuser', 'testproject',
                                            is_admin=True)
        fip_one = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        fip_two = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        fip_one._context = admin_ctxt
        fip_two._context = admin_ctxt
        iptable_apply.side_effect = counting_apply
        floating_get_by_host.return_value = [fip_one, fip_two]
        fixed_get_by_id.side_effect = lookup_fixed_ip
        self.network.init_host()
        # All rule changes for both floating IPs must be batched into a
        # single apply while the manager holds the defer context.
        self.assertEqual(1, counting_apply.count)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
network_id=mox.IgnoreArg(),
reserved=True).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
self.context_admin,
instance['uuid'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
# VLAN 100 and 101 are used, so this network shoud be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_start_multiple(self):
# VLAN 100 and 101 are used, so these networks shoud be created in 102
# and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
self.assertEqual(networks[1]["vlan"], 103)
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
@mock.patch('nova.db.network_get')
def test_validate_networks(self, net_get):
def network_get(_context, network_id, project_only='allow_none'):
return dict(test_network.fake_network, **networks[network_id])
net_get.side_effect = network_get
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[1]['id'],
network=dict(test_network.fake_network,
**networks[1]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed1)
db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[0]['id'],
network=dict(test_network.fake_network,
**networks[0]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed2)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id + '1')
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id='testproject')
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=1)
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
mock_commit.assert_called_once_with(ctxt, 'reserve',
project_id='testproject')
    @mock.patch('nova.db.fixed_ip_get')
    def test_associate_floating_ip(self, fixed_get):
        """Exercise associate_floating_ip through its main branches:
        missing host interface, already-associated address (early exit vs.
        forced disassociate), remote-host dispatch over RPC, and
        local-host association.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        network=test_network.fake_network)

        # floating ip that's already associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1)

        # floating ip that isn't associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)

        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)

        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='jibberjabber')

        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)

        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')

        # RPC path flips self.local off; local path flips it on, so the
        # assertions below can tell which branch was taken.
        def fake6(ctxt, method, **kwargs):
            self.local = False

        def fake7(*args, **kwargs):
            self.local = True

        def fake8(*args, **kwargs):
            raise processutils.ProcessExecutionError('',
                    'Cannot find device "em0"\n')

        def fake9(*args, **kwargs):
            raise test.TestingException()

        # raises because interface doesn't exist
        self.stubs.Set(self.network.db,
                       'floating_ip_fixed_ip_associate',
                       fake1)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
        self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
        self.assertRaises(exception.NoFloatingIpInterface,
                          self.network._associate_floating_ip,
                          ctxt,
                          '1.2.3.4',
                          '1.2.3.5',
                          mox.IgnoreArg(),
                          mox.IgnoreArg())

        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)

        # raises because floating_ip is already associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)

        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address='1.2.3.4',
                                      instance_uuid='fake_uuid',
                                      network=test_network.fake_network)

        # doesn't raise because we exit early if the address is the same
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')

        # raises because we call disassociate which is mocked
        self.assertRaises(test.TestingException,
                          self.network.associate_floating_ip,
                          ctxt,
                          mox.IgnoreArg(),
                          'new')

        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)

        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertFalse(self.local)

        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_associate_floating_ip', fake7)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertTrue(self.local)
    def test_add_floating_ip_nat_before_bind(self):
        """Verify the NAT forward rule is installed before the floating IP
        is bound to the interface (ordering is checked with a flag that
        fake_bind asserts fake_nat set first).
        """
        # Tried to verify order with documented mox record/verify
        # functionality, but it doesn't seem to work since I can't make it
        # fail.  I'm using stubs and a flag for now, but if this mox feature
        # can be made to work, it would be a better way to test this.
        #
        # self.mox.StubOutWithMock(self.network.driver,
        #                          'ensure_floating_forward')
        # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
        #
        # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg())
        # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
        #                                      mox.IgnoreArg())
        # self.mox.ReplayAll()

        nat_called = [False]

        def fake_nat(*args, **kwargs):
            nat_called[0] = True

        def fake_bind(*args, **kwargs):
            # If bind happens first, nat_called is still False and this fails.
            self.assertTrue(nat_called[0])

        self.stubs.Set(self.network.driver,
                       'ensure_floating_forward',
                       fake_nat)
        self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)

        self.network.l3driver.add_floating_ip('fakefloat',
                                              'fakefixed',
                                              'fakeiface',
                                              'fakenet')
    @mock.patch('nova.db.floating_ip_get_all_by_host')
    @mock.patch('nova.db.fixed_ip_get')
    def _test_floating_ip_init_host(self, fixed_get, floating_get,
                                    public_interface, expected_arg):
        """Shared driver for the init-host tests below.

        Only the floating IP whose fixed IP exists (fixed_ip_id=1) should
        be re-plumbed; the interface passed to the l3 driver must be
        ``expected_arg`` (either the per-IP interface or the
        public_interface flag override).
        """
        floating_get.return_value = [
            dict(test_floating_ip.fake_floating_ip,
                 interface='foo',
                 address='1.2.3.4'),
            dict(test_floating_ip.fake_floating_ip,
                 interface='fakeiface',
                 address='1.2.3.5',
                 fixed_ip_id=1),
            dict(test_floating_ip.fake_floating_ip,
                 interface='bar',
                 address='1.2.3.6',
                 fixed_ip_id=2),
        ]

        def fixed_ip_get(_context, fixed_ip_id, get_network):
            if fixed_ip_id == 1:
                return dict(test_fixed_ip.fake_fixed_ip,
                            address='1.2.3.4',
                            network=test_network.fake_network)
            # fixed_ip_id=2 is "gone"; that floating IP must be skipped.
            raise exception.FixedIpNotFound(id=fixed_ip_id)
        fixed_get.side_effect = fixed_ip_get

        self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
        self.flags(public_interface=public_interface)
        self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
                                              netaddr.IPAddress('1.2.3.4'),
                                              expected_arg,
                                              mox.IsA(objects.Network))
        self.mox.ReplayAll()
        self.network.init_host_floating_ips()
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
    def test_disassociate_floating_ip(self):
        """Exercise disassociate_floating_ip: not-associated error,
        remote-host dispatch over RPC, local-host teardown, and the
        auto-assigned rejection.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            pass

        # floating ip that isn't associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)

        # floating ip that is associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        project_id=ctxt.project_id)

        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)

        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False,
                        host='jibberjabber')

        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)

        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')

        # RPC path flips self.local off; local path flips it on.
        def fake6(ctxt, method, **kwargs):
            self.local = False

        def fake7(*args, **kwargs):
            self.local = True

        # auto_assigned floating IPs may not be manually disassociated
        def fake8(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        auto_assigned=True,
                        project_id=ctxt.project_id)

        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)

        # raises because floating_ip is not associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpNotAssociated,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())

        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)

        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertFalse(self.local)

        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertTrue(self.local)

        # raises because auto_assigned floating IP cannot be disassociated
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
        self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        """add_fixed_ip_to_instance drives the expected DB calls when no
        VPN/requested networks are involved (mox record/replay — the call
        expectations below ARE the assertions).
        """
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    def test_ip_association_and_allocation_of_other_project(self, net_get,
                                                            fixed_get):
        """Makes sure that we cannot deallocate or disassociate
        a public ip of other project.
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        # context1 owns the floating IP; context2 is the interloper.
        context1 = context.RequestContext('user', 'project1')
        context2 = context.RequestContext('user', 'project2')

        float_ip = db.floating_ip_create(context1.elevated(),
                                         {'address': '1.2.3.4',
                                          'project_id': context1.project_id})
        float_addr = float_ip['address']

        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})

        fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
                                              1, instance['uuid']).address
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))

        # Associate the IP with non-admin user context
        self.assertRaises(exception.Forbidden,
                          self.network.associate_floating_ip,
                          context2,
                          float_addr,
                          fix_addr)

        # Deallocate address from other project
        self.assertRaises(exception.Forbidden,
                          self.network.deallocate_floating_ip,
                          context2,
                          float_addr)

        # Now Associates the address to the actual project
        self.network.associate_floating_ip(context1, float_addr, fix_addr)

        # Now try dis-associating from other project
        self.assertRaises(exception.Forbidden,
                          self.network.disassociate_floating_ip,
                          context2,
                          float_addr)

        # Clean up the ip addresses
        self.network.disassociate_floating_ip(context1, float_addr)
        self.network.deallocate_floating_ip(context1, float_addr)
        self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
        db.floating_ip_destroy(context1.elevated(), float_addr)
        db.fixed_ip_disassociate(context1.elevated(), fix_addr)
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
        """Verify that release is called properly.

        Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])

        def vif_get(_context, _vif_id):
            return vifs[0]

        self.stubs.Set(db, 'virtual_interface_get', vif_get)
        context1 = context.RequestContext('user', 'project1')

        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})

        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      instance_uuid=instance.uuid,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))

        # With force_dhcp_release the DHCP lease must be explicitly
        # released using the vif's MAC address.
        self.flags(force_dhcp_release=True)
        self.mox.StubOutWithMock(linux_net, 'release_dhcp')
        linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
                               'DE:AD:BE:EF:00:00')
        self.mox.ReplayAll()

        self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
    def test_deallocate_fixed_deleted(self):
        # Verify doesn't deallocate deleted fixed_ip from deleted network.

        def teardown_network_on_host(_context, network):
            # Network id 0 is the deleted network; reaching teardown for it
            # is the observable behavior this test asserts on.
            if network['id'] == 0:
                raise test.TestingException()

        self.stubs.Set(self.network, '_teardown_network_on_host',
                       teardown_network_on_host)

        context1 = context.RequestContext('user', 'project1')
        elevated = context1.elevated()

        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        network = db.network_create_safe(elevated, networks[0])

        _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fix_addr = _fix_addr.address
        db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
        elevated.read_deleted = 'yes'
        delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
        values = {'address': fix_addr,
                  'network_id': network.id,
                  'instance_uuid': delfixed['instance_uuid']}
        db.fixed_ip_create(elevated, values)
        # NOTE(review): the 'no' assignment is immediately overwritten by
        # 'yes' below — looks like leftover toggling; verify before cleanup.
        elevated.read_deleted = 'no'
        elevated.read_deleted = 'yes'

        deallocate = self.network.deallocate_fixed_ip
        self.assertRaises(test.TestingException, deallocate, context1,
                          fix_addr, 'fake')
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
        """Verify that deallocate doesn't raise when no vif is returned.

        Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])

        def vif_get(_context, _vif_id):
            # Simulate a fixed IP whose virtual interface is gone.
            return None

        self.stubs.Set(db, 'virtual_interface_get', vif_get)
        context1 = context.RequestContext('user', 'project1')

        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})

        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.flags(force_dhcp_release=True)
        fixed_update.return_value = fixed_get.return_value
        # Must complete without raising even though the vif lookup is None.
        self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
        # Verify IP is not deallocated if the security group refresh fails.
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        context1 = context.RequestContext('user', 'project1')

        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})

        elevated = context1.elevated()
        fix_addr = objects.FixedIP.associate_pool(elevated, 1,
                                                  instance['uuid'])

        def fake_refresh(instance_uuid):
            raise test.TestingException()

        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                fake_refresh)
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.assertRaises(test.TestingException,
                          self.network.deallocate_fixed_ip,
                          context1, str(fix_addr.address), 'fake')
        # The failure must happen before the DB row is touched.
        self.assertFalse(fixed_update.called)
    def test_get_networks_by_uuids_ordering(self):
        """Networks come back in the order the uuids were requested,
        not in DB order.
        """
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])

        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)

        # 'bbbb...' was requested first, so network id 1 must lead.
        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host batches iptables changes: _apply must run exactly
        once even though two floating IPs are re-plumbed.
        """
        def get_by_id(context, fixed_ip_id, **kwargs):
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            # NOTE: ids other than 1 or 2 implicitly return None.
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)

        def fake_apply():
            fake_apply.count += 1

        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt

        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id

        self.network.init_host()
        self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
class FakeNetwork(object):
    """Attribute-bag network fixture, also readable via item access
    (net['key']) like a DB row; vlan defaults to None.
    """

    def __init__(self, **kwargs):
        self.vlan = None
        # items() instead of Python-2-only iteritems(): identical behavior
        # here, and the class keeps working under Python 3.
        for k, v in kwargs.items():
            self.__setattr__(k, v)

    def __getitem__(self, item):
        return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
    def setUp(self):
        """Build a fresh request context and pin flags the tests rely on
        (RFC 2462 ipv6 backend, local conductor)."""
        super(CommonNetworkTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        self.flags(ipv6_backend='rfc2462')
        self.flags(use_local=True, group='conductor')
        # Flags are read at import/reset time, so re-init the ipv6 backend.
        ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
domain = 'example.com'
az = 'test_az'
domains = {
domain: _TestDomainObject(
domain=domain,
availability_zone=az)}
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
manager = network_manager.NetworkManager()
res = manager._validate_instance_zone_for_dns_domain(self.context,
fake_instance)
self.assertTrue(res)
    def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
                              extra_reserved=None):
        """No-op stand-in for NetworkManager._create_fixed_ips, used by the
        create_networks tests below."""
        return None
    def test_get_instance_nw_info_client_exceptions(self):
        """An InstanceNotFound from the DB layer must surface as an RPC
        ExpectedException so the client sees the original error.
        """
        manager = network_manager.NetworkManager()
        self.mox.StubOutWithMock(manager.db,
                                 'virtual_interface_get_by_instance')
        manager.db.virtual_interface_get_by_instance(
            self.context, FAKEUUID,
            use_slave=False).AndRaise(exception.InstanceNotFound(
                instance_id=FAKEUUID))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          manager.get_instance_nw_info,
                          self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
    @mock.patch('nova.db.instance_get')
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_deallocate_for_instance_passes_host_info(self, fixed_get,
                                                      instance_get):
        """deallocate_for_instance must forward the instance's host to
        each per-fixed-ip deallocation call.
        """
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        instance_get.return_value = fake_inst(uuid='ignoreduuid')
        db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('igonre', 'igonre')

        fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
                                       address='1.2.3.4',
                                       network_id=123)]

        manager.deallocate_for_instance(
            ctx, instance=objects.Instance._from_db_object(self.context,
                objects.Instance(), instance_get.return_value))

        # The fake manager records (context, address, host) per call.
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    @mock.patch('nova.db.fixed_ip_disassociate')
    def test_remove_fixed_ip_from_instance(self, disassociate, get):
        """Removing an owned fixed IP deallocates and disassociates it."""
        manager = fake_network.FakeNetworkManager()
        get.return_value = [
            dict(test_fixed_ip.fake_fixed_ip, **x)
            for x in manager.db.fixed_ip_get_by_instance(None,
                                                         FAKEUUID)]
        manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
                                              HOST,
                                              '10.0.0.1')

        self.assertEqual(manager.deallocate_called, '10.0.0.1')
        disassociate.assert_called_once_with(self.context, '10.0.0.1')
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_remove_fixed_ip_from_instance_bad_input(self, get):
        """Removing an address the instance doesn't own raises."""
        manager = fake_network.FakeNetworkManager()
        get.return_value = []
        self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
                          manager.remove_fixed_ip_from_instance,
                          self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
        """Splitting a /16 into /24s skips a /24 already present in the DB
        in the middle of the range."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.0/24')]
        nets = manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16',
                                       False, 4, 256, None, None, None,
                                       None, None)
        self.assertEqual(4, len(nets))
        cidrs = [str(net['cidr']) for net in nets]
        exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                     '192.168.4.0/24']
        for exp_cidr in exp_cidrs:
            self.assertIn(exp_cidr, cidrs)
        # The in-use subnet must have been skipped over.
        self.assertNotIn('192.168.2.0/24', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
        """A requested /24 that contains an existing smaller (/25)
        network conflicts."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.9/25')]
        # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
        #               existing smaller cidr
        args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
                1, 256, None, None, None, None, None)
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
        """Splitting a /16 into /24s skips the /24 that contains an
        existing smaller (/25) network."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.0/25')]
        nets = manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16',
                                       False, 4, 256, None, None, None, None,
                                       None)
        self.assertEqual(4, len(nets))
        cidrs = [str(net['cidr']) for net in nets]
        exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                     '192.168.4.0/24']
        for exp_cidr in exp_cidrs:
            self.assertIn(exp_cidr, cidrs)
        # The /24 overlapping the in-use /25 must have been skipped.
        self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_all_in_use(self, get_all):
        """When existing networks leave too few free subnets to satisfy
        the request, create_networks raises CidrConflict."""
        manager = fake_network.FakeNetworkManager()
        in_use = [dict(test_network.fake_network, **values) for values in
                  [{'id': 1, 'cidr': '192.168.2.9/29'},
                   {'id': 2, 'cidr': '192.168.2.64/26'},
                   {'id': 3, 'cidr': '192.168.2.128/26'}]]
        get_all.return_value = in_use
        args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
                3, 64, None, None, None, None, None)
        # CidrConflict: Not enough subnets avail to satisfy requested num_
        #               networks - some subnets in requested range already
        #               in use
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_already_used(self, get_all):
        """Requesting a cidr identical to an existing network raises."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     cidr='192.168.0.0/24')]
        # CidrConflict: cidr already in use
        args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
                1, 256, None, None, None, None, None)
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_conflict_existing_supernet(self, get_all):
        """Requesting a cidr inside an existing larger network raises."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.0.0/8')]
        args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
                1, 256, None, None, None, None, None)
        # CidrConflict: requested cidr (192.168.0.0/24) conflicts
        #               with existing supernet
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
    @mock.patch('nova.db.network_get_all')
    def test_create_networks_cidr_already_used(self, get_all):
        """create_networks refuses a cidr that an existing network uses."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.0.0/24')]
        args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
                'fd00::/48', None, None, None, None, None]
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ips_by_virtual_interface')
    def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
        """The 'ip' filter is a regex over IPv4 addresses; check wildcard,
        miss, exact, and partial patterns against the vif fixtures."""
        manager = fake_network.FakeNetworkManager(self.stubs)
        fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
        _vifs = manager.db.virtual_interface_get_all(None)
        fake_context = context.RequestContext('user', 'project')
        network_get.return_value = dict(test_network.fake_network,
                                        **manager.db.network_get(None, 1))

        # Greedily match everything
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '.*'})
        self.assertEqual(len(res), len(_vifs))

        # Doesn't exist
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '10.0.0.1'})
        self.assertFalse(res)

        # Get instance 1
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '172.16.0.2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])

        # Get instance 2
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '173.16.0.2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])

        # Get instance 0 and 1
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '172.16.0.*'})
        self.assertTrue(res)
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
        self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])

        # Get instance 1 and 2
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip': '17..16.0.2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
        self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
    @mock.patch('nova.db.network_get')
    def test_get_instance_uuids_by_ipv6_regex(self, network_get):
        """The 'ip6' filter is a regex over IPv6 addresses; check wildcard,
        miss, exact, and partial patterns against the vif fixtures."""
        manager = fake_network.FakeNetworkManager(self.stubs)
        _vifs = manager.db.virtual_interface_get_all(None)
        fake_context = context.RequestContext('user', 'project')

        def _network_get(context, network_id, **args):
            return dict(test_network.fake_network,
                        **manager.db.network_get(context, network_id))
        network_get.side_effect = _network_get

        # Greedily match everything
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': '.*'})
        self.assertEqual(len(res), len(_vifs))

        # Doesn't exist
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': '.*1034.*'})
        self.assertFalse(res)

        # Get instance 1
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': '2001:.*2'})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])

        # Get instance 2
        ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': ip6})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])

        # Get instance 0 and 1
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': '.*ef0[1,2]'})
        self.assertTrue(res)
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
        self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])

        # Get instance 1 and 2
        ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': ip6})
        self.assertTrue(res)
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
        self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ips_by_virtual_interface')
    def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
        """The 'fixed_ip' filter matches exact addresses only — regex
        syntax must NOT match."""
        manager = fake_network.FakeNetworkManager(self.stubs)
        fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
        _vifs = manager.db.virtual_interface_get_all(None)
        fake_context = context.RequestContext('user', 'project')
        network_get.return_value = dict(test_network.fake_network,
                                        **manager.db.network_get(None, 1))

        # No regex for you!
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'fixed_ip': '.*'})
        self.assertFalse(res)

        # Doesn't exist
        ip = '10.0.0.1'
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'fixed_ip': ip})
        self.assertFalse(res)

        # Get instance 1
        ip = '172.16.0.2'
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'fixed_ip': ip})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])

        # Get instance 2
        ip = '173.16.0.2'
        res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'fixed_ip': ip})
        self.assertTrue(res)
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
    def _test_init_host_dynamic_fixed_range(self, net_manager):
        """Verify init_host builds SNAT/POSTROUTING rules per db network.

        With CONF.fixed_range unset, the manager must look up its networks
        in the database and emit one group of NAT rules per network; adding
        a further network afterwards must extend the rule set accordingly.
        Shared by the FlatDHCP and Vlan manager tests below.
        """
        self.flags(fake_network=True,
                   routing_source_ip='172.16.0.1',
                   metadata_host='172.16.0.1',
                   public_interface='eth1',
                   dmz_cidr=['10.0.3.0/24'])
        binary_name = linux_net.get_binary_name()
        # Stub out calls we don't want to really run, mock the db
        self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
        self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
                                                lambda *args: None)
        self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
                                              lambda *args: None)
        self.mox.StubOutWithMock(db, 'network_get_all_by_host')
        fake_networks = [dict(test_network.fake_network, **n)
                         for n in networks]
        # init_host and _setup_network_on_host may each query; allow any
        # number of calls with MultipleTimes().
        db.network_get_all_by_host(mox.IgnoreArg(),
                                   mox.IgnoreArg()
                                   ).MultipleTimes().AndReturn(fake_networks)
        self.mox.ReplayAll()
        net_manager.init_host()
        # Get the iptables rules that got created
        current_lines = []
        new_lines = linux_net.iptables_manager._modify_rules(current_lines,
                                linux_net.iptables_manager.ipv4['nat'],
                                table_name='nat')
        # Four rules per fixture network: SNAT, metadata-host accept,
        # dmz accept, and the conntrack DNAT exemption.
        expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                          '-j SNAT --to-source %s -o %s'
                          % (binary_name, networks[0]['cidr'],
                             CONF.routing_source_ip,
                             CONF.public_interface),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                          % (binary_name, networks[0]['cidr'],
                             CONF.metadata_host),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                          % (binary_name, networks[0]['cidr'],
                             CONF.dmz_cidr[0]),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                          '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                        networks[0]['cidr'],
                                                        networks[0]['cidr']),
                          '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                          '-j SNAT --to-source %s -o %s'
                          % (binary_name, networks[1]['cidr'],
                             CONF.routing_source_ip,
                             CONF.public_interface),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                          % (binary_name, networks[1]['cidr'],
                             CONF.metadata_host),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                          % (binary_name, networks[1]['cidr'],
                             CONF.dmz_cidr[0]),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                          '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                        networks[1]['cidr'],
                                                        networks[1]['cidr'])]
        # Compare the expected rules against the actual ones
        for line in expected_lines:
            self.assertIn(line, new_lines)
        # Add an additional network and ensure the rules get configured
        new_network = {'id': 2,
                       'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
                       'label': 'test2',
                       'injected': False,
                       'multi_host': False,
                       'cidr': '192.168.2.0/24',
                       'cidr_v6': '2001:dba::/64',
                       'gateway_v6': '2001:dba::1',
                       'netmask_v6': '64',
                       'netmask': '255.255.255.0',
                       'bridge': 'fa1',
                       'bridge_interface': 'fake_fa1',
                       'gateway': '192.168.2.1',
                       'dhcp_server': '192.168.2.1',
                       'broadcast': '192.168.2.255',
                       'dns1': '192.168.2.1',
                       'dns2': '192.168.2.2',
                       'vlan': None,
                       'host': HOST,
                       'project_id': 'fake_project',
                       'vpn_public_address': '192.168.2.2',
                       'vpn_public_port': '22',
                       'vpn_private_address': '10.0.0.2'}
        new_network_obj = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **new_network))
        ctxt = context.get_admin_context()
        net_manager._setup_network_on_host(ctxt, new_network_obj)
        # Get the new iptables rules that got created from adding a new network
        current_lines = []
        new_lines = linux_net.iptables_manager._modify_rules(current_lines,
                                linux_net.iptables_manager.ipv4['nat'],
                                table_name='nat')
        # Add the new expected rules to the old ones
        expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                           '-j SNAT --to-source %s -o %s'
                           % (binary_name, new_network['cidr'],
                              CONF.routing_source_ip,
                              CONF.public_interface),
                           '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                           % (binary_name, new_network['cidr'],
                              CONF.metadata_host),
                           '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                           % (binary_name, new_network['cidr'],
                              CONF.dmz_cidr[0]),
                           '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
                           '! --ctstate DNAT -j ACCEPT' % (binary_name,
                                                           new_network['cidr'],
                                                           new_network['cidr'])]
        # Compare the expected rules (with new network) against the actual ones
        for line in expected_lines:
            self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
                          network_manager.NetworkManager):
    """Dummy manager that implements RPCAllocateFixedIP.

    Mixes RPCAllocateFixedIP into the base NetworkManager so the
    RPCAllocateTestCase below can exercise the mixin directly.
    """
class RPCAllocateTestCase(test.TestCase):
    """Tests nova.network.manager.RPCAllocateFixedIP."""
    def setUp(self):
        super(RPCAllocateTestCase, self).setUp()
        self.flags(use_local=True, group='conductor')
        self.rpc_fixed = TestRPCFixedManager()
        self.context = context.RequestContext('fake', 'fake')
    def test_rpc_allocate(self):
        """Test to verify bug 855030 doesn't resurface.
        Makes sure _rpc_allocate_fixed_ip returns a value so the call
        returns properly and the greenpool completes.
        """
        address = '10.10.10.10'
        # Stub the actual allocation and the network lookup; only the
        # return-value plumbing of _rpc_allocate_fixed_ip is under test.
        def fake_allocate(*args, **kwargs):
            return address
        def fake_network_get(*args, **kwargs):
            return test_network.fake_network
        self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
        self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
        rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
                                                     'fake_instance',
                                                     'fake_network')
        self.assertEqual(rval, address)
class TestFloatingIPManager(floating_ips.FloatingIP,
                            network_manager.NetworkManager):
    """Dummy manager that implements FloatingIP.

    Mixes FloatingIP into the base NetworkManager so the floating-IP
    test cases below can exercise the mixin directly.
    """
class AllocateTestCase(test.TestCase):
    """Functional tests for (de)allocating networking for an instance."""
    def setUp(self):
        super(AllocateTestCase, self).setUp()
        dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
        self.flags(instance_dns_manager=dns)
        # Real sample networks plus live conductor/compute/network
        # services: these tests go through the full allocation path.
        self.useFixture(test.SampleNetworks())
        self.conductor = self.start_service(
            'conductor', manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.network = self.start_service('network')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.user_context = context.RequestContext('testuser',
                                                   'testproject')
    # Happy path: allocation yields exactly one valid fixed IPv4 and an
    # auto-assigned floating IP; deallocation must clean up afterwards.
    def test_allocate_for_instance(self):
        address = "10.10.10.10"
        self.flags(auto_assign_floating_ip=True)
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        inst = objects.Instance()
        inst.host = self.compute.host
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create(self.context)
        networks = db.network_get_all(self.context)
        # Claim every sample network for the running network service host.
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.user_context.project_id
        nw_info = self.network.allocate_for_instance(self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=None)
        self.assertEqual(1, len(nw_info))
        fixed_ip = nw_info.fixed_ips()[0]['address']
        self.assertTrue(utils.is_valid_ipv4(fixed_ip))
        self.network.deallocate_for_instance(self.context,
                instance=inst)
    # Requesting networks owned by another project must be rejected with
    # NetworkNotFoundForProject, even when one legitimate network exists.
    def test_allocate_for_instance_illegal_network(self):
        networks = db.network_get_all(self.context)
        requested_networks = []
        for network in networks:
            # set all networks to other projects
            db.network_update(self.context, network['id'],
                              {'host': self.network.host,
                               'project_id': 'otherid'})
            requested_networks.append((network['uuid'], None))
        # set the first network to our project
        db.network_update(self.context, networks[0]['id'],
                          {'project_id': self.user_context.project_id})
        inst = objects.Instance()
        inst.host = self.compute.host
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create(self.context)
        self.assertRaises(exception.NetworkNotFoundForProject,
            self.network.allocate_for_instance, self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=self.context.project_id, macs=None,
            requested_networks=requested_networks)
    # A caller-supplied MAC pool must be consumed for the created VIFs.
    def test_allocate_for_instance_with_mac(self):
        available_macs = set(['ca:fe:de:ad:be:ef'])
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=available_macs)
        assigned_macs = [vif['address'] for vif in nw_info]
        self.assertEqual(1, len(assigned_macs))
        self.assertEqual(available_macs.pop(), assigned_macs[0])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=inst['id'],
                                             host=self.network.host,
                                             project_id=project_id)
    # An empty MAC pool must fail VIF creation rather than invent MACs.
    def test_allocate_for_instance_not_enough_macs(self):
        available_macs = set()
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self.network.allocate_for_instance,
                          self.user_context,
                          instance_id=inst['id'], instance_uuid=inst['uuid'],
                          host=inst['host'], vpn=None, rxtx_factor=3,
                          project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
    """Tests nova.network.manager.FloatingIP."""
    def setUp(self):
        super(FloatingIPTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.flags(use_local=True, group='conductor')
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)
    # On a multi_host network, disassociation must be forwarded over RPC
    # to the host actually running the instance ('some-other-host').
    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.service_get_by_host_and_topic')
    @mock.patch('nova.db.floating_ip_get_by_address')
    def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
                                                       service_get,
                                                       inst_get, net_get,
                                                       fixed_get):
        floating_ip = dict(test_floating_ip.fake_floating_ip,
                           fixed_ip_id=12)
        fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                        network_id=None,
                        instance_uuid='instance-uuid')
        network = dict(test_network.fake_network,
                       multi_host=True)
        instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        self.stubs.Set(self.network,
                       '_floating_ip_owned_by_project',
                       lambda _x, _y: True)
        floating_get.return_value = floating_ip
        fixed_get.return_value = fixed_ip
        net_get.return_value = network
        inst_get.return_value = instance
        service_get.return_value = test_service.fake_service
        self.stubs.Set(self.network.servicegroup_api,
                       'service_is_up',
                       lambda _x: True)
        # Record the expected RPC fan-out call, then replay.
        self.mox.StubOutWithMock(
            self.network.network_rpcapi, '_disassociate_floating_ip')
        self.network.network_rpcapi._disassociate_floating_ip(
            ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
        self.mox.ReplayAll()
        self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
    # Mirror of the test above for the associate path: multi_host networks
    # must RPC the association to the instance's host.
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.floating_ip_get_by_address')
    def test_associate_floating_ip_multi_host_calls(self, floating_get,
                                                    inst_get, net_get,
                                                    fixed_get):
        floating_ip = dict(test_floating_ip.fake_floating_ip,
                           fixed_ip_id=None)
        fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                        network_id=None,
                        instance_uuid='instance-uuid')
        network = dict(test_network.fake_network,
                       multi_host=True)
        instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        self.stubs.Set(self.network,
                       '_floating_ip_owned_by_project',
                       lambda _x, _y: True)
        floating_get.return_value = floating_ip
        fixed_get.return_value = fixed_ip
        net_get.return_value = network
        inst_get.return_value = instance
        self.mox.StubOutWithMock(
            self.network.network_rpcapi, '_associate_floating_ip')
        self.network.network_rpcapi._associate_floating_ip(
            ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
            'instance-uuid')
        self.mox.ReplayAll()
        self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
    # Deallocating twice for an instance with no fixed networks must not
    # raise -- the manager has to tolerate having nothing to clean up.
    def test_double_deallocation(self):
        instance_ref = db.instance_create(self.context,
                {"project_id": self.project_id})
        # Run it twice to make it fault if it does not handle
        # instances without fixed networks
        # If this fails in either, it does not handle having no addresses
        self.network.deallocate_for_instance(self.context,
                instance_id=instance_ref['id'])
        self.network.deallocate_for_instance(self.context,
                instance_id=instance_ref['id'])
    # The quota reservation made for a floating-IP deallocation must be
    # rolled back (not committed) on the recorded code path.
    def test_deallocate_floating_ip_quota_rollback(self):
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
        self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
        self.mox.StubOutWithMock(self.network,
                                 '_floating_ip_owned_by_project')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        quota.QUOTAS.reserve(self.context,
                             floating_ips=-1,
                             project_id='testproject').AndReturn('fake-rsv')
        self.network._floating_ip_owned_by_project(self.context,
                                                   mox.IgnoreArg())
        db.floating_ip_deallocate(mox.IgnoreArg(),
                                  mox.IgnoreArg()).AndReturn(None)
        quota.QUOTAS.rollback(self.context, 'fake-rsv',
                              project_id='testproject')
        self.mox.ReplayAll()
        self.network.deallocate_floating_ip(self.context, '10.0.0.1')
    # Deallocation must succeed even when the instance row is already
    # marked deleted.
    def test_deallocation_deleted_instance(self):
        self.stubs.Set(self.network, '_teardown_network_on_host',
                       lambda *args, **kwargs: None)
        instance = objects.Instance()
        instance.project_id = self.project_id
        instance.deleted = True
        instance.create(self.context)
        network = db.network_create_safe(self.context.elevated(), {
            'project_id': self.project_id,
            'host': CONF.host,
            'label': 'foo'})
        fixed = db.fixed_ip_create(self.context, {'allocated': True,
            'instance_uuid': instance.uuid, 'address': '10.1.1.1',
            'network_id': network['id']})
        db.floating_ip_create(self.context, {
            'address': '10.10.10.10', 'instance_uuid': instance.uuid,
            'fixed_ip_id': fixed['id'],
            'project_id': self.project_id})
        self.network.deallocate_for_instance(self.context, instance=instance)
    # A soft-deleted floating IP with the same address must not confuse
    # deallocation of the live duplicate.
    def test_deallocation_duplicate_floating_ip(self):
        self.stubs.Set(self.network, '_teardown_network_on_host',
                       lambda *args, **kwargs: None)
        instance = objects.Instance()
        instance.project_id = self.project_id
        instance.create(self.context)
        network = db.network_create_safe(self.context.elevated(), {
            'project_id': self.project_id,
            'host': CONF.host,
            'label': 'foo'})
        fixed = db.fixed_ip_create(self.context, {'allocated': True,
            'instance_uuid': instance.uuid, 'address': '10.1.1.1',
            'network_id': network['id']})
        db.floating_ip_create(self.context, {
            'address': '10.10.10.10',
            'deleted': True})
        db.floating_ip_create(self.context, {
            'address': '10.10.10.10', 'instance_uuid': instance.uuid,
            'fixed_ip_id': fixed['id'],
            'project_id': self.project_id})
        self.network.deallocate_for_instance(self.context, instance=instance)
    # migrate_instance_start must tear down only the *stale* floating IPs
    # (here all but '172.24.4.23'), i.e. two of the three addresses.
    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.floating_ip_get_by_address')
    @mock.patch('nova.db.floating_ip_update')
    def test_migrate_instance_start(self, floating_update, floating_get,
                                    fixed_get):
        called = {'count': 0}
        def fake_floating_ip_get_by_address(context, address):
            return dict(test_floating_ip.fake_floating_ip,
                        address=address,
                        fixed_ip_id=0)
        def fake_is_stale_floating_ip_address(context, floating_ip):
            return str(floating_ip.address) == '172.24.4.23'
        floating_get.side_effect = fake_floating_ip_get_by_address
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      instance_uuid='fake_uuid',
                                      address='10.0.0.2',
                                      network=test_network.fake_network)
        floating_update.return_value = fake_floating_ip_get_by_address(
            None, '1.2.3.4')
        def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
                                    network):
            called['count'] += 1
        def fake_clean_conntrack(fixed_ip):
            if not str(fixed_ip) == "10.0.0.2":
                raise exception.FixedIpInvalid(address=fixed_ip)
        self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                       fake_is_stale_floating_ip_address)
        self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
                       fake_remove_floating_ip)
        self.stubs.Set(self.network.driver, 'clean_conntrack',
                       fake_clean_conntrack)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_start(self.context,
                                            instance_uuid=FAKEUUID,
                                            floating_addresses=addresses,
                                            rxtx_factor=3,
                                            project_id=self.project_id,
                                            source='fake_source',
                                            dest='fake_dest')
        self.assertEqual(called['count'], 2)
    # migrate_instance_finish is the mirror image: re-add floating IPs on
    # the destination, again skipping the one stale address.
    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.floating_ip_update')
    def test_migrate_instance_finish(self, floating_update, fixed_get):
        called = {'count': 0}
        def fake_floating_ip_get_by_address(context, address):
            return dict(test_floating_ip.fake_floating_ip,
                        address=address,
                        fixed_ip_id=0)
        def fake_is_stale_floating_ip_address(context, floating_ip):
            return str(floating_ip.address) == '172.24.4.23'
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      instance_uuid='fake_uuid',
                                      address='10.0.0.2',
                                      network=test_network.fake_network)
        floating_update.return_value = fake_floating_ip_get_by_address(
            None, '1.2.3.4')
        def fake_add_floating_ip(floating_addr, fixed_addr, interface,
                                 network):
            called['count'] += 1
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
                       fake_floating_ip_get_by_address)
        self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                       fake_is_stale_floating_ip_address)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       fake_add_floating_ip)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_finish(self.context,
                                             instance_uuid=FAKEUUID,
                                             floating_addresses=addresses,
                                             host='fake_dest',
                                             rxtx_factor=3,
                                             project_id=self.project_id,
                                             source='fake_source')
        self.assertEqual(called['count'], 2)
    # Duplicate DNS entry creation must raise FloatingIpDNSExists.
    def test_floating_dns_create_conflict(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.network.add_dns_entry, self.context,
                          address1, name1, "A", zone)
    # DNS entries are retrievable both by address and by name.
    def test_floating_create_and_get(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertFalse(entries)
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEqual(len(entries), 2)
        self.assertEqual(entries[0], name1)
        self.assertEqual(entries[1], name2)
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       name1, zone)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], address1)
    # Deleting a DNS entry removes only that entry; deleting again is
    # NotFound.
    def test_floating_dns_delete(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        self.network.delete_dns_entry(self.context, name1, zone)
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], name2)
        self.assertRaises(exception.NotFound,
                          self.network.delete_dns_entry, self.context,
                          name1, zone)
    # Public DNS domain CRUD requires admin; deleting a domain removes
    # its entries too.
    def test_floating_dns_domains_public(self):
        zone1 = "testzone"
        domain1 = "example.org"
        domain2 = "example.com"
        address1 = '10.10.10.10'
        entryname = 'testentry'
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.AdminRequired,
                          self.network.create_public_dns_domain, self.context,
                          domain1, zone1)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(len(domains), 2)
        self.assertEqual(domains[0]['domain'], domain1)
        self.assertEqual(domains[1]['domain'], domain2)
        self.assertEqual(domains[0]['project'], 'testproject')
        self.assertEqual(domains[1]['project'], 'fakeproject')
        self.network.add_dns_entry(self.context, address1, entryname,
                                   'A', domain1)
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], address1)
        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)
        # Verify that deleting the domain deleted the associated entry
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertFalse(entries)
    # _delete_all_entries_for_ip must purge the address from every domain.
    def test_delete_all_by_ip(self):
        domain1 = "example.org"
        domain2 = "example.com"
        address = "10.10.10.10"
        name1 = "foo"
        name2 = "bar"
        def fake_domains(context):
            return [{'domain': 'example.org', 'scope': 'public'},
                    {'domain': 'example.com', 'scope': 'public'},
                    {'domain': 'test.example.org', 'scope': 'public'}]
        self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        for domain in domains:
            self.network.add_dns_entry(self.context, address,
                                       name1, "A", domain['domain'])
            self.network.add_dns_entry(self.context, address,
                                       name2, "A", domain['domain'])
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertEqual(len(entries), 2)
        self.network._delete_all_entries_for_ip(self.context, address)
        for domain in domains:
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertFalse(entries)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)
    def test_mac_conflicts(self):
        # Make sure MAC collisions are retried.
        self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': 'fake_uuid',
            'network_id': 123,
            'uuid': 'fake_uuid',
            }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()
        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
            # NOTE(russellb) The VirtualInterface object requires an ID to be
            # set, and we expect it to get set automatically when we do the
            # save.
            vif.id = 1
        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
        self.assertEqual(macs, [])
    # The following four tests check that NotFound-style errors are
    # wrapped as messaging.ExpectedException for RPC clients.
    def test_deallocate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.deallocate_floating_ip,
                          self.context, '1.2.3.4')
    def test_associate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.associate_floating_ip,
                          self.context, '1.2.3.4', '10.0.0.1')
    def test_disassociate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.disassociate_floating_ip,
                          self.context, '1.2.3.4')
    def test_get_floating_ip_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
            exception.FloatingIpNotFound(id='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.get_floating_ip,
                          self.context, 'fake-id')
    # Helper: a failing l3driver.add_floating_ip must surface as the
    # expected exception type, depending on the error's stdout text.
    def _test_associate_floating_ip_failure(self, stdout, expected_exception):
        def _fake_catchall(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        network=test_network.fake_network)
        def _fake_add_floating_ip(*args, **kwargs):
            raise processutils.ProcessExecutionError(stdout)
        self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
                       _fake_catchall)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate',
                       _fake_catchall)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       _fake_add_floating_ip)
        self.assertRaises(expected_exception,
                          self.network._associate_floating_ip, self.context,
                          '1.2.3.4', '1.2.3.5', '', '')
    def test_associate_floating_ip_failure(self):
        self._test_associate_floating_ip_failure(None,
                processutils.ProcessExecutionError)
    def test_associate_floating_ip_failure_interface_not_found(self):
        self._test_associate_floating_ip_failure('Cannot find device',
                exception.NoFloatingIpInterface)
class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS."""
    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.flags(use_local=True, group='conductor')
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)
    # Private DNS domain create/delete requires admin; the created domain
    # is tagged with its availability zone.
    def test_dns_domains_private(self):
        zone1 = 'testzone'
        domain1 = 'example.org'
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.AdminRequired,
                          self.network.create_private_dns_domain, self.context,
                          domain1, zone1)
        self.network.create_private_dns_domain(context_admin, domain1, zone1)
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(len(domains), 1)
        self.assertEqual(domains[0]['domain'], domain1)
        self.assertEqual(domains[0]['availability_zone'], zone1)
        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)
# Module-level DNS domain fixtures shared by LdapDNSTestCase below.
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
    """Tests nova.network.ldapdns.LdapDNS."""
    def setUp(self):
        super(LdapDNSTestCase, self).setUp()
        # Swap the real python-ldap module for the in-memory fake so no
        # LDAP server is needed.
        self.useFixture(test.ReplaceModule('ldap', fake_ldap))
        dns_class = 'nova.network.ldapdns.LdapDNS'
        self.driver = importutils.import_object(dns_class)
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'associateddomain': ['root'],
                 'dc': ['root']}
        self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)
    def tearDown(self):
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)
        super(LdapDNSTestCase, self).tearDown()
    def test_ldap_dns_domains(self):
        """Both domains created in setUp are listed by the driver."""
        domains = self.driver.get_domains()
        self.assertEqual(len(domains), 2)
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)
    def test_ldap_dns_create_conflict(self):
        """Creating the same entry twice raises FloatingIpDNSExists."""
        address1 = "10.10.10.11"
        name1 = "foo"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)
    def test_ldap_dns_create_and_get(self):
        """Entries are retrievable both by address and by name."""
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.assertEqual(entries[0], name1)
        self.assertEqual(entries[1], name2)
        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], address1)
    def test_ldap_dns_delete(self):
        """Deleting one of two entries leaves the other; deleting the
        same name again raises NotFound."""
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        # Fix: pass lazy %-style args to the logger instead of eagerly
        # formatting with the % operator, so the string is only built
        # when DEBUG logging is actually enabled.
        LOG.debug("entries: %s", entries)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], name2)
        self.assertRaises(exception.NotFound,
                          self.driver.delete_entry,
                          name1, domain1)
| srajag/nova | nova/tests/network/test_manager.py | Python | apache-2.0 | 143,411 | [
"FEFF"
] | 807e7161a92ae80f9f507b0f8d4c08980f77c335f2086cbc46c8992e4847f2c4 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
from django.contrib.auth.models import Permission
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.test import TestCase, skipUnlessDBFeature
from django.test.client import Client
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.translation.trans_real import get_supported_language_variant
from pybb import permissions, views as pybb_views
from pybb.templatetags.pybb_tags import pybb_is_topic_unread, pybb_topic_unread, pybb_forum_unread, \
pybb_get_latest_topics, pybb_get_latest_posts
from pybb import compat, util
# Resolve the user model and its username field through the compat layer so
# the tests also work with a custom AUTH_USER_MODEL.
User = compat.get_user_model()
username_field = compat.get_username_field()
# lxml is a hard requirement of this test suite (HTML/forms are parsed with
# it); fail loudly at import time rather than with obscure NameErrors later.
try:
    from lxml import html
except ImportError:
    raise Exception('PyBB requires lxml for self testing')
from pybb import defaults
from pybb.models import Category, Forum, Topic, Post, PollAnswer, TopicReadTracker, \
    ForumReadTracker, ForumSubscription
# Profile model is pluggable; resolve it once for the whole module.
Profile = util.get_pybb_profile_model()
__author__ = 'zeus'
class SharedTestModule(object):
    """Mixin with fixture and form helpers shared by the pybb test cases."""

    def create_user(self):
        """Create the default 'zeus' user and store it on the test case."""
        self.user = User.objects.create_user('zeus', 'zeus@localhost', 'zeus')

    def login_client(self, username='zeus', password='zeus'):
        """Authenticate the test client (defaults to the 'zeus' user)."""
        self.client.login(username=username, password=password)

    def create_initial(self, post=True):
        """Create a category/forum/topic fixture, optionally with one post."""
        self.category = Category.objects.create(name='foo')
        self.forum = Forum.objects.create(name='xfoo', description='bar', category=self.category)
        self.topic = Topic.objects.create(name='etopic', forum=self.forum, user=self.user)
        if post:
            self.post = Post.objects.create(topic=self.topic, user=self.user, body='bbcode [b]test[/b]')

    def get_form_values(self, response, form="post-form"):
        """Return the field values of the form whose CSS class is *form*."""
        document = html.fromstring(response.content)
        matching_forms = document.xpath('//form[@class="%s"]' % form)
        return dict(matching_forms[0].form_values())

    def get_with_user(self, url, username=None, password=None):
        """GET *url*, optionally logged in as *username*; log out afterwards."""
        if username:
            self.client.login(username=username, password=password)
        response = self.client.get(url)
        self.client.logout()
        return response
class FeaturesTest(TestCase, SharedTestModule):
def setUp(self):
    """Force anonymous posting and premoderation off for these tests.

    The original settings are remembered on the instance first so other
    test cases are unaffected; then the shared user/forum fixture is
    built and the mail outbox is emptied.
    """
    self.ORIG_PYBB_ENABLE_ANONYMOUS_POST = defaults.PYBB_ENABLE_ANONYMOUS_POST
    self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
    defaults.PYBB_PREMODERATION = False
    defaults.PYBB_ENABLE_ANONYMOUS_POST = False
    self.create_user()
    self.create_initial()
    # Start each test with an empty outbox.
    mail.outbox = []
def test_base(self):
    """The index page lists the category and its accessible forums."""
    Forum.objects.create(name='xfoo1', description='bar1', category=self.category, parent=self.forum)
    response = self.client.get(reverse('pybb:index'))
    self.assertContains(response, 'foo')
    self.assertContains(response, self.forum.get_absolute_url())
    utf8_parser = html.HTMLParser(encoding='utf8')
    document = html.fromstring(response.content, parser=utf8_parser)
    page_title = document.xpath('//title')[0].text_content()
    self.assertTrue(defaults.PYBB_DEFAULT_TITLE in page_title)
    categories = response.context['categories']
    self.assertEqual(len(categories), 1)
    # The child forum is folded under its parent, so only one is accessed.
    self.assertEqual(len(categories[0].forums_accessed), 1)
def test_forum_page(self):
    """The forum page links its topic, titles itself, and is unpaginated."""
    response = self.client.get(self.forum.get_absolute_url())
    self.assertEqual(response.context['forum'], self.forum)
    document = html.fromstring(response.content)
    topic_link_xpath = '//a[@href="%s"]' % self.topic.get_absolute_url()
    title_xpath = '//title[contains(text(),"%s")]' % self.forum.name
    self.assertTrue(document.xpath(topic_link_xpath))
    self.assertTrue(document.xpath(title_xpath))
    # A single topic fits on one page: no pagination links, no paginator.
    self.assertFalse(document.xpath('//a[contains(@href,"?page=")]'))
    self.assertFalse(response.context['is_paginated'])
def test_category_page(self):
    """The category page renders and exposes only accessible forums."""
    Forum.objects.create(name='xfoo1', description='bar1', category=self.category, parent=self.forum)
    response = self.client.get(self.category.get_absolute_url())
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, self.forum.get_absolute_url())
    category = response.context['object']
    # The child forum is folded under its parent, so only one is accessed.
    self.assertEqual(len(category.forums_accessed), 1)
def test_profile_language_default(self):
    """A freshly created profile defaults to the site LANGUAGE_CODE variant."""
    new_user = User.objects.create_user(username='user2', password='user2',
                                        email='user2@example.com')
    expected_language = get_supported_language_variant(settings.LANGUAGE_CODE)
    self.assertEqual(util.get_pybb_profile(new_user).language, expected_language)
def test_profile_edit(self):
    """A user can edit their own profile: the saved signature shows up on
    the page rendering their post, and an empty signature is accepted."""
    self.login_client()
    response = self.client.get(reverse('pybb:edit_profile'))
    self.assertEqual(response.status_code, 200)
    values = self.get_form_values(response, 'profile-edit')
    values['signature'] = 'test signature'
    response = self.client.post(reverse('pybb:edit_profile'), data=values, follow=True)
    self.assertEqual(response.status_code, 200)
    # Bug fix: the response of this GET used to be discarded, so the
    # assertion below inspected the profile-edit POST response instead of
    # the page that actually renders the signature under the post.
    response = self.client.get(self.post.get_absolute_url(), follow=True)
    self.assertContains(response, 'test signature')
    # Test empty signature
    values['signature'] = ''
    response = self.client.post(reverse('pybb:edit_profile'), data=values, follow=True)
    self.assertEqual(len(response.context['form'].errors), 0)
def test_pagination_and_topic_addition(self):
    """A forum holding a few topics more than one page paginates correctly."""
    for index in range(0, defaults.PYBB_FORUM_PAGE_SIZE + 3):
        Topic(name='topic_%s_' % index, forum=self.forum, user=self.user).save()
    response = self.client.get(self.forum.get_absolute_url())
    # Exactly one page worth of topics is shown, and pagination kicks in.
    self.assertEqual(len(response.context['topic_list']), defaults.PYBB_FORUM_PAGE_SIZE)
    self.assertTrue(response.context['is_paginated'])
    expected_pages = int((defaults.PYBB_FORUM_PAGE_SIZE + 3) / defaults.PYBB_FORUM_PAGE_SIZE) + 1
    self.assertEqual(response.context['paginator'].num_pages, expected_pages)
def test_bbcode_and_topic_title(self):
    """The topic page title carries the topic name and bbcode is rendered."""
    response = self.client.get(self.topic.get_absolute_url())
    document = html.fromstring(response.content)
    title_text = document.xpath('//title')[0].text_content()
    self.assertTrue(self.topic.name in title_text)
    self.assertContains(response, self.post.body_html)
    self.assertContains(response, 'bbcode <strong>test</strong>')
def test_topic_addition(self):
    """Submitting the add-topic form creates the topic."""
    self.login_client()
    add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
    form_values = self.get_form_values(self.client.get(add_topic_url))
    form_values['body'] = 'new topic test'
    form_values['name'] = 'new topic name'
    form_values['poll_type'] = 0
    response = self.client.post(add_topic_url, data=form_values, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(Topic.objects.filter(name='new topic name').exists())
def test_topic_read_before_post_addition(self):
    """
    Test if everything is okay when:
    - user A creates the topic
    - but before the associated post is created, user B displays the forum
    """
    topic = Topic(name='xtopic', forum=self.forum, user=self.user)
    topic.save()
    # topic is saved, but its first post is not yet created at this time;
    # another user displays the forum before the post creation
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client = Client()
    client.login(username='ann', password='ann')
    # A topic without any post is incomplete: its detail view returns 404.
    self.assertEqual(client.get(topic.get_absolute_url()).status_code, 404)
    self.assertEqual(topic.forum.post_count, 1)
    self.assertEqual(topic.forum.topic_count, 1)
    # do we need to correct this ?
    # self.assertEqual(topic.forum.topics.count(), 1)
    self.assertEqual(topic.post_count, 0)
    # Now, TopicReadTracker is not created because the topic detail view
    # raises a 404 if the topic's creation is not finished. So we create it
    # manually to add a test, just in case we have another way where
    # TopicReadTracker could be set for an incomplete topic.
    TopicReadTracker.objects.create(user=user_ann, topic=topic, time_stamp=topic.created)
    # before correction, raised TypeError: can't compare datetime.datetime to NoneType
    pybb_topic_unread([topic,], user_ann)
    # before correction, raised IndexError: list index out of range
    last_post = topic.last_post
    # post creation now.
    Post(topic=topic, user=self.user, body='one').save()
    # With its first post in place the topic is complete and visible.
    self.assertEqual(client.get(topic.get_absolute_url()).status_code, 200)
    self.assertEqual(topic.forum.post_count, 2)
    self.assertEqual(topic.forum.topic_count, 2)
    self.assertEqual(topic.forum.topics.count(), 2)
    self.assertEqual(topic.post_count, 1)
def test_post_deletion(self):
    """Deleting a post leaves its topic and forum intact."""
    extra_post = Post(topic=self.topic, user=self.user, body='bbcode [b]test[/b]')
    extra_post.save()
    extra_post.delete()
    # Both lookups raise DoesNotExist if the deletion cascaded too far.
    Topic.objects.get(id=self.topic.id)
    Forum.objects.get(id=self.forum.id)
def test_topic_deletion(self):
    """Deleting posts and topics never removes the parent forum."""
    topic = Topic(name='xtopic', forum=self.forum, user=self.user)
    topic.save()
    Post(topic=topic, user=self.user, body='one').save()
    second_post = Post(topic=topic, user=self.user, body='two')
    second_post.save()
    second_post.delete()
    # Topic and forum survive a post deletion ...
    Topic.objects.get(id=topic.id)
    Forum.objects.get(id=self.forum.id)
    # ... and the forum survives the topic deletion too.
    topic.delete()
    Forum.objects.get(id=self.forum.id)
def test_forum_updated(self):
    """Adding a post bumps the forum's ``updated`` timestamp to the post's
    creation time."""
    topic = Topic(name='xtopic', forum=self.forum, user=self.user)
    topic.save()
    post = Post(topic=topic, user=self.user, body='one')
    post.save()
    # Re-read the post so we compare against the DB-assigned timestamp.
    post = Post.objects.get(id=post.id)
    # assertEqual instead of assertTrue(... == ...): on failure it reports
    # both timestamps rather than just "False is not true".
    self.assertEqual(self.forum.updated, post.created)
def test_read_tracking(self):
    """A topic/forum shows an unread marker until visited; posting and
    'mark all as read' also clear the markers."""
    topic = Topic(name='xtopic', forum=self.forum, user=self.user)
    topic.save()
    post = Post(topic=topic, user=self.user, body='one')
    post.save()
    client = Client()
    client.login(username='zeus', password='zeus')
    # Topic status: unread marker present on the forum page.
    tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
    self.assertTrue(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
    # Forum status: unread marker present on the index page.
    tree = html.fromstring(client.get(reverse('pybb:index')).content)
    self.assertTrue(
        tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
    # Visit it
    client.get(topic.get_absolute_url())
    # Topic status - read (tree is captured before visiting the others)
    tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
    # Visit the remaining topics so the whole forum becomes read.
    for t in topic.forum.topics.all():
        client.get(t.get_absolute_url())
    self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
    # Forum status - read
    tree = html.fromstring(client.get(reverse('pybb:index')).content)
    self.assertFalse(
        tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
    # Posting a message keeps topic and forum read for the author.
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': topic.id})
    response = client.get(add_post_url)
    values = self.get_form_values(response)
    values['body'] = 'test tracking'
    response = client.post(add_post_url, values, follow=True)
    self.assertContains(response, 'test tracking')
    # Topic status - read
    tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
    self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
    # Forum status - read
    tree = html.fromstring(client.get(reverse('pybb:index')).content)
    self.assertFalse(
        tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
    # A new post by the same user, then 'mark all as read' clears the marker.
    post = Post(topic=topic, user=self.user, body='one')
    post.save()
    client.get(reverse('pybb:mark_all_as_read'))
    tree = html.fromstring(client.get(reverse('pybb:index')).content)
    self.assertFalse(
        tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
    # An empty forum (no posts at all) counts as read.
    f = Forum(name='empty', category=self.category)
    f.save()
    tree = html.fromstring(client.get(reverse('pybb:index')).content)
    self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % f.get_absolute_url()))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_read_tracking_multi_user(self):
    """Per-user read tracking: topic trackers collapse into a single forum
    tracker once a user has read every topic in the forum."""
    topic_1 = self.topic
    topic_2 = Topic(name='topic_2', forum=self.forum, user=self.user)
    topic_2.save()
    Post(topic=topic_2, user=self.user, body='one').save()
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    user_bob = User.objects.create_user('bob', 'bob@localhost', 'bob')
    client_bob = Client()
    client_bob.login(username='bob', password='bob')
    # Two topics, each with one post. Everything is unread, so the db should reflect that:
    self.assertEqual(TopicReadTracker.objects.all().count(), 0)
    self.assertEqual(ForumReadTracker.objects.all().count(), 0)
    # user_ann reads topic_1, she should get one topic read tracker, there should be no forum read trackers
    client_ann.get(topic_1.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann, topic=topic_1).count(), 1)
    self.assertEqual(ForumReadTracker.objects.all().count(), 0)
    # user_bob reads topic_1, he should get one topic read tracker, there should be no forum read trackers
    client_bob.get(topic_1.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob, topic=topic_1).count(), 1)
    # user_bob reads topic_2 (his last unread topic), he should get a forum
    # read tracker and his topic read trackers are removed
    client_bob.get(topic_2.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=user_bob).count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=user_bob, forum=self.forum).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 0)
    self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_bob)], [False, False])
    # user_ann creates topic_3, they should get a new topic read tracker in the db
    add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
    response = client_ann.get(add_topic_url)
    values = self.get_form_values(response)
    values['body'] = 'topic_3'
    values['name'] = 'topic_3'
    values['poll_type'] = 0
    response = client_ann.post(add_topic_url, data=values, follow=True)
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    topic_3 = Topic.objects.order_by('-updated', '-id')[0]
    self.assertEqual(topic_3.name, 'topic_3')
    # user_ann posts to topic_1, a topic they've already read, no new trackers should be created
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': topic_1.id})
    response = client_ann.get(add_post_url)
    values = self.get_form_values(response)
    values['body'] = 'test tracking'
    response = client_ann.post(add_post_url, values, follow=True)
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    # user_bob has two unread topics, 'topic_1' and 'topic_3'.
    # This is because user_ann created a new topic and posted to an existing topic,
    # after user_bob got his forum read tracker.
    # user_bob reads 'topic_1':
    # user_bob gets a new topic read tracker, and the existing forum read tracker stays the same.
    # 'topic_3' appears unread for user_bob.
    previous_time = ForumReadTracker.objects.all()[0].time_stamp
    client_bob.get(topic_1.get_absolute_url())
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.all()[0].time_stamp, previous_time)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
    self.assertEqual(TopicReadTracker.objects.all().count(), 3)
    # user_bob reads the last unread topic, 'topic_3':
    # user_bob's existing forum read tracker updates and his topic read tracker disappears.
    previous_time = ForumReadTracker.objects.all()[0].time_stamp
    client_bob.get(topic_3.get_absolute_url())
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertGreater(ForumReadTracker.objects.all()[0].time_stamp, previous_time)
    self.assertEqual(TopicReadTracker.objects.all().count(), 2)
    self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 0)
def test_read_tracking_multi_forum(self):
    """Reading every topic of one forum collapses the user's topic trackers
    into a single forum tracker; other forums are unaffected."""
    topic_1 = self.topic
    topic_2 = Topic(name='topic_2', forum=self.forum, user=self.user)
    topic_2.save()
    Post(topic=topic_2, user=self.user, body='one').save()
    forum_1 = self.forum
    forum_2 = Forum(name='forum_2', description='bar', category=self.category)
    forum_2.save()
    # An extra topic in a second forum that is never read in this test.
    Topic(name='garbage', forum=forum_2, user=self.user).save()
    client = Client()
    client.login(username='zeus', password='zeus')
    # everything starts unread
    self.assertEqual(ForumReadTracker.objects.all().count(), 0)
    self.assertEqual(TopicReadTracker.objects.all().count(), 0)
    # user reads topic_1, they should get one topic read tracker, there should be no forum read trackers
    client.get(topic_1.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=self.user).count(), 1)
    self.assertEqual(TopicReadTracker.objects.filter(user=self.user, topic=topic_1).count(), 1)
    # user reads topic_2 (the last unread topic of forum_1): the topic
    # trackers are replaced by a single forum read tracker
    client.get(topic_2.get_absolute_url())
    self.assertEqual(TopicReadTracker.objects.all().count(), 0)
    self.assertEqual(ForumReadTracker.objects.all().count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=self.user).count(), 1)
    self.assertEqual(ForumReadTracker.objects.filter(user=self.user, forum=self.forum).count(), 1)
def test_read_tracker_after_posting(self):
    """After posting in the only topic of a forum, the whole forum is
    marked read for the author (forum tracker, no per-topic tracker)."""
    client = Client()
    client.login(username='zeus', password='zeus')
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    response = client.get(add_post_url)
    values = self.get_form_values(response)
    values['body'] = 'test tracking'
    response = client.post(add_post_url, values, follow=True)
    # after posting in the topic it should be read;
    # because there is only one topic, the whole forum is marked as read
    self.assertEqual(TopicReadTracker.objects.filter(user=self.user, topic=self.topic).count(), 0)
    self.assertEqual(ForumReadTracker.objects.filter(user=self.user, forum=self.forum).count(), 1)
def test_pybb_is_topic_unread_filter(self):
    """The pybb_is_topic_unread / pybb_topic_unread template helpers flip
    from unread to read as each topic is visited."""
    forum_1 = self.forum
    topic_1 = self.topic
    topic_2 = Topic.objects.create(name='topic_2', forum=forum_1, user=self.user)
    forum_2 = Forum.objects.create(name='forum_2', description='forum2', category=self.category)
    topic_3 = Topic.objects.create(name='topic_2', forum=forum_2, user=self.user)
    Post(topic=topic_1, user=self.user, body='one').save()
    Post(topic=topic_2, user=self.user, body='two').save()
    Post(topic=topic_3, user=self.user, body='three').save()
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    # Three topics, each with one post. Everything is unread at first:
    self.assertTrue(pybb_is_topic_unread(topic_1, user_ann))
    self.assertTrue(pybb_is_topic_unread(topic_2, user_ann))
    self.assertTrue(pybb_is_topic_unread(topic_3, user_ann))
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
        [True, True, True])
    # Visit each topic in turn; the topics are re-fetched so the helpers
    # see fresh tracker state.
    client_ann.get(topic_1.get_absolute_url())
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    topic_3 = Topic.objects.get(id=topic_3.id)
    self.assertFalse(pybb_is_topic_unread(topic_1, user_ann))
    self.assertTrue(pybb_is_topic_unread(topic_2, user_ann))
    self.assertTrue(pybb_is_topic_unread(topic_3, user_ann))
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
        [False, True, True])
    client_ann.get(topic_2.get_absolute_url())
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    topic_3 = Topic.objects.get(id=topic_3.id)
    self.assertFalse(pybb_is_topic_unread(topic_1, user_ann))
    self.assertFalse(pybb_is_topic_unread(topic_2, user_ann))
    self.assertTrue(pybb_is_topic_unread(topic_3, user_ann))
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
        [False, False, True])
    client_ann.get(topic_3.get_absolute_url())
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    topic_3 = Topic.objects.get(id=topic_3.id)
    self.assertFalse(pybb_is_topic_unread(topic_1, user_ann))
    self.assertFalse(pybb_is_topic_unread(topic_2, user_ann))
    self.assertFalse(pybb_is_topic_unread(topic_3, user_ann))
    self.assertListEqual(
        [t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
        [False, False, False])
def test_is_forum_unread_filter(self):
    """A parent forum stays unread while any of its child forums still
    contains unread topics."""
    Forum.objects.all().delete()
    forum_parent = Forum.objects.create(name='f1', category=self.category)
    forum_child1 = Forum.objects.create(name='f2', category=self.category, parent=forum_parent)
    forum_child2 = Forum.objects.create(name='f3', category=self.category, parent=forum_parent)
    topic_1 = Topic.objects.create(name='topic_1', forum=forum_parent, user=self.user)
    topic_2 = Topic.objects.create(name='topic_2', forum=forum_child1, user=self.user)
    topic_3 = Topic.objects.create(name='topic_3', forum=forum_child2, user=self.user)
    Post(topic=topic_1, user=self.user, body='one').save()
    Post(topic=topic_2, user=self.user, body='two').save()
    Post(topic=topic_3, user=self.user, body='three').save()
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    # Forums are re-fetched before each check so the helper sees fresh state.
    forum_parent = Forum.objects.get(id=forum_parent.id)
    forum_child1 = Forum.objects.get(id=forum_child1.id)
    forum_child2 = Forum.objects.get(id=forum_child2.id)
    self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
                         [True, True, True])
    # even after reading the parent's topic, there are unread topics in the child forums
    client_ann.get(topic_1.get_absolute_url())
    forum_parent = Forum.objects.get(id=forum_parent.id)
    forum_child1 = Forum.objects.get(id=forum_child1.id)
    forum_child2 = Forum.objects.get(id=forum_child2.id)
    self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
                         [True, True, True])
    # still an unread topic in one of the child forums
    client_ann.get(topic_2.get_absolute_url())
    forum_parent = Forum.objects.get(id=forum_parent.id)
    forum_child1 = Forum.objects.get(id=forum_child1.id)
    forum_child2 = Forum.objects.get(id=forum_child2.id)
    self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
                         [True, False, True])
    # all topics read
    client_ann.get(topic_3.get_absolute_url())
    forum_parent = Forum.objects.get(id=forum_parent.id)
    forum_child1 = Forum.objects.get(id=forum_child1.id)
    forum_child2 = Forum.objects.get(id=forum_child2.id)
    self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
                         [False, False, False])
@skipUnlessDBFeature('supports_microsecond_precision')
def test_read_tracker_when_topics_forum_changed(self):
    """Unread state follows posts and topics when they are moved to a
    different topic or forum."""
    forum_1 = Forum.objects.create(name='f1', description='bar', category=self.category)
    forum_2 = Forum.objects.create(name='f2', description='bar', category=self.category)
    topic_1 = Topic.objects.create(name='t1', forum=forum_1, user=self.user)
    topic_2 = Topic.objects.create(name='t2', forum=forum_2, user=self.user)
    Post.objects.create(topic=topic_1, user=self.user, body='one')
    Post.objects.create(topic=topic_2, user=self.user, body='two')
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    # Everything is unread
    self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [True, True])
    self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [True, True])
    # read all
    client_ann.get(reverse('pybb:mark_all_as_read'))
    self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [False, False])
    self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [False, False])
    # A new post makes only its own topic/forum unread again.
    post = Post.objects.create(topic=topic_1, user=self.user, body='three')
    post = Post.objects.get(id=post.id)  # get post with timestamp from DB
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    self.assertEqual(topic_1.updated, post.updated or post.created)
    self.assertEqual(forum_1.updated, post.updated or post.created)
    self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [True, False])
    self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [True, False])
    # Moving the unread post into topic_2 moves the unread state with it.
    post.topic = topic_2
    post.save()
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    forum_1 = Forum.objects.get(id=forum_1.id)
    forum_2 = Forum.objects.get(id=forum_2.id)
    self.assertEqual(topic_2.updated, post.updated or post.created)
    self.assertEqual(forum_2.updated, post.updated or post.created)
    self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [False, True])
    self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [False, True])
    # Moving the unread topic into forum_1 moves the forum unread state too.
    topic_2.forum = forum_1
    topic_2.save()
    topic_1 = Topic.objects.get(id=topic_1.id)
    topic_2 = Topic.objects.get(id=topic_2.id)
    forum_1 = Forum.objects.get(id=forum_1.id)
    forum_2 = Forum.objects.get(id=forum_2.id)
    self.assertEqual(forum_1.updated, post.updated or post.created)
    self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [False, True])
    self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [True, False])
@skipUnlessDBFeature('supports_microsecond_precision')
def test_open_first_unread_post(self):
    """A GET with ?first-unread=1 redirects to the anchor of the first
    post the user has not read yet."""
    forum_1 = self.forum
    topic_1 = Topic.objects.create(name='topic_1', forum=forum_1, user=self.user)
    topic_2 = Topic.objects.create(name='topic_2', forum=forum_1, user=self.user)
    post_1_1 = Post.objects.create(topic=topic_1, user=self.user, body='1_1')
    post_1_2 = Post.objects.create(topic=topic_1, user=self.user, body='1_2')
    post_2_1 = Post.objects.create(topic=topic_2, user=self.user, body='2_1')
    user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
    client_ann = Client()
    client_ann.login(username='ann', password='ann')
    # First visit lands on the very first post of the topic.
    response = client_ann.get(topic_1.get_absolute_url(), data={'first-unread': 1}, follow=True)
    self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_1.get_absolute_url(), 1, post_1_1.id))
    # Second visit: asserted to target post_1_2 now that the topic was read.
    response = client_ann.get(topic_1.get_absolute_url(), data={'first-unread': 1}, follow=True)
    self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_1.get_absolute_url(), 1, post_1_2.id))
    response = client_ann.get(topic_2.get_absolute_url(), data={'first-unread': 1}, follow=True)
    self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_2.get_absolute_url(), 1, post_2_1.id))
    # New posts added after the topic was fully read become the first unread.
    post_1_3 = Post.objects.create(topic=topic_1, user=self.user, body='1_3')
    post_1_4 = Post.objects.create(topic=topic_1, user=self.user, body='1_4')
    response = client_ann.get(topic_1.get_absolute_url(), data={'first-unread': 1}, follow=True)
    self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_1.get_absolute_url(), 1, post_1_3.id))
def test_latest_topics(self):
    """The latest-topics list is ordered by update time and honours hidden
    forums/categories, moderation state and the viewer's permissions."""
    topic_1 = self.topic
    topic_1.updated = timezone.now()
    topic_1.save()
    topic_2 = Topic.objects.create(name='topic_2', forum=self.forum, user=self.user)
    topic_2.updated = timezone.now() + datetime.timedelta(days=-1)
    topic_2.save()
    category_2 = Category.objects.create(name='cat2')
    forum_2 = Forum.objects.create(name='forum_2', category=category_2)
    topic_3 = Topic.objects.create(name='topic_3', forum=forum_2, user=self.user)
    topic_3.updated = timezone.now() + datetime.timedelta(days=-2)
    topic_3.save()
    self.login_client()
    # Newest first.
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertEqual(response.status_code, 200)
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    # Topics of a hidden forum disappear (topic_1 and topic_2 share self.forum).
    topic_2.forum.hidden = True
    topic_2.forum.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_3])
    topic_2.forum.hidden = False
    topic_2.forum.save()
    # Topics of a hidden category disappear as well.
    category_2.hidden = True
    category_2.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2])
    topic_2.forum.hidden = False
    topic_2.forum.save()
    category_2.hidden = False
    category_2.save()
    # A topic on moderation remains visible to its own author ...
    topic_1.on_moderation = True
    topic_1.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    # ... but not to other plain users once ownership changes.
    topic_1.user = User.objects.create_user('another', 'another@localhost', 'another')
    topic_1.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_2, topic_3])
    # Moderators of the forum see it ...
    topic_1.forum.moderators.add(self.user)
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    topic_1.forum.moderators.remove(self.user)
    # ... and so do superusers.
    self.user.is_superuser = True
    self.user.save()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
    # Anonymous visitors do not see the moderated topic.
    self.client.logout()
    response = self.client.get(reverse('pybb:topic_latest'))
    self.assertListEqual(list(response.context['topic_list']), [topic_2, topic_3])
def test_hidden(self):
    """Hidden categories/forums are invisible to anonymous users and
    regular members, but fully visible to staff."""
    client = Client()
    category = Category(name='hcat', hidden=True)
    category.save()
    forum_in_hidden = Forum(name='in_hidden', category=category)
    forum_in_hidden.save()
    topic_in_hidden = Topic(forum=forum_in_hidden, name='in_hidden', user=self.user)
    topic_in_hidden.save()
    forum_hidden = Forum(name='hidden', category=self.category, hidden=True)
    forum_hidden.save()
    topic_hidden = Topic(forum=forum_hidden, name='hidden', user=self.user)
    topic_hidden.save()
    post_hidden = Post(topic=topic_hidden, user=self.user, body='hidden')
    post_hidden.save()
    post_in_hidden = Post(topic=topic_in_hidden, user=self.user, body='hidden')
    post_in_hidden.save()
    # Anonymous: hidden content is absent from listings and feeds, and
    # the direct URLs respond with a 302 redirect.
    self.assertFalse(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
    self.assertEqual(client.get(category.get_absolute_url()).status_code, 302)
    self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 302)
    self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 302)
    self.assertNotContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_topics')), topic_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_topics')), topic_in_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_posts')), post_hidden.get_absolute_url())
    self.assertNotContains(client.get(reverse('pybb:feed_posts')), post_in_hidden.get_absolute_url())
    self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 302)
    self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 302)
    # A regular member may post in visible topics but cannot quote a post
    # that lives in a hidden forum (403).
    user = User.objects.create_user('someguy', 'email@abc.xyz', 'password')
    client.login(username='someguy', password='password')
    response = client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}))
    self.assertEqual(response.status_code, 200, response)
    response = client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}), data={'quote_id': post_hidden.id})
    self.assertEqual(response.status_code, 403, response)
    # A logged-in non-staff user gets 403 instead of a redirect.
    client.login(username='zeus', password='zeus')
    self.assertFalse(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
    self.assertNotContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
    self.assertEqual(client.get(category.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 403)
    self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 403)
    # Staff members see everything.
    self.user.is_staff = True
    self.user.save()
    self.assertTrue(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
    self.assertContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
    self.assertEqual(client.get(category.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 200)
    self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 200)
def test_inactive(self):
    """An inactive user must not be able to create new posts."""
    self.login_client()
    add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    form_data = self.get_form_values(self.client.get(add_post_url))
    # While the account is active, posting succeeds.
    form_data['body'] = 'test ban'
    self.client.post(add_post_url, form_data, follow=True)
    self.assertEqual(len(Post.objects.filter(body='test ban')), 1)
    # Deactivate the account and try again: the post must be rejected.
    self.user.is_active = False
    self.user.save()
    form_data['body'] = 'test ban 2'
    self.client.post(add_post_url, form_data, follow=True)
    self.assertEqual(len(Post.objects.filter(body='test ban 2')), 0)
def get_csrf(self, form):
    """Return the CSRF token value embedded in *form*."""
    token_values = form.xpath('//input[@name="csrfmiddlewaretoken"]/@value')
    return token_values[0]
def test_csrf(self):
    """Posting without a CSRF token must fail; with a valid token it succeeds."""
    client = Client(enforce_csrf_checks=True)
    client.login(username='zeus', password='zeus')
    post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    # Strip the token from an otherwise valid form: the POST must be rejected.
    values = self.get_form_values(client.get(post_url))
    values.pop('csrfmiddlewaretoken')
    response = client.post(post_url, values, follow=True)
    self.assertNotEqual(response.status_code, 200)
    # A form fetched from the topic page carries a valid token and goes through.
    values = self.get_form_values(client.get(self.topic.get_absolute_url()))
    response = client.post(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}), values, follow=True)
    self.assertEqual(response.status_code, 200)
def test_user_blocking(self):
    """Blocking deactivates a user; 'block and delete' also wipes their content.

    The block view only accepts POST (GET must return 405).  A plain block
    leaves the user's topics/posts in place; blocking with the
    'block_and_delete_messages' flag removes them.
    """
    user = User.objects.create_user('test', 'test@localhost', 'test')
    topic = Topic.objects.create(name='topic', forum=self.forum, user=user)
    # Two posts so we can verify they survive a plain block and are deleted later.
    Post.objects.create(topic=topic, user=user, body='bbcode [b]test[/b]')
    Post.objects.create(topic=topic, user=user, body='bbcode [b]test[/b]')
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    # GET is not allowed on the block endpoint.
    response = self.client.get(reverse('pybb:block_user', args=[user.username]), follow=True)
    self.assertEqual(response.status_code, 405)
    response = self.client.post(reverse('pybb:block_user', args=[user.username]), follow=True)
    self.assertEqual(response.status_code, 200)
    user = User.objects.get(username=user.username)
    self.assertFalse(user.is_active)
    # A plain block keeps the user's content.
    self.assertEqual(Topic.objects.count(), 2)
    self.assertEqual(Post.objects.filter(user=user).count(), 2)
    user.is_active = True
    user.save()
    self.assertEqual(Topic.objects.count(), 2)
    response = self.client.post(reverse('pybb:block_user', args=[user.username]),
                                data={'block_and_delete_messages': 'block_and_delete_messages'}, follow=True)
    self.assertEqual(response.status_code, 200)
    user = User.objects.get(username=user.username)
    self.assertFalse(user.is_active)
    # Block-and-delete removes the user's topic and posts.
    self.assertEqual(Topic.objects.count(), 1)
    self.assertEqual(Post.objects.filter(user=user).count(), 0)
def test_user_unblocking(self):
    """Unblocking via POST reactivates a previously deactivated user."""
    user = User.objects.create_user('test', 'test@localhost', 'test')
    user.is_active = False
    user.save()
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    unblock_url = reverse('pybb:unblock_user', args=[user.username])
    # The endpoint rejects GET requests.
    self.assertEqual(self.client.get(unblock_url, follow=True).status_code, 405)
    self.assertEqual(self.client.post(unblock_url, follow=True).status_code, 200)
    self.assertTrue(User.objects.get(username=user.username).is_active)
def test_ajax_preview(self):
    """The AJAX preview endpoint renders submitted BBCode to HTML."""
    self.login_client()
    preview_url = reverse('pybb:post_ajax_preview')
    response = self.client.post(preview_url, data={'data': '[b]test bbcode ajax preview[/b]'})
    self.assertContains(response, '<strong>test bbcode ajax preview</strong>')
def test_headline(self):
    """A forum headline is rendered as-is (HTML allowed) on the forum page."""
    self.forum.headline = 'test <b>headline</b>'
    self.forum.save()
    anonymous = Client()
    response = anonymous.get(self.forum.get_absolute_url())
    self.assertContains(response, 'test <b>headline</b>')
def test_quote(self):
    """Quoting a post pre-fills the reply page with the quoted body."""
    self.login_client()
    quote_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    response = self.client.get(quote_url,
                               data={'quote_id': self.post.id, 'body': 'test tracking'},
                               follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, self.post.body)
def test_edit_post(self):
    """Editing a post updates its body and sets its ``updated`` timestamp.

    Also exercises the admin edit form: the extra 'login' field must be
    present and effective only while PYBB_ENABLE_ADMIN_POST_FORM is enabled.
    """
    self.login_client()
    edit_post_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
    response = self.client.get(edit_post_url)
    self.assertEqual(response.status_code, 200)
    # A never-edited post has no `updated` timestamp yet.
    self.assertIsNone(Post.objects.get(id=self.post.id).updated)
    tree = html.fromstring(response.content)
    values = dict(tree.xpath('//form[@method="post"]')[0].form_values())
    values['body'] = 'test edit'
    response = self.client.post(edit_post_url, data=values, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Post.objects.get(pk=self.post.id).body, 'test edit')
    response = self.client.get(self.post.get_absolute_url(), follow=True)
    self.assertContains(response, 'test edit')
    self.assertIsNotNone(Post.objects.get(id=self.post.id).updated)
    # Check admin form.  Restore the module-level flag in a finally block so a
    # failing assertion cannot leak the changed setting into other tests.
    orig_conf = defaults.PYBB_ENABLE_ADMIN_POST_FORM
    self.user.is_staff = True
    self.user.save()
    try:
        # With the admin form disabled there is no 'login' field, and a
        # submitted 'login' value must be ignored.
        defaults.PYBB_ENABLE_ADMIN_POST_FORM = False
        response = self.client.get(edit_post_url)
        self.assertEqual(response.status_code, 200)
        tree = html.fromstring(response.content)
        values = dict(tree.xpath('//form[@method="post"]')[0].form_values())
        self.assertNotIn('login', values)
        values['body'] = 'test edit'
        values['login'] = 'new_login'
        response = self.client.post(edit_post_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test edit')
        self.assertNotContains(response, 'new_login')
        # With the admin form enabled the 'login' field exists and is honoured.
        defaults.PYBB_ENABLE_ADMIN_POST_FORM = True
        response = self.client.get(edit_post_url)
        self.assertEqual(response.status_code, 200)
        tree = html.fromstring(response.content)
        values = dict(tree.xpath('//form[@method="post"]')[0].form_values())
        self.assertIn('login', values)
        values['body'] = 'test edit 2'
        values['login'] = 'new_login 2'
        response = self.client.post(edit_post_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test edit 2')
        self.assertContains(response, 'new_login 2')
    finally:
        defaults.PYBB_ENABLE_ADMIN_POST_FORM = orig_conf
def test_admin_post_add(self):
    """A staff member may post on behalf of another user via the 'user' field."""
    self.user.is_staff = True
    self.user.save()
    self.login_client()
    add_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
    payload = {'quote_id': self.post.id, 'body': 'test admin post', 'user': 'zeus'}
    response = self.client.post(add_url, data=payload, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'test admin post')
def test_stick(self):
    """A superuser can stick and then unstick a topic."""
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    # Stick first, then unstick; both views must redirect to a 200 page.
    for view_name in ('pybb:stick_topic', 'pybb:unstick_topic'):
        response = self.client.get(reverse(view_name, kwargs={'pk': self.topic.id}), follow=True)
        self.assertEqual(response.status_code, 200)
def test_delete_view(self):
    """Deleting a reply keeps its topic; deleting the head post removes the topic."""
    extra_post = Post(topic=self.topic, user=self.user, body='test to delete')
    extra_post.save()
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    response = self.client.post(reverse('pybb:delete_post', args=[extra_post.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    # Topic and forum must survive the deletion of a regular reply.
    self.assertEqual(Topic.objects.filter(id=self.topic.id).count(), 1)
    self.assertEqual(Forum.objects.filter(id=self.forum.id).count(), 1)
    # Deleting the topic's head post cascades to the topic but not the forum.
    response = self.client.post(reverse('pybb:delete_post', args=[self.post.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Post.objects.filter(id=self.post.id).count(), 0)
    self.assertEqual(Topic.objects.filter(id=self.topic.id).count(), 0)
    self.assertEqual(Forum.objects.filter(id=self.forum.id).count(), 1)
def test_open_close(self):
    """Replies are forbidden in a closed topic and allowed after reopening."""
    self.user.is_superuser = True
    self.user.save()
    self.login_client()
    add_post_url = reverse('pybb:add_post', args=[self.topic.id])
    values = self.get_form_values(self.client.get(add_post_url))
    values['body'] = 'test closed'
    # Close the topic: replies must now be rejected with 403.
    close_response = self.client.get(reverse('pybb:close_topic', args=[self.topic.id]), follow=True)
    self.assertEqual(close_response.status_code, 200)
    self.assertEqual(self.client.post(add_post_url, values, follow=True).status_code, 403)
    # Reopen the topic: replies are accepted again.
    open_response = self.client.get(reverse('pybb:open_topic', args=[self.topic.id]), follow=True)
    self.assertEqual(open_response.status_code, 200)
    self.assertEqual(self.client.post(add_post_url, values, follow=True).status_code, 200)
def test_subscription(self):
    """Subscribers get a mail on new replies and can unsubscribe again.

    Addresses at example.com are on pybb's ignore list, so user3 must not
    receive a notification even though they are subscribed.
    """
    user2 = User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
    # user3's @example.com address is ignored by the mailer — no mail expected.
    user3 = User.objects.create_user(username='user3', password='user3', email='user3@example.com')
    client = Client()
    client.login(username='user2', password='user2')
    subscribe_url = reverse('pybb:add_subscription', args=[self.topic.id])
    # The topic page shows exactly one "subscribe" link for a logged-in user.
    response = client.get(self.topic.get_absolute_url())
    subscribe_links = html.fromstring(response.content).xpath('//a[@href="%s"]' % subscribe_url)
    self.assertEqual(len(subscribe_links), 1)
    response = client.get(subscribe_url, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertIn(user2, self.topic.subscribers.all())
    self.topic.subscribers.add(user3)
    # create a new reply (with another user)
    self.client.login(username='zeus', password='zeus')
    add_post_url = reverse('pybb:add_post', args=[self.topic.id])
    response = self.client.get(add_post_url)
    values = self.get_form_values(response)
    values['body'] = 'test subscribtion юникод'
    response = self.client.post(add_post_url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    new_post = Post.objects.order_by('-id')[0]
    # there should only be one email in the outbox (to user2) because @example.com are ignored
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].to[0], user2.email)
    # The notification body must link to the newly created post.
    self.assertTrue([msg for msg in mail.outbox if new_post.get_absolute_url() in msg.body])
    # unsubscribe
    client.login(username='user2', password='user2')
    self.assertTrue([msg for msg in mail.outbox if new_post.get_absolute_url() in msg.body])
    response = client.get(reverse('pybb:delete_subscription', args=[self.topic.id]), follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertNotIn(user2, self.topic.subscribers.all())
def test_subscription_disabled(self):
    """With PYBB_DISABLE_SUBSCRIPTIONS on, the subscribe link and endpoint
    are disabled, but users who are already subscribed keep getting mails.
    """
    orig_conf = defaults.PYBB_DISABLE_SUBSCRIPTIONS
    defaults.PYBB_DISABLE_SUBSCRIPTIONS = True
    # Restore the global flag even if an assertion below fails, so the
    # changed setting cannot leak into other tests.
    try:
        User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
        user3 = User.objects.create_user(username='user3', password='user3', email='user3@someserver.com')
        client = Client()
        client.login(username='user2', password='user2')
        subscribe_url = reverse('pybb:add_subscription', args=[self.topic.id])
        # No subscribe link is rendered and the endpoint answers 403.
        response = client.get(self.topic.get_absolute_url())
        subscribe_links = html.fromstring(response.content).xpath('//a[@href="%s"]' % subscribe_url)
        self.assertEqual(len(subscribe_links), 0)
        response = client.get(subscribe_url, follow=True)
        self.assertEqual(response.status_code, 403)
        self.topic.subscribers.add(user3)
        # create a new reply (with another user)
        self.client.login(username='zeus', password='zeus')
        add_post_url = reverse('pybb:add_post', args=[self.topic.id])
        response = self.client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test subscribtion юникод'
        response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        # there should be one email in the outbox (user3)
        # because already subscribed users will still receive notifications.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to[0], user3.email)
    finally:
        defaults.PYBB_DISABLE_SUBSCRIPTIONS = orig_conf
def _test_notification_emails_init(self):
    """Shared fixture for the notification-email tests.

    Creates two subscribers with different profile languages, posts a reply
    as zeus, and returns ``(user2, user3, new_post)``.
    """
    user2 = User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
    profile2 = util.get_pybb_profile(user2)
    profile2.language = 'en'
    profile2.save()
    user3 = User.objects.create_user(username='user3', password='user3', email='user3@someserver.com')
    profile3 = util.get_pybb_profile(user3)
    profile3.language = 'fr'
    profile3.save()
    self.topic.subscribers.add(user2)
    self.topic.subscribers.add(user3)
    # Reply with a different account so both subscribers get notified.
    self.client.login(username='zeus', password='zeus')
    add_post_url = reverse('pybb:add_post', args=[self.topic.id])
    values = self.get_form_values(self.client.get(add_post_url))
    values['body'] = 'test notification HTML'
    response = self.client.post(add_post_url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    return user2, user3, Post.objects.order_by('-id')[0]
def test_notification_emails_alternative(self):
    """Notification mails carry exactly one text/html alternative part."""
    user2, user3, new_post = self._test_notification_emails_init()
    # One mail per subscriber.
    self.assertEqual(len(mail.outbox), 2)
    first_email = mail.outbox[0]
    self.assertEqual(first_email.to[0], user2.email)
    self.assertEqual(len(first_email.alternatives), 1)
    self.assertEqual(first_email.alternatives[0][1], 'text/html')
def test_notification_emails_content(self):
    """Both mail bodies (text and HTML) must link to the post, the topic and
    the unsubscribe page."""
    user2, user3, new_post = self._test_notification_emails_init()
    self.assertEqual(len(mail.outbox), 2)
    email = mail.outbox[0]
    html_body = email.alternatives[0][0]
    text_body = email.body
    delete_url = reverse('pybb:delete_subscription', args=[self.topic.id])
    post_url = new_post.get_absolute_url()
    topic_url = new_post.topic.get_absolute_url()
    # Classify each anchor in the HTML body; delete/post take precedence over
    # topic because the post URL usually embeds the topic URL.
    found = dict.fromkeys(('post_url', 'topic_url', 'delete_url'), False)
    for link in html.fromstring(html_body).xpath('//a'):
        href = link.attrib['href']
        if delete_url in href:
            found['delete_url'] = True
        elif post_url in href:
            found['post_url'] = True
        elif topic_url in href:
            found['topic_url'] = True
    self.assertTrue(found['delete_url'])
    self.assertTrue(found['post_url'])
    self.assertTrue(found['topic_url'])
    # The plain-text body must contain the same three URLs.
    self.assertIn(post_url, text_body)
    self.assertIn(topic_url, text_body)
    self.assertIn(delete_url, text_body)
def test_notification_emails_translation(self):
    """Each subscriber is notified in their own profile language."""
    user2, user3, new_post = self._test_notification_emails_init()
    self.assertEqual(len(mail.outbox), 2)
    # Outbox order is not guaranteed; pair the two mails by recipient.
    if mail.outbox[0].to[0] == user2.email:
        email_en, email_fr = mail.outbox
    else:
        email_fr, email_en = mail.outbox
    subject_en = "New answer in topic that you subscribed."
    self.assertEqual(email_en.subject, subject_en)
    self.assertNotEqual(email_fr.subject, subject_en)
def test_notifications_disabled(self):
    """With PYBB_DISABLE_NOTIFICATIONS on, subscribing still works but no
    notification mail is sent for new replies."""
    orig_conf = defaults.PYBB_DISABLE_NOTIFICATIONS
    defaults.PYBB_DISABLE_NOTIFICATIONS = True
    # Restore the global flag even if an assertion below fails, so the
    # changed setting cannot leak into other tests.
    try:
        User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
        user3 = User.objects.create_user(username='user3', password='user3', email='user3@someserver.com')
        client = Client()
        client.login(username='user2', password='user2')
        subscribe_url = reverse('pybb:add_subscription', args=[self.topic.id])
        # The subscribe link is still rendered and subscribing still works.
        response = client.get(self.topic.get_absolute_url())
        subscribe_links = html.fromstring(response.content).xpath('//a[@href="%s"]' % subscribe_url)
        self.assertEqual(len(subscribe_links), 1)
        response = client.get(subscribe_url, follow=True)
        self.assertEqual(response.status_code, 200)
        self.topic.subscribers.add(user3)
        # create a new reply (with another user)
        self.client.login(username='zeus', password='zeus')
        add_post_url = reverse('pybb:add_post', args=[self.topic.id])
        response = self.client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test subscribtion юникод'
        response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        # there should be no email in the outbox
        self.assertEqual(len(mail.outbox), 0)
    finally:
        defaults.PYBB_DISABLE_NOTIFICATIONS = orig_conf
def test_forum_subscription(self):
    """Exercise the forum-subscription form end to end.

    Covers: anonymous redirect, notify-only subscriptions, auto-subscribe to
    new topics, unsubscribing, and bulk (un)subscribing to all topics.

    Bug fixed: three assertions used the two-argument ``assertTrue(a, b)``
    form, where ``b`` is the failure *message* — such assertions can never
    fail.  They are now proper ``assertEqual`` checks.
    """
    url = reverse('pybb:forum_subscription', kwargs={'pk': self.forum.id})
    user2 = User.objects.create_user(username='user2', password='user2', email='user2@dns.com')
    user3 = User.objects.create_user(username='user3', password='user3', email='user3@dns.com')
    client = Client()
    client.login(username='user2', password='user2')
    parser = html.HTMLParser(encoding='utf8')
    # Check we have the "Subscribe" link
    response = client.get(self.forum.get_absolute_url())
    self.assertEqual(response.status_code, 200)
    tree = html.fromstring(response.content, parser=parser)
    # Fixed: was assertTrue(a, b), which treats b as a message and never fails.
    self.assertEqual(['Subscribe'], tree.xpath('//a[@href="%s"]/text()' % url))
    # check anonymous can't subscribe :
    anonymous_client = Client()
    response = anonymous_client.get(url)
    self.assertEqual(response.status_code, 302)
    # click on this link with a logged account
    response = client.get(url)
    self.assertEqual(response.status_code, 200)
    tree = html.fromstring(response.content, parser=parser)
    # Check we have 4 radio inputs
    radio_ids = tree.xpath('//input[@type="radio"]/@id')
    self.assertEqual(['id_type_0', 'id_type_1', 'id_topics_0', 'id_topics_1'], radio_ids)
    # submit the form to be notified for new topics
    values = self.get_form_values(response, form='forum_subscription')
    values.update({'type': ForumSubscription.TYPE_NOTIFY, 'topics': 'new', })
    response = client.post(url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertTrue('subscription' in response.context_data)
    self.assertEqual(response.context_data['subscription'].forum, self.forum)
    tree = html.fromstring(response.content, parser=parser)
    # Fixed: was assertTrue(a, b), which never fails.
    self.assertEqual(['Manage subscription'], tree.xpath('//a[@href="%s"]/text()' % url))
    client = Client()
    client.login(username='user3', password='user3')
    response = client.get(url)
    values = self.get_form_values(response, form='forum_subscription')
    values.update({'type': ForumSubscription.TYPE_SUBSCRIBE, 'topics': 'new', })
    response = client.post(url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertTrue('subscription' in response.context_data)
    # Fixed: was assertTrue(a, b); compare forum for real, as done above.
    self.assertEqual(response.context_data['subscription'].forum, self.forum)
    # Check there is still only zeus who subscribe to topic
    usernames = list(self.topic.subscribers.all().values_list('username', flat=True))
    self.assertEqual(usernames, [self.user.username, ])
    topic = Topic(name='newtopic', forum=self.forum, user=self.user)
    topic.save()
    # user2 should have a mail
    self.assertEqual(1, len(mail.outbox))
    self.assertEqual([user2.email, ], mail.outbox[0].to)
    self.assertEqual('New topic in forum that you subscribed.', mail.outbox[0].subject)
    self.assertTrue('User zeus post a new topic' in mail.outbox[0].body)
    self.assertTrue(topic.get_absolute_url() in mail.outbox[0].body)
    self.assertTrue(url in mail.outbox[0].body)
    post = Post(topic=topic, user=self.user, body='body')
    post.save()
    # Now, user3 should be subscribed to this new topic
    usernames = topic.subscribers.all().order_by('username')
    usernames = list(usernames.values_list('username', flat=True))
    self.assertEqual(usernames, ['user3', self.user.username])
    self.assertEqual(2, len(mail.outbox))
    self.assertEqual([user3.email, ], mail.outbox[1].to)
    self.assertEqual('New answer in topic that you subscribed.', mail.outbox[1].subject)
    # Now, we unsubscribe user3 to be auto subscribed
    response = client.get(url)
    self.assertEqual(response.status_code, 200)
    tree = html.fromstring(response.content, parser=parser)
    # Check we have 5 radio inputs
    radio_ids = tree.xpath('//input[@type="radio"]/@id')
    expected_inputs = [
        'id_type_0', 'id_type_1', 'id_type_2',
        'id_topics_0', 'id_topics_1'
    ]
    self.assertEqual(expected_inputs, radio_ids)
    self.assertEqual(tree.xpath('//input[@id="id_type_2"]/@value'), ['unsubscribe', ])
    self.assertEqual(tree.xpath('//input[@id="id_type_1"]/@checked'), ['checked', ])
    values = self.get_form_values(response, form='forum_subscription')
    values['type'] = 'unsubscribe'
    response = client.post(url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertTrue('subscription' in response.context_data)
    self.assertIsNone(response.context_data['subscription'])
    # user3 should not be subscribed anymore to any forum
    with self.assertRaises(ForumSubscription.DoesNotExist):
        ForumSubscription.objects.get(user=user3)
    # but should still be still subscribed to the topic
    usernames = list(topic.subscribers.all().order_by('id').values_list('username', flat=True))
    self.assertEqual(usernames, [self.user.username, 'user3', ])
    # Update user2's subscription to be autosubscribed to all posts
    client = Client()
    client.login(username='user2', password='user2')
    response = client.get(url)
    self.assertEqual(response.status_code, 200)
    tree = html.fromstring(response.content, parser=parser)
    self.assertEqual(tree.xpath('//input[@id="id_type_0"]/@checked'), ['checked', ])
    values = self.get_form_values(response, form='forum_subscription')
    values['type'] = ForumSubscription.TYPE_SUBSCRIBE
    values['topics'] = 'all'
    response = client.post(url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    # user2 should now be subscribed to all self.forum's topics
    subscribed_topics = list(user2.subscriptions.all().order_by('name').values_list('name', flat=True))
    expected_topics = list(self.forum.topics.all().order_by('name').values_list('name', flat=True))
    self.assertEqual(subscribed_topics, expected_topics)
    # unsubscribe user2 from all topics
    response = client.get(url)
    self.assertEqual(response.status_code, 200)
    tree = html.fromstring(response.content, parser=parser)
    self.assertEqual(tree.xpath('//input[@id="id_type_2"]/@value'), ['unsubscribe', ])
    values = self.get_form_values(response, form='forum_subscription')
    values['type'] = 'unsubscribe'
    values['topics'] = 'all'
    response = client.post(url, values, follow=True)
    self.assertEqual(response.status_code, 200)
    # user2 should now be subscribed to zero topics
    topics = list(user2.subscriptions.all().values_list('name', flat=True))
    self.assertEqual(topics, [])
@skipUnlessDBFeature('supports_microsecond_precision')
def test_topic_updated(self):
    """The forum's topic list is ordered by most recent activity."""
    other_topic = Topic(name='new topic', forum=self.forum, user=self.user)
    other_topic.save()
    Post(topic=other_topic, user=self.user, body='bbcode [b]test[/b]').save()
    response = Client().get(self.forum.get_absolute_url())
    self.assertEqual(response.context['topic_list'][0], other_topic)
    # A reply to the original topic bumps it back to the top.
    Post(topic=self.topic, user=self.user, body='bbcode [b]test[/b]').save()
    response = Client().get(self.forum.get_absolute_url())
    self.assertEqual(response.context['topic_list'][0], self.topic)
def test_topic_deleted(self):
    """Deleting topics/posts keeps forum counters and `updated` consistent."""
    scratch_forum = Forum.objects.create(name='new forum', category=self.category)
    first_topic = Topic.objects.create(name='new topic', forum=scratch_forum, user=self.user)
    first_post = Post.objects.create(topic=first_topic, user=self.user, body='test')
    first_post = Post.objects.get(id=first_post.id)
    self.assertEqual(first_topic.updated, first_post.created)
    self.assertEqual(scratch_forum.updated, first_post.created)
    second_topic = Topic.objects.create(name='another topic', forum=scratch_forum, user=self.user)
    second_post = Post.objects.create(topic=second_topic, user=self.user, body='another test')
    second_post = Post.objects.get(id=second_post.id)
    self.assertEqual(second_topic.updated, second_post.created)
    self.assertEqual(scratch_forum.updated, second_post.created)
    # Dropping the newest topic rolls the forum's `updated` back to the
    # remaining post and decrements the counters.
    second_topic.delete()
    scratch_forum = Forum.objects.get(id=scratch_forum.id)
    self.assertEqual(scratch_forum.updated, first_post.created)
    self.assertEqual(scratch_forum.topic_count, 1)
    self.assertEqual(scratch_forum.post_count, 1)
    first_post.delete()
    scratch_forum = Forum.objects.get(id=scratch_forum.id)
    self.assertEqual(scratch_forum.topic_count, 0)
    self.assertEqual(scratch_forum.post_count, 0)
def test_user_views(self):
    """Profile, post and topic lists render; content in hidden forums is not
    shown to anonymous visitors."""
    response = self.client.get(reverse('pybb:user', kwargs={'username': self.user.username}))
    self.assertEqual(response.status_code, 200)
    for view_name in ('pybb:user_posts', 'pybb:user_topics'):
        response = self.client.get(reverse(view_name, kwargs={'username': self.user.username}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['object_list'].count(), 1)
    # Hide the forum: the anonymous lists must come back empty.
    self.topic.forum.hidden = True
    self.topic.forum.save()
    self.client.logout()
    for view_name in ('pybb:user_posts', 'pybb:user_topics'):
        response = self.client.get(reverse(view_name, kwargs={'username': self.user.username}))
        self.assertEqual(response.context['object_list'].count(), 0)
def test_post_count(self):
    """Profile post_count tracks post creation and deletion, but not edits."""
    extra_topic = Topic(name='etopic', forum=self.forum, user=self.user)
    extra_topic.save()
    extra_post = Post(topic=extra_topic, user=self.user, body='test')  # another post
    extra_post.save()
    self.assertEqual(util.get_pybb_profile(self.user).post_count, 2)
    # Editing an existing post must not bump the counter.
    extra_post.body = 'test2'
    extra_post.save()
    self.assertEqual(Profile.objects.get(pk=util.get_pybb_profile(self.user).pk).post_count, 2)
    extra_post.delete()
    self.assertEqual(Profile.objects.get(pk=util.get_pybb_profile(self.user).pk).post_count, 1)
def test_latest_topics_tag(self):
    """pybb_get_latest_topics returns the five most recent topics, newest first."""
    Topic.objects.all().delete()
    for index in range(10):
        Topic.objects.create(name='topic%s' % index, user=self.user, forum=self.forum)
    latest = pybb_get_latest_topics(context=None, user=self.user)
    self.assertEqual(len(latest), 5)
    self.assertEqual(latest[0].name, 'topic9')
    self.assertEqual(latest[4].name, 'topic5')
def test_latest_posts_tag(self):
    """pybb_get_latest_posts returns the five most recent posts, newest first."""
    Post.objects.all().delete()
    for index in range(10):
        Post.objects.create(body='post%s' % index, user=self.user, topic=self.topic)
    latest = pybb_get_latest_posts(context=None, user=self.user)
    self.assertEqual(len(latest), 5)
    self.assertEqual(latest[0].body, 'post9')
    self.assertEqual(latest[4].body, 'post5')
def test_multiple_objects_returned(self):
    """Regression test for issue #87: https://github.com/hovel/pybbm/issues/87

    A plain user who moderates a forum alongside another moderator must
    still be able to open the add-post form.
    """
    self.assertFalse(self.user.is_superuser)
    self.assertFalse(self.user.is_staff)
    self.assertFalse(self.topic.on_moderation)
    self.assertEqual(self.topic.user, self.user)
    second_moderator = User.objects.create_user('geyser', 'geyser@localhost', 'geyser')
    self.topic.forum.moderators.add(self.user)
    self.topic.forum.moderators.add(second_moderator)
    self.login_client()
    response = self.client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}))
    self.assertEqual(response.status_code, 200)
def tearDown(self):
    """Restore the module-level pybb settings mutated by these tests."""
    defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
    defaults.PYBB_ENABLE_ANONYMOUS_POST = self.ORIG_PYBB_ENABLE_ANONYMOUS_POST
def test_managing_forums(self):
    """Moderator-privilege editing under a custom permission handler.

    Checks that the edit-privileges page is gated on the `change_forum`
    permission plus staff status, that the form lists only the forums the
    custom handler allows, and that posting the form adds/removes the
    target user as moderator of the checked forums.
    """
    _attach_perms_class('pybb.tests.CustomPermissionHandler')
    forum2 = Forum.objects.create(name='foo2', description='bar2', category=self.category)
    Forum.objects.create(name='foo3', description='bar3', category=self.category)
    moderator = User.objects.create_user('moderator', 'moderator@localhost', 'moderator')
    self.login_client()
    # test the visibility of the button and the access to the page:
    # without permission, no link is shown and direct access yields 403.
    response = self.client.get(reverse('pybb:user', kwargs={'username': moderator.username}))
    self.assertNotContains(
        response, '<a href="%s"' % reverse(
            'pybb:edit_privileges', kwargs={'username': moderator.username}
        )
    )
    response = self.client.get(reverse('pybb:edit_privileges', kwargs={'username': moderator.username}))
    self.assertEqual(response.status_code, 403)
    # Grant change_forum + staff status: link and page become accessible.
    add_change_forum_permission = Permission.objects.get_by_natural_key('change_forum','pybb','forum')
    self.user.user_permissions.add(add_change_forum_permission)
    self.user.is_staff = True
    self.user.save()
    response = self.client.get(reverse('pybb:user', kwargs={'username': moderator.username}))
    self.assertContains(
        response, '<a href="%s"' % reverse(
            'pybb:edit_privileges', kwargs={'username': moderator.username}
        )
    )
    response = self.client.get(reverse('pybb:edit_privileges', kwargs={'username': moderator.username}))
    self.assertEqual(response.status_code, 200)
    # test there are as many checkboxes as authorized forums in the category
    inputs = dict(html.fromstring(response.content).xpath('//form[@class="%s"]' % "privileges-edit")[0].inputs)
    self.assertEqual(
        len(response.context['form'].authorized_forums),
        len(inputs['cat_%d' % self.category.pk])
    )
    # test adding the user as moderator
    # get csrf token
    values = self.get_form_values(response, "privileges-edit")
    # dynamic construction of the list corresponding to the custom
    # may_change_forum handler (which excludes every third forum pk)
    available_forums = [forum for forum in self.category.forums.all() if not forum.pk % 3 == 0]
    values['cat_%d' % self.category.pk] = [forum.pk for forum in available_forums]
    response = self.client.post(
        reverse('pybb:edit_privileges', kwargs={'username': moderator.username}), data=values, follow=True
    )
    self.assertEqual(response.status_code, 200)
    correct_list = sorted(available_forums, key=lambda forum: forum.pk)
    moderator_list = sorted([forum for forum in moderator.forum_set.all()], key=lambda forum: forum.pk)
    self.assertEqual(correct_list, moderator_list)
    # test removing the user as moderator (keep one forum, then none)
    values['cat_%d' % self.category.pk] = [available_forums[0].pk, ]
    response = self.client.post(
        reverse('pybb:edit_privileges', kwargs={'username': moderator.username}), data=values, follow=True
    )
    self.assertEqual(response.status_code, 200)
    self.assertEqual([available_forums[0], ], [forum for forum in moderator.forum_set.all()])
    values['cat_%d' % self.category.pk] = []
    response = self.client.post(
        reverse('pybb:edit_privileges', kwargs={'username': moderator.username}), data=values, follow=True
    )
    self.assertEqual(response.status_code, 200)
    self.assertEqual(0, moderator.forum_set.count())
    # Clean up the permission and the custom handler.
    self.user.user_permissions.remove(add_change_forum_permission)
    _detach_perms_class()
class AnonymousTest(TestCase, SharedTestModule):
    """Tests for anonymous posting and the anonymous topic-view counter cache."""

    def setUp(self):
        # Save the pybb settings this class mutates so tearDown can restore them.
        self.ORIG_PYBB_ENABLE_ANONYMOUS_POST = defaults.PYBB_ENABLE_ANONYMOUS_POST
        self.ORIG_PYBB_ANONYMOUS_USERNAME = defaults.PYBB_ANONYMOUS_USERNAME
        self.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER
        defaults.PYBB_ENABLE_ANONYMOUS_POST = True
        defaults.PYBB_ANONYMOUS_USERNAME = 'Anonymous'
        # Posts created by anonymous visitors get attributed to this user.
        self.user = User.objects.create_user('Anonymous', 'Anonymous@localhost', 'Anonymous')
        self.category = Category.objects.create(name='foo')
        self.forum = Forum.objects.create(name='xfoo', description='bar', category=self.category)
        self.topic = Topic.objects.create(name='etopic', forum=self.forum, user=self.user)
        self.post = Post.objects.create(body='body post', topic=self.topic, user=self.user)
        add_post_permission = Permission.objects.get_by_natural_key('add_post', 'pybb', 'post')
        self.user.user_permissions.add(add_post_permission)

    def tearDown(self):
        # Restore the pybb settings mutated in setUp / the tests.
        defaults.PYBB_ENABLE_ANONYMOUS_POST = self.ORIG_PYBB_ENABLE_ANONYMOUS_POST
        defaults.PYBB_ANONYMOUS_USERNAME = self.ORIG_PYBB_ANONYMOUS_USERNAME
        defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = self.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER

    def test_anonymous_posting(self):
        """A not-logged-in visitor can post; the post is owned by the anonymous user."""
        post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        response = self.client.get(post_url)
        values = self.get_form_values(response)
        values['body'] = 'test anonymous'
        response = self.client.post(post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(Post.objects.filter(body='test anonymous')), 1)
        self.assertEqual(Post.objects.get(body='test anonymous').user, self.user)

    def test_anonymous_cache_topic_views(self):
        """Anonymous topic views are buffered in the cache and flushed to the
        DB once the buffer threshold is reached (or per-view when disabled)."""
        self.assertNotIn(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id), cache)
        url = self.topic.get_absolute_url()
        self.client.get(url)
        self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 1)
        # Fill the buffer to one short of the flush threshold: the DB counter
        # must still be untouched.
        for _ in range(defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER - 2):
            self.client.get(url)
        self.assertEqual(Topic.objects.get(id=self.topic.id).views, 0)
        self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)),
                         defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER - 1)
        # The next view crosses the threshold: the buffered count is flushed
        # to the DB and the cache counter resets to 0.
        self.client.get(url)
        self.assertEqual(Topic.objects.get(id=self.topic.id).views, defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER)
        self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 0)
        views = Topic.objects.get(id=self.topic.id).views
        # With buffering disabled, every view hits the DB directly.
        defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = None
        self.client.get(url)
        self.assertEqual(Topic.objects.get(id=self.topic.id).views, views + 1)
        self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 0)
def premoderate_test(user, post):
    """Test PYBB_PREMODERATION callback.

    Lets a post bypass premoderation only when the author's username starts
    with ``'allowed'``; everyone else's post is held for moderation.
    (The previous docstring claimed "staff users only", which did not match
    the implementation.)

    :param user: author of the post being checked (must have ``username``)
    :param post: the post under review (unused by this policy)
    :return: True to publish immediately, False to hold for moderation
    """
    return user.username.startswith('allowed')
class PreModerationTest(TestCase, SharedTestModule):
    """Exercise the PYBB_PREMODERATION hook using ``premoderate_test``:
    posts by users whose name starts with 'allowed' are published
    immediately, all other posts are held until a superuser moderates them.
    """
    def setUp(self):
        # Install the test premoderation callback; restored in tearDown.
        self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
        defaults.PYBB_PREMODERATION = premoderate_test
        self.create_user()
        self.create_initial()
        mail.outbox = []

    def test_premoderation(self):
        """Full premoderation flow: held posts are visible only to their
        author and to superusers, whitelisted users skip moderation, and a
        topic becomes visible once its first post is approved."""
        self.client.login(username='zeus', password='zeus')
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        response = self.client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test premoderation'
        response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(body='test premoderation')
        self.assertEqual(post.on_moderation, True)
        # Post is visible by author
        response = self.client.get(post.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test premoderation')
        # Post is not visible by anonymous user
        client = Client()
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertRedirects(response, settings.LOGIN_URL + '?next=%s' % post.get_absolute_url())
        response = client.get(self.topic.get_absolute_url(), follow=True)
        self.assertNotContains(response, 'test premoderation')
        # But visible by superuser (with permissions)
        user = User.objects.create_user('admin', 'admin@localhost', 'admin')
        user.is_superuser = True
        user.save()
        client.login(username='admin', password='admin')
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test premoderation')
        # Users whose name starts with 'allowed' can post without premoderation
        user = User.objects.create_user('allowed_zeus', 'allowed_zeus@localhost', 'allowed_zeus')
        client.login(username='allowed_zeus', password='allowed_zeus')
        response = client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test premoderation staff'
        response = client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(body='test premoderation staff')
        # Immediately visible even to an anonymous client.
        client = Client()
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertContains(response, 'test premoderation staff')
        # Superuser can moderate
        user.is_superuser = True
        user.save()
        admin_client = Client()
        admin_client.login(username='admin', password='admin')
        post = Post.objects.get(body='test premoderation')
        response = admin_client.get(reverse('pybb:moderate_post', kwargs={'pk': post.id}), follow=True)
        self.assertEqual(response.status_code, 200)
        # Now all can see this post:
        client = Client()
        response = client.get(post.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'test premoderation')
        # Other users can't moderate
        post.on_moderation = True
        post.save()
        client.login(username='zeus', password='zeus')
        response = client.get(reverse('pybb:moderate_post', kwargs={'pk': post.id}), follow=True)
        self.assertEqual(response.status_code, 403)
        # If user create new topic it goes to moderation if MODERATION_ENABLE
        # When first post is moderated, topic becomes moderated too
        self.client.login(username='zeus', password='zeus')
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'new topic test'
        values['name'] = 'new topic name'
        values['poll_type'] = 0
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'new topic test')
        # Held topic is invisible to anonymous users until moderated.
        client = Client()
        response = client.get(self.forum.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'new topic name')
        response = client.get(Topic.objects.get(name='new topic name').get_absolute_url())
        self.assertEqual(response.status_code, 302)
        response = admin_client.get(reverse('pybb:moderate_post',
                                            kwargs={'pk': Post.objects.get(body='new topic test').id}),
                                    follow=True)
        self.assertEqual(response.status_code, 200)
        # Moderating the first post publishes the topic.
        response = client.get(self.forum.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'new topic name')
        response = client.get(Topic.objects.get(name='new topic name').get_absolute_url())
        self.assertEqual(response.status_code, 200)

    def tearDown(self):
        # Restore the original premoderation setting.
        defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class AttachmentTest(TestCase, SharedTestModule):
    """Tests for post attachments (PYBB_ATTACHMENT_ENABLE)."""
    def setUp(self):
        # Enable attachments and disable premoderation; both restored in tearDown.
        self.PYBB_ATTACHMENT_ENABLE = defaults.PYBB_ATTACHMENT_ENABLE
        defaults.PYBB_ATTACHMENT_ENABLE = True
        self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
        defaults.PYBB_PREMODERATION = False
        # Small image shipped with the test suite's static files.
        self.file_name = os.path.join(os.path.dirname(__file__), 'static', 'pybb', 'img', 'attachment.png')
        self.create_user()
        self.create_initial()

    def test_attachment_one(self):
        """Posting with one file in the attachment formset stores one attachment."""
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        self.login_client()
        response = self.client.get(add_post_url)
        with open(self.file_name, 'rb') as fp:
            values = self.get_form_values(response)
            values['body'] = 'test attachment'
            values['attachments-0-file'] = fp
            response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(Post.objects.filter(body='test attachment').exists())
        post = Post.objects.filter(body='test attachment')[0]
        self.assertEqual(post.attachments.count(), 1)

    def test_attachment_two(self):
        """Submitting without the formset management fields raises ValidationError."""
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        self.login_client()
        response = self.client.get(add_post_url)
        with open(self.file_name, 'rb') as fp:
            values = self.get_form_values(response)
            values['body'] = 'test attachment'
            values['attachments-0-file'] = fp
            # Strip the formset management data to provoke the error.
            del values['attachments-INITIAL_FORMS']
            del values['attachments-TOTAL_FORMS']
            with self.assertRaises(ValidationError):
                self.client.post(add_post_url, values, follow=True)

    def test_attachment_usage(self):
        """``[file-N]`` placeholders in the body are replaced by the Nth
        attachment's URL in the rendered HTML; out-of-range or malformed
        placeholders are left untouched."""
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        self.login_client()
        response = self.client.get(add_post_url)
        body = (
            'test attachment: '
            '[img][file-1][/img]'
            '[img][file-2][/img]'
            '[img][file-1][/img]'
            '[file-3]'
            '[file-a]'
        )
        with open(self.file_name, 'rb') as fp, open(self.file_name, 'rb') as fp2:
            values = self.get_form_values(response)
            values['body'] = body
            values['attachments-0-file'] = fp
            values['attachments-1-file'] = fp2
            values['attachments-TOTAL_FORMS'] = 2
            response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        post = response.context['post']
        imgs = html.fromstring(post.body_html).xpath('//img')
        self.assertEqual(len(imgs), 3)
        # [file-3] (only 2 attachments) and [file-a] (non-numeric) stay literal.
        self.assertTrue('[file-3]' in post.body_html)
        self.assertTrue('[file-a]' in post.body_html)
        src1 = imgs[0].attrib.get('src')
        src2 = imgs[1].attrib.get('src')
        src3 = imgs[2].attrib.get('src')
        attachments = [a for a in post.attachments.order_by('pk')]
        self.assertEqual(src1, attachments[0].file.url)
        self.assertEqual(src2, attachments[1].file.url)
        # [file-1] was used twice, so the first and third img share a URL.
        self.assertEqual(src1, src3)

    def tearDown(self):
        # Restore the settings mutated in setUp.
        defaults.PYBB_ATTACHMENT_ENABLE = self.PYBB_ATTACHMENT_ENABLE
        defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class PollTest(TestCase, SharedTestModule):
    """Tests for topic polls: creation, validation, editing and voting."""
    def setUp(self):
        self.create_user()
        self.create_initial()
        # Cap polls at two answers for the tests; restored in tearDown.
        self.PYBB_POLL_MAX_ANSWERS = defaults.PYBB_POLL_MAX_ANSWERS
        defaults.PYBB_POLL_MAX_ANSWERS = 2

    def test_poll_add(self):
        """Answers are only created when poll_type != 0, and the answer count
        must lie between 2 and PYBB_POLL_MAX_ANSWERS (here 2)."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        self.login_client()
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 0  # poll_type = None, create topic without poll answers
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name')
        self.assertIsNone(new_topic.poll_question)
        self.assertFalse(PollAnswer.objects.filter(topic=new_topic).exists())  # no answers here
        values['name'] = 'test poll name 1'
        values['poll_type'] = 1
        values['poll_answers-0-text'] = 'answer1'  # not enough answers
        values['poll_answers-TOTAL_FORMS'] = 1
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertFalse(Topic.objects.filter(name='test poll name 1').exists())
        values['name'] = 'test poll name 1'
        values['poll_type'] = 1
        values['poll_answers-0-text'] = 'answer1'  # too many answers
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-2-text'] = 'answer3'
        values['poll_answers-TOTAL_FORMS'] = 3
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertFalse(Topic.objects.filter(name='test poll name 1').exists())
        values['name'] = 'test poll name 1'
        values['poll_type'] = 1  # poll type = single choice, create answers
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'  # two answers - what do we need to create poll
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name 1')
        self.assertEqual(new_topic.poll_question, 'q1')
        self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)

    def test_regression_adding_poll_with_removed_answers(self):
        """A poll whose every answer form is marked DELETE must not create the topic."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        self.login_client()
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 1
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = ''
        values['poll_answers-0-DELETE'] = 'on'
        values['poll_answers-1-text'] = ''
        values['poll_answers-1-DELETE'] = 'on'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(Topic.objects.filter(name='test poll name').exists())

    def test_regression_poll_deletion_after_second_post(self):
        """Replying to a poll topic must not delete its poll answers."""
        self.login_client()
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 1  # poll type = single choice, create answers
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'  # two answers - what do we need to create poll
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name')
        self.assertEqual(new_topic.poll_question, 'q1')
        self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)
        # Post a second message in the topic; the poll must survive.
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': new_topic.id})
        response = self.client.get(add_post_url)
        values = self.get_form_values(response)
        values['body'] = 'test answer body'
        response = self.client.post(add_post_url, values, follow=True)
        self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)

    def test_poll_edit(self):
        """Editing the topic's first post can add, change and remove its poll."""
        edit_topic_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
        self.login_client()
        response = self.client.get(edit_topic_url)
        values = self.get_form_values(response)
        values['poll_type'] = 1  # add_poll
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(edit_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 1)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_question, 'q1')
        self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 2)
        values = self.get_form_values(self.client.get(edit_topic_url))
        values['poll_type'] = 2  # change_poll type
        values['poll_question'] = 'q100'  # change poll question
        values['poll_answers-0-text'] = 'answer100'  # change poll answers
        values['poll_answers-1-text'] = 'answer200'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(edit_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 2)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_question, 'q100')
        self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 2)
        # Old answers are replaced, not kept alongside the new ones.
        self.assertTrue(PollAnswer.objects.filter(text='answer100').exists())
        self.assertTrue(PollAnswer.objects.filter(text='answer200').exists())
        self.assertFalse(PollAnswer.objects.filter(text='answer1').exists())
        self.assertFalse(PollAnswer.objects.filter(text='answer2').exists())
        values['poll_type'] = 0  # remove poll
        values['poll_answers-0-text'] = 'answer100'  # no matter how many answers we provide
        values['poll_answers-TOTAL_FORMS'] = 1
        response = self.client.post(edit_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 0)
        self.assertIsNone(Topic.objects.get(id=self.topic.id).poll_question)
        self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 0)

    def test_poll_voting(self):
        """Single- and multiple-choice voting, double-vote rejection, and
        re-voting after cancellation."""
        def recreate_poll(poll_type):
            # Reset the topic's poll to two fresh answers of the given type.
            self.topic.poll_type = poll_type
            self.topic.save()
            PollAnswer.objects.filter(topic=self.topic).delete()
            PollAnswer.objects.create(topic=self.topic, text='answer1')
            PollAnswer.objects.create(topic=self.topic, text='answer2')
        self.login_client()
        recreate_poll(poll_type=Topic.POLL_TYPE_SINGLE)
        vote_url = reverse('pybb:topic_poll_vote', kwargs={'pk': self.topic.id})
        my_answer = PollAnswer.objects.all()[0]
        values = {'answers': my_answer.id}
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Topic.objects.get(id=self.topic.id).poll_votes(), 1)
        self.assertEqual(PollAnswer.objects.get(id=my_answer.id).votes(), 1)
        self.assertEqual(PollAnswer.objects.get(id=my_answer.id).votes_percent(), 100.0)
        # already voted
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 403)  # bad request status
        recreate_poll(poll_type=Topic.POLL_TYPE_MULTIPLE)
        values = {'answers': [a.id for a in PollAnswer.objects.all()]}
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [1, 1])
        self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [50.0, 50.0])
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 403)  # already voted
        # Cancelling removes the user's votes entirely.
        cancel_vote_url = reverse('pybb:topic_cancel_poll_vote', kwargs={'pk': self.topic.id})
        response = self.client.post(cancel_vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [0, 0])
        self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [0, 0])
        # After cancelling, voting again is allowed.
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [1, 1])
        self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [50.0, 50.0])

    def test_poll_voting_on_closed_topic(self):
        """Voting on a closed topic's poll is forbidden (403)."""
        self.login_client()
        self.topic.poll_type = Topic.POLL_TYPE_SINGLE
        self.topic.save()
        PollAnswer.objects.create(topic=self.topic, text='answer1')
        PollAnswer.objects.create(topic=self.topic, text='answer2')
        self.topic.closed = True
        self.topic.save()
        vote_url = reverse('pybb:topic_poll_vote', kwargs={'pk': self.topic.id})
        my_answer = PollAnswer.objects.all()[0]
        values = {'answers': my_answer.id}
        response = self.client.post(vote_url, data=values, follow=True)
        self.assertEqual(response.status_code, 403)

    def tearDown(self):
        # Restore the poll answer limit.
        defaults.PYBB_POLL_MAX_ANSWERS = self.PYBB_POLL_MAX_ANSWERS
class FiltersTest(TestCase, SharedTestModule):
    """Check that the body-cleaning filters collapse redundant blank lines
    when a post is created."""

    def setUp(self):
        self.create_user()
        self.create_initial(post=False)

    def test_filters(self):
        self.login_client()
        add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
        form_data = self.get_form_values(self.client.get(add_post_url))
        form_data['body'] = 'test\n \n \n\nmultiple empty lines\n'
        response = self.client.post(add_post_url, form_data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Post.objects.all()[0].body, 'test\nmultiple empty lines')
class CustomPermissionHandler(permissions.DefaultPermissionHandler):
    """Permission handler that redefines what "hidden" means:

    hidden forums and categories are visible to every authenticated user
    (not only staff), while anonymous visitors cannot see them at all.
    """
    def filter_categories(self, user, qs):
        # Anonymous visitors only see non-hidden categories.
        if user.is_anonymous():
            return qs.filter(hidden=False)
        return qs

    def may_view_category(self, user, category):
        if category.hidden:
            return user.is_authenticated()
        return True

    def filter_forums(self, user, qs):
        # Hide forums that are hidden themselves or sit in a hidden category.
        return qs.filter(Q(hidden=False) & Q(category__hidden=False)) if user.is_anonymous() else qs

    def may_view_forum(self, user, forum):
        hidden = forum.hidden or forum.category.hidden
        return user.is_authenticated() if hidden else True

    def filter_topics(self, user, qs):
        if user.is_anonymous():
            qs = qs.filter(Q(forum__hidden=False) & Q(forum__category__hidden=False))
        # Closed topics are filtered out for everyone (exercised by the tests).
        return qs.filter(closed=False)

    def may_view_topic(self, user, topic):
        # Topic visibility follows its forum's visibility.
        return self.may_view_forum(user, topic.forum)

    def filter_posts(self, user, qs):
        if user.is_anonymous():
            return qs.filter(Q(topic__forum__hidden=False) & Q(topic__forum__category__hidden=False))
        return qs

    def may_view_post(self, user, post):
        return self.may_view_forum(user, post.topic.forum)

    def may_create_poll(self, user):
        # Polls are disabled entirely under this handler.
        return False

    def may_edit_topic_slug(self, user):
        return True

    def may_change_forum(self, user, forum):
        # Arbitrary test rule: forums whose pk is a multiple of 3 are frozen.
        return forum.pk % 3 != 0
class MarkupParserTest(TestCase, SharedTestModule):
    """Tests for the pluggable markup/quote engine registry in pybb.util."""
    def setUp(self):
        # Reinit Engines because they are stored in memory and the current bbcode engine stored
        # may be the old one, depending the test order exec.
        self.ORIG_PYBB_MARKUP_ENGINES = util.PYBB_MARKUP_ENGINES
        self.ORIG_PYBB_QUOTE_ENGINES = util.PYBB_QUOTE_ENGINES
        util.PYBB_MARKUP_ENGINES = {
            'bbcode': 'pybb.markup.bbcode.BBCodeParser',  # default parser
            'bbcode_custom': 'test_project.markup_parsers.CustomBBCodeParser',  # overrided default parser
            'liberator': 'test_project.markup_parsers.LiberatorParser',  # completely new parser
            'fake': 'pybb.markup.base.BaseParser',  # base parser
            'markdown': defaults.markdown  # old-style callable parser,
        }
        util.PYBB_QUOTE_ENGINES = {
            'bbcode': 'pybb.markup.bbcode.BBCodeParser',  # default parser
            'bbcode_custom': 'test_project.markup_parsers.CustomBBCodeParser',  # overrided default parser
            'liberator': 'test_project.markup_parsers.LiberatorParser',  # completely new parser
            'fake': 'pybb.markup.base.BaseParser',  # base parser
            'markdown': lambda text, username="": '>' + text.replace('\n', '\n>').replace('\r', '\n>') + '\n'  # old-style callable parser
        }

    def tearDown(self):
        # Drop the cached engine instances and restore the original registries.
        util._MARKUP_ENGINES = {}
        util._QUOTE_ENGINES = {}
        util.PYBB_MARKUP_ENGINES = self.ORIG_PYBB_MARKUP_ENGINES
        util.PYBB_QUOTE_ENGINES = self.ORIG_PYBB_QUOTE_ENGINES

    def test_markup_engines(self):
        """Each registered markup engine renders its own dialect; unknown
        engine names fall back to the base (pass-through) parser."""
        def _test_engine(parser_name, text_to_html_map):
            # Each item is [source, acceptable_html, ...]; any of the listed
            # renderings is accepted (output differs between parser versions).
            for item in text_to_html_map:
                self.assertIn(util._get_markup_formatter(parser_name)(item[0]), item[1:])
        text_to_html_map = [
            ['[b]bold[/b]', '<strong>bold</strong>'],
            ['[i]italic[/i]', '<em>italic</em>'],
            ['[u]underline[/u]', '<u>underline</u>'],
            ['[s]striked[/s]', '<strike>striked</strike>'],
            [
                '[img]http://domain.com/image.png[/img]',
                '<img src="http://domain.com/image.png"></img>',
                '<img src="http://domain.com/image.png">'
            ],
            ['[url=google.com]search in google[/url]', '<a href="http://google.com">search in google</a>'],
            ['http://google.com', '<a href="http://google.com">http://google.com</a>'],
            ['[list][*]1[*]2[/list]', '<ul><li>1</li><li>2</li></ul>'],
            [
                '[list=1][*]1[*]2[/list]',
                '<ol><li>1</li><li>2</li></ol>',
                '<ol style="list-style-type:decimal;"><li>1</li><li>2</li></ol>'
            ],
            ['[quote="post author"]quote[/quote]', '<blockquote><em>post author</em><br>quote</blockquote>'],
            [
                '[code]code[/code]',
                '<div class="code"><pre>code</pre></div>',
                '<pre><code>code</code></pre>']
            ,
        ]
        _test_engine('bbcode', text_to_html_map)
        # The custom parser supports everything bbcode does, plus extra tags.
        text_to_html_map = text_to_html_map + [
            ['[ul][li]1[/li][li]2[/li][/ul]', '<ul><li>1</li><li>2</li></ul>'],
            [
                '[youtube]video_id[/youtube]',
                (
                    '<iframe src="http://www.youtube.com/embed/video_id?wmode=opaque" '
                    'data-youtube-id="video_id" allowfullscreen="" frameborder="0" '
                    'height="315" width="560"></iframe>'
                )
            ],
        ]
        _test_engine('bbcode_custom', text_to_html_map)
        text_to_html_map = [
            ['Windows and Mac OS are wonderfull OS !', 'GNU Linux and FreeBSD are wonderfull OS !'],
            ['I love PHP', 'I love Python'],
        ]
        _test_engine('liberator', text_to_html_map)
        # The base parser passes text through unchanged; an unknown engine
        # name must behave the same way.
        text_to_html_map = [
            ['[b]bold[/b]', '[b]bold[/b]'],
            ['*italic*', '*italic*'],
        ]
        _test_engine('fake', text_to_html_map)
        _test_engine('not_existent', text_to_html_map)
        text_to_html_map = [
            ['**bold**', '<p><strong>bold</strong></p>'],
            ['*italic*', '<p><em>italic</em></p>'],
            [
                # NOTE(review): this source looks truncated — a markdown image
                # literal (![alt text](url "title")) was probably lost; confirm
                # against upstream before relying on this entry.
                '',
                '<p><img alt="alt text" src="http://domain.com/image.png" title="title" /></p>'
            ],
            [
                '[search in google](https://www.google.com)',
                '<p><a href="https://www.google.com">search in google</a></p>'
            ],
            [
                '[google] some text\n[google]: https://www.google.com',
                '<p><a href="https://www.google.com">google</a> some text</p>'
            ],
            ['* 1\n* 2', '<ul>\n<li>1</li>\n<li>2</li>\n</ul>'],
            ['1. 1\n2. 2', '<ol>\n<li>1</li>\n<li>2</li>\n</ol>'],
            ['> quote', '<blockquote>\n<p>quote</p>\n</blockquote>'],
            ['```\ncode\n```', '<p><code>code</code></p>'],
        ]
        _test_engine('markdown', text_to_html_map)

    def test_quote_engines(self):
        """Each quote engine wraps quoted text in its dialect, with and
        without an attributed username."""
        def _test_engine(parser_name, text_to_quote_map):
            # Each item is [source, quoted_without_user, quoted_with_user].
            for item in text_to_quote_map:
                self.assertEqual(util._get_markup_quoter(parser_name)(item[0]), item[1])
                self.assertEqual(util._get_markup_quoter(parser_name)(item[0], 'username'), item[2])
        text_to_quote_map = [
            ['quote text', '[quote=""]quote text[/quote]\n', '[quote="username"]quote text[/quote]\n']
        ]
        _test_engine('bbcode', text_to_quote_map)
        _test_engine('bbcode_custom', text_to_quote_map)
        text_to_quote_map = [
            ['quote text', 'quote text', 'posted by: username\nquote text']
        ]
        _test_engine('liberator', text_to_quote_map)
        # Base parser (and unknown engines) quote by passing text through.
        text_to_quote_map = [
            ['quote text', 'quote text', 'quote text']
        ]
        _test_engine('fake', text_to_quote_map)
        _test_engine('not_existent', text_to_quote_map)
        text_to_quote_map = [
            ['quote\r\ntext', '>quote\n>\n>text\n', '>quote\n>\n>text\n']
        ]
        _test_engine('markdown', text_to_quote_map)

    def test_body_cleaners(self):
        """Body cleaners (referenced by dotted path or callable) apply to
        ordinary users but are skipped for staff."""
        user = User.objects.create_user('zeus', 'zeus@localhost', 'zeus')
        staff = User.objects.create_user('staff', 'staff@localhost', 'staff')
        staff.is_staff = True
        staff.save()
        from pybb.markup.base import rstrip_str
        # Each item is [cleaner (path or callable), raw body, cleaned body].
        cleaners_map = [
            ['pybb.markup.base.filter_blanks', 'some\n\n\n\ntext\n\nwith\nnew\nlines', 'some\ntext\n\nwith\nnew\nlines'],
            [rstrip_str, 'text \n \nwith whitespaces ', 'text\n\nwith whitespaces'],
        ]
        for cleaner, source, dest in cleaners_map:
            self.assertEqual(util.get_body_cleaner(cleaner)(user, source), dest)
            self.assertEqual(util.get_body_cleaner(cleaner)(staff, source), source)
def _attach_perms_class(class_name):
    """
    Override the permission handler by monkeypatching both pybb.views and
    pybb.permissions. This cannot be done with @override_settings because
    ``permissions.perms`` is resolved at import time (not pretty, but only
    an issue in tests). Pair with ``_detach_perms_class`` in tearDown.

    :param class_name: dotted path of the handler class to install
    """
    pybb_views.perms = permissions.perms = util.resolve_class(class_name)
def _detach_perms_class():
    """
    Reinstall pybb's DefaultPermissionHandler after a test used
    ``_attach_perms_class`` (otherwise other tests may fail).
    """
    pybb_views.perms = permissions.perms = util.resolve_class('pybb.permissions.DefaultPermissionHandler')
class CustomPermissionHandlerTest(TestCase, SharedTestModule):
    """Integration tests for CustomPermissionHandler: hidden content is
    login-gated (302 for anonymous), closed topics are filtered out, and
    poll creation is denied."""
    def setUp(self):
        self.create_user()
        # create public and hidden categories, forums, posts
        c_pub = Category(name='public')
        c_pub.save()
        c_hid = Category(name='private', hidden=True)
        c_hid.save()
        self.forum = Forum.objects.create(name='pub1', category=c_pub)
        Forum.objects.create(name='priv1', category=c_hid)
        Forum.objects.create(name='private_in_public_cat', hidden=True, category=c_pub)
        # One topic with one post per forum.
        for f in Forum.objects.all():
            t = Topic.objects.create(name='a topic', forum=f, user=self.user)
            Post.objects.create(topic=t, user=self.user, body='test')
        # make some topics closed => hidden
        for t in Topic.objects.all()[0:2]:
            t.closed = True
            t.save()
        _attach_perms_class('pybb.tests.CustomPermissionHandler')

    def tearDown(self):
        # Restore the default permission handler for other tests.
        _detach_perms_class()

    def test_category_permission(self):
        for c in Category.objects.all():
            # anon user may not see hidden category (redirected to login)
            r = self.get_with_user(c.get_absolute_url())
            if c.hidden:
                self.assertEqual(r.status_code, 302)
            else:
                self.assertEqual(r.status_code, 200)
            # logged on user may see all categories
            r = self.get_with_user(c.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 200)

    def test_forum_permission(self):
        for f in Forum.objects.all():
            # Anonymous: redirect for hidden forum or hidden parent category.
            r = self.get_with_user(f.get_absolute_url())
            self.assertEqual(r.status_code, 302 if f.hidden or f.category.hidden else 200)
            r = self.get_with_user(f.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 200)
            # Closed topics are filtered out of the forum's topic list.
            self.assertEqual(r.context['object_list'].count(), f.topics.filter(closed=False).count())

    def test_topic_permission(self):
        for t in Topic.objects.all():
            # Topic visibility follows its forum's visibility.
            r = self.get_with_user(t.get_absolute_url())
            self.assertEqual(r.status_code, 302 if t.forum.hidden or t.forum.category.hidden else 200)
            r = self.get_with_user(t.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 200)

    def test_post_permission(self):
        # Post URLs redirect to the post's position in its topic.
        for p in Post.objects.all():
            r = self.get_with_user(p.get_absolute_url())
            self.assertEqual(r.status_code, 302)
            r = self.get_with_user(p.get_absolute_url(), 'zeus', 'zeus')
            self.assertEqual(r.status_code, 302)

    def test_poll_add(self):
        """may_create_poll() is False, so the topic is created but the poll
        question and answers are silently dropped."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
        self.login_client()
        response = self.client.get(add_topic_url)
        values = self.get_form_values(response)
        values['body'] = 'test poll body'
        values['name'] = 'test poll name'
        values['poll_type'] = 1  # poll_type = 1, create topic with poll
        values['poll_question'] = 'q1'
        values['poll_answers-0-text'] = 'answer1'
        values['poll_answers-1-text'] = 'answer2'
        values['poll_answers-TOTAL_FORMS'] = 2
        response = self.client.post(add_topic_url, values, follow=True)
        self.assertEqual(response.status_code, 200)
        new_topic = Topic.objects.get(name='test poll name')
        self.assertIsNone(new_topic.poll_question)
        self.assertFalse(PollAnswer.objects.filter(topic=new_topic).exists())  # no answers here
class RestrictEditingHandler(permissions.DefaultPermissionHandler):
    """Permission handler that forbids creating topics/posts and editing
    posts for everyone — used to verify that unauthorized (but logged-in)
    users get a 403 instead of a login redirect."""
    def may_create_topic(self, user, forum):
        return False

    def may_create_post(self, user, topic):
        return False

    def may_edit_post(self, user, post):
        return False
class LogonRedirectTest(TestCase, SharedTestModule):
""" test whether anonymous user gets redirected, whereas unauthorized user gets PermissionDenied """
def setUp(self):
# create users
staff = User.objects.create_user('staff', 'staff@localhost', 'staff')
staff.is_staff = True
staff.save()
nostaff = User.objects.create_user('nostaff', 'nostaff@localhost', 'nostaff')
nostaff.is_staff = False
nostaff.save()
# create topic, post in hidden category
self.category = Category(name='private', hidden=True)
self.category.save()
self.forum = Forum(name='priv1', category=self.category)
self.forum.save()
self.topic = Topic(name='a topic', forum=self.forum, user=staff)
self.topic.save()
self.post = Post(body='body post', topic=self.topic, user=staff, on_moderation=True)
self.post.save()
def test_redirect_category(self):
# access without user should be redirected
r = self.get_with_user(self.category.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.category.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.category.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.category.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_forum(self):
# access without user should be redirected
r = self.get_with_user(self.forum.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.forum.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.forum.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.forum.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_topic(self):
# access without user should be redirected
r = self.get_with_user(self.topic.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.topic.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.topic.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.topic.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_post(self):
# access without user should be redirected
r = self.get_with_user(self.post.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.post.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.post.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.post.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 302)
@override_settings(PYBB_ENABLE_ANONYMOUS_POST=False)
def test_redirect_topic_add(self):
_attach_perms_class('pybb.tests.RestrictEditingHandler')
# access without user should be redirected
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
r = self.get_with_user(add_topic_url)
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % add_topic_url)
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(add_topic_url, 'staff', 'staff')
self.assertEquals(r.status_code, 403)
_detach_perms_class()
# allowed user is allowed
r = self.get_with_user(add_topic_url, 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_post_edit(self):
_attach_perms_class('pybb.tests.RestrictEditingHandler')
# access without user should be redirected
edit_post_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
r = self.get_with_user(edit_post_url)
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % edit_post_url)
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(edit_post_url, 'staff', 'staff')
self.assertEquals(r.status_code, 403)
_detach_perms_class()
# allowed user is allowed
r = self.get_with_user(edit_post_url, 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_profile_autocreation_signal_on(self):
user = User.objects.create_user('cronos', 'cronos@localhost', 'cronos')
profile = getattr(user, defaults.PYBB_PROFILE_RELATED_NAME, None)
self.assertIsNotNone(profile)
self.assertEqual(type(profile), util.get_pybb_profile_model())
user.delete()
def test_profile_autocreation_middleware(self):
    """The middleware must recreate a missing profile on any page view."""
    new_user = User.objects.create_user('cronos', 'cronos@localhost', 'cronos')
    getattr(new_user, defaults.PYBB_PROFILE_RELATED_NAME).delete()
    # Simply displaying a page should make the middleware create the profile.
    self.get_with_user('/', 'cronos', 'cronos')
    new_user = User.objects.get(username='cronos')
    recreated_profile = getattr(new_user, defaults.PYBB_PROFILE_RELATED_NAME, None)
    self.assertIsNotNone(recreated_profile)
    self.assertEqual(type(recreated_profile), util.get_pybb_profile_model())
    new_user.delete()
def test_user_delete_cascade(self):
    """Deleting a user must cascade-delete the profile and their posts."""
    doomed_user = User.objects.create_user('cronos', 'cronos@localhost', 'cronos')
    doomed_profile = getattr(doomed_user, defaults.PYBB_PROFILE_RELATED_NAME, None)
    self.assertIsNotNone(doomed_profile)
    new_post = Post(topic=self.topic, user=doomed_user, body='I \'ll be back')
    new_post.save()
    # Remember the primary keys before the cascade wipes the rows.
    user_pk, profile_pk, post_pk = doomed_user.pk, doomed_profile.pk, new_post.pk
    doomed_user.delete()
    self.assertFalse(User.objects.filter(pk=user_pk).exists())
    self.assertFalse(Profile.objects.filter(pk=profile_pk).exists())
    self.assertFalse(Post.objects.filter(pk=post_pk).exists())
class NiceUrlsTest(TestCase, SharedTestModule):
    """Tests for the PYBB_NICE_URL feature: slug generation, slug
    deduplication, and slug-based ("nice") URL resolution/redirection."""

    def __init__(self, *args, **kwargs):
        super(NiceUrlsTest, self).__init__(*args, **kwargs)
        # Remember the original setting so tearDown can restore it.
        self.ORIGINAL_PYBB_NICE_URL = defaults.PYBB_NICE_URL
        defaults.PYBB_NICE_URL = True
        self.urls = settings.ROOT_URLCONF

    def setUp(self):
        self.create_user()
        self.login_client()
        self.create_initial()
        # BUGFIX: the original re-saved defaults.PYBB_NICE_URL here, but
        # __init__ has already forced it to True, so the genuine original
        # value was overwritten and tearDown could never restore it.
        defaults.PYBB_NICE_URL = True

    def test_unicode_slugify(self):
        """Non-ASCII names must be transliterated into ASCII slugs."""
        self.assertEqual(compat.slugify('北京 (China), Москва (Russia), é_è (a sad smiley !)'),
                         'bei-jing-china-moskva-russia-e_e-a-sad-smiley')

    def test_automatique_slug(self):
        """Slugs are derived from the object names when not given explicitly."""
        self.assertEqual(compat.slugify(self.category.name), self.category.slug)
        self.assertEqual(compat.slugify(self.forum.name), self.forum.slug)
        self.assertEqual(compat.slugify(self.topic.name), self.topic.slug)

    def test_no_duplicate_slug(self):
        """Duplicate names/slugs get a numeric suffix appended."""
        category_name = self.category.name
        forum_name = self.forum.name
        topic_name = self.topic.name
        # objects created without slug but the same name
        category = Category.objects.create(name=category_name)
        forum = Forum.objects.create(name=forum_name, description='bar', category=self.category)
        topic = Topic.objects.create(name=topic_name, forum=self.forum, user=self.user)
        slug_nb = len(Category.objects.filter(slug__startswith=category_name)) - 1
        self.assertEqual('%s-%d' % (compat.slugify(category_name), slug_nb), category.slug)
        slug_nb = len(Forum.objects.filter(slug__startswith=forum_name)) - 1
        self.assertEqual('%s-%d' % (compat.slugify(forum_name), slug_nb), forum.slug)
        slug_nb = len(Topic.objects.filter(slug__startswith=topic_name)) - 1
        self.assertEqual('%s-%d' % (compat.slugify(topic_name), slug_nb), topic.slug)
        # objects created with a duplicate slug but a different name
        category = Category.objects.create(name='test_slug_category', slug=compat.slugify(category_name))
        forum = Forum.objects.create(name='test_slug_forum', description='bar',
                                     category=self.category, slug=compat.slugify(forum_name))
        topic = Topic.objects.create(name='test_topic_slug', forum=self.forum,
                                     user=self.user, slug=compat.slugify(topic_name))
        slug_nb = len(Category.objects.filter(slug__startswith=category_name)) - 1
        self.assertEqual('%s-%d' % (compat.slugify(category_name), slug_nb), category.slug)
        slug_nb = len(Forum.objects.filter(slug__startswith=forum_name)) - 1
        self.assertEqual('%s-%d' % (compat.slugify(forum_name), slug_nb), forum.slug)
        slug_nb = len(Topic.objects.filter(slug__startswith=self.topic.name)) - 1
        self.assertEqual('%s-%d' % (compat.slugify(topic_name), slug_nb), topic.slug)

    def test_fail_on_too_many_duplicate_slug(self):
        """Creation beyond the configured duplicate-slug limit must fail."""
        original_duplicate_limit = defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT
        defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT = 200
        try:
            for _ in iter(range(200)):
                Topic.objects.create(name='dolly', forum=self.forum, user=self.user)
        except ValidationError:
            self.fail('Should be able to create "dolly", "dolly-1", ..., "dolly-199".\n')
        with self.assertRaises(ValidationError):
            Topic.objects.create(name='dolly', forum=self.forum, user=self.user)
        defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT = original_duplicate_limit

    def test_long_duplicate_slug(self):
        """Suffixed slugs must still fit inside the 255-character field."""
        long_name = 'abcde' * 51  # 255 symbols
        topic1 = Topic.objects.create(name=long_name, forum=self.forum, user=self.user)
        self.assertEqual(topic1.slug, long_name)
        topic2 = Topic.objects.create(name=long_name, forum=self.forum, user=self.user)
        self.assertEqual(topic2.slug, '%s-1' % long_name[:253])
        topic3 = Topic.objects.create(name=long_name, forum=self.forum, user=self.user)
        self.assertEqual(topic3.slug, '%s-2' % long_name[:253])

    def test_absolute_url(self):
        """get_absolute_url must produce the slug-based URL hierarchy."""
        response = self.client.get(self.category.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['category'], self.category)
        self.assertEqual('/c/%s/' % (self.category.slug), self.category.get_absolute_url())
        response = self.client.get(self.forum.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['forum'], self.forum)
        self.assertEqual(
            '/c/%s/%s/' % (self.category.slug, self.forum.slug),
            self.forum.get_absolute_url()
        )
        response = self.client.get(self.topic.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['topic'], self.topic)
        self.assertEqual(
            '/c/%s/%s/%s/' % (self.category.slug, self.forum.slug, self.topic.slug),
            self.topic.get_absolute_url()
        )

    def test_add_topic(self):
        """The slug field is hidden by default and editable only for users
        granted may_edit_topic_slug by the permission handler."""
        add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.pk})
        response = self.client.get(add_topic_url)
        inputs = dict(html.fromstring(response.content).xpath('//form[@class="%s"]' % "post-form")[0].inputs)
        self.assertNotIn('slug', inputs)
        values = self.get_form_values(response)
        values.update({'name': self.topic.name, 'body': '[b]Test slug body[/b]', 'poll_type': 0})
        response = self.client.post(add_topic_url, data=values, follow=True)
        slug_nb = len(Topic.objects.filter(slug__startswith=compat.slugify(self.topic.name))) - 1
        # BUGFIX: the original ASSIGNED to self.assertIsNotNone instead of
        # calling it, which silently disabled the assertion and shadowed the
        # method for the rest of the test.
        self.assertIsNotNone(Topic.objects.get(slug='%s-%d' % (self.topic.name, slug_nb)))
        _attach_perms_class('pybb.tests.CustomPermissionHandler')
        response = self.client.get(add_topic_url)
        inputs = dict(html.fromstring(response.content).xpath('//form[@class="%s"]' % "post-form")[0].inputs)
        self.assertIn('slug', inputs)
        values = self.get_form_values(response)
        values.update({'name': self.topic.name, 'body': '[b]Test slug body[/b]',
                       'poll_type': 0, 'slug': 'test_slug'})
        response = self.client.post(add_topic_url, data=values, follow=True)
        # BUGFIX: same assignment-instead-of-call bug as above.
        self.assertIsNotNone(Topic.objects.get(slug='test_slug'))
        _detach_perms_class()

    def test_old_url_redirection(self):
        """pk-based URLs must redirect (301/302 per setting) to nice URLs."""
        original_perm_redirect = defaults.PYBB_NICE_URL_PERMANENT_REDIRECT
        for redirect_status in [301, 302]:
            defaults.PYBB_NICE_URL_PERMANENT_REDIRECT = redirect_status == 301
            response = self.client.get(reverse("pybb:category", kwargs={"pk": self.category.pk}))
            self.assertRedirects(response, self.category.get_absolute_url(), status_code=redirect_status)
            response = self.client.get(reverse("pybb:forum", kwargs={"pk": self.forum.pk}))
            self.assertRedirects(response, self.forum.get_absolute_url(), status_code=redirect_status)
            response = self.client.get(reverse("pybb:topic", kwargs={"pk": self.topic.pk}))
            self.assertRedirects(response, self.topic.get_absolute_url(), status_code=redirect_status)
        defaults.PYBB_NICE_URL_PERMANENT_REDIRECT = original_perm_redirect

    def tearDown(self):
        defaults.PYBB_NICE_URL = self.ORIGINAL_PYBB_NICE_URL
| webu/pybbm | pybb/tests.py | Python | bsd-2-clause | 121,776 | [
"VisIt"
] | 6abb2f289ef25bda4b4ec57a5c86b966091be54c39640e30a5b93c7767e19b73 |
"""
homeassistant.components.isy994
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Connects to an ISY-994 controller and loads relevant components to control its
devices. Also contains the base classes for ISY Sensors, Lights, and Switches.
For configuration details please visit the documentation for this component at
https://home-assistant.io/components/isy994.html
"""
import logging
from urllib.parse import urlparse
from homeassistant import bootstrap
from homeassistant.loader import get_component
from homeassistant.helpers import validate_config
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, EVENT_PLATFORM_DISCOVERED,
EVENT_HOMEASSISTANT_STOP, ATTR_SERVICE, ATTR_DISCOVERED,
ATTR_FRIENDLY_NAME)
DOMAIN = "isy994"
DEPENDENCIES = []
# Third-party requirement installed by Home Assistant's dependency handling.
REQUIREMENTS = ['PyISY==1.0.5']
# Discovery identifiers fired with EVENT_PLATFORM_DISCOVERED in setup() so
# the sensor/light/switch platforms pick up their ISY devices.
DISCOVER_LIGHTS = "isy994.lights"
DISCOVER_SWITCHES = "isy994.switches"
DISCOVER_SENSORS = "isy994.sensors"
# Module-level handle to the PyISY controller; populated in setup().
ISY = None
# Substring of a node name marking it as a sensor (user-configurable in setup()).
SENSOR_STRING = 'Sensor'
# Substring of a node name that hides the entity (user-configurable in setup()).
HIDDEN_STRING = '{HIDE ME}'
CONF_TLS_VER = 'tls'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """
    Setup ISY994 component.
    This will automatically import associated lights, switches, and sensors.

    Returns True on success, False if the dependency is missing, the
    configuration is invalid, or the controller cannot be reached.
    """
    try:
        import PyISY
    except ImportError:
        _LOGGER.error("Error while importing dependency PyISY.")
        return False
    # pylint: disable=global-statement
    # check for required values in configuration file
    if not validate_config(config,
                           {DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
                           _LOGGER):
        return False
    # pull and parse standard configuration
    user = config[DOMAIN][CONF_USERNAME]
    password = config[DOMAIN][CONF_PASSWORD]
    host = urlparse(config[DOMAIN][CONF_HOST])
    addr = host.geturl()
    # Strip the scheme from the URL; PyISY wants a bare address plus flags.
    if host.scheme == 'http':
        addr = addr.replace('http://', '')
        https = False
    elif host.scheme == 'https':
        addr = addr.replace('https://', '')
        https = True
    else:
        _LOGGER.error('isy994 host value in configuration file is invalid.')
        return False
    port = host.port
    # NOTE(review): if the configured URL has no explicit port, host.port is
    # None and this replace is a no-op on ':None' — confirm intended.
    addr = addr.replace(':{}'.format(port), '')
    # pull and parse optional configuration
    global SENSOR_STRING
    global HIDDEN_STRING
    SENSOR_STRING = str(config[DOMAIN].get('sensor_string', SENSOR_STRING))
    HIDDEN_STRING = str(config[DOMAIN].get('hidden_string', HIDDEN_STRING))
    tls_version = config[DOMAIN].get(CONF_TLS_VER, None)
    # connect to ISY controller
    global ISY
    ISY = PyISY.ISY(addr, port, user, password, use_https=https,
                    tls_ver=tls_version, log=_LOGGER)
    if not ISY.connected:
        return False
    # listen for HA stop to disconnect
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
    # Load components for the devices in the ISY controller that we support
    for comp_name, discovery in ((('sensor', DISCOVER_SENSORS),
                                  ('light', DISCOVER_LIGHTS),
                                  ('switch', DISCOVER_SWITCHES))):
        component = get_component(comp_name)
        bootstrap.setup_component(hass, component.DOMAIN, config)
        hass.bus.fire(EVENT_PLATFORM_DISCOVERED,
                      {ATTR_SERVICE: discovery,
                       ATTR_DISCOVERED: {}})
    # Start receiving push updates from the controller's event stream.
    ISY.auto_update = True
    return True
def stop(event):
    """ Cleanup the ISY subscription. """
    # Stops the PyISY event-stream updates on Home Assistant shutdown;
    # registered as the EVENT_HOMEASSISTANT_STOP listener in setup().
    ISY.auto_update = False
class ISYDeviceABC(ToggleEntity):
    """ Abstract Class for an ISY device.

    Concrete light/switch/sensor classes override the class-level defaults
    below and inherit state handling, naming, and on/off behaviour.
    """

    # Mapping of state-attribute name -> property name exposed via
    # state_attributes.
    _attrs = {}
    # Keyword-argument names forwarded positionally to node.on().
    _onattrs = []
    # Optional [on_state, off_state] labels used by the `state` property.
    _states = []
    # 'analog' or 'binary'; None means derive from unit_of_measurement.
    _dtype = None
    # Home Assistant domain ('light', 'switch', 'sensor', ...).
    _domain = None
    # Optional explicit name overriding the node's own name.
    _name = None

    def __init__(self, node):
        """Wrap an ISY node and subscribe to its status-change events."""
        # setup properties
        self.node = node
        self.hidden = HIDDEN_STRING in self.raw_name
        # track changes
        self._change_handler = self.node.status. \
            subscribe('changed', self.on_update)

    def __del__(self):
        """ cleanup subscriptions because it is the right thing to do. """
        self._change_handler.unsubscribe()

    @property
    def domain(self):
        """ Returns the domain of the entity. """
        return self._domain

    @property
    def dtype(self):
        """ Returns the data type of the entity (binary or analog). """
        if self._dtype in ['analog', 'binary']:
            return self._dtype
        return 'binary' if self.unit_of_measurement is None else 'analog'

    @property
    def should_poll(self):
        """ Tells Home Assistant not to poll this entity. """
        return False

    @property
    def value(self):
        """ Returns the unclean value from the controller. """
        # pylint: disable=protected-access
        return self.node.status._val

    @property
    def state_attributes(self):
        """ Returns the state attributes for the node. """
        attr = {ATTR_FRIENDLY_NAME: self.name}
        for name, prop in self._attrs.items():
            attr[name] = getattr(self, prop)
        attr = self._attr_filter(attr)
        return attr

    def _attr_filter(self, attr):
        """ Placeholder for attribute filters. """
        # pylint: disable=no-self-use
        return attr

    @property
    def unique_id(self):
        """ Returns the id of this ISY sensor. """
        # pylint: disable=protected-access
        return self.node._id

    @property
    def raw_name(self):
        """ Returns the unclean node name. """
        return str(self._name) \
            if self._name is not None else str(self.node.name)

    @property
    def name(self):
        """ Returns the cleaned name of the node. """
        return self.raw_name.replace(HIDDEN_STRING, '').strip() \
            .replace('_', ' ')

    def update(self):
        """ Update state of the sensor. """
        # ISY objects are automatically updated by the ISY's event stream
        pass

    def on_update(self, event):
        """ Handles the update received event. """
        self.update_ha_state()

    @property
    def is_on(self):
        """ Returns boolean response if the node is on. """
        return bool(self.value)

    @property
    def is_open(self):
        """ Returns boolean response if the node is open. On = Open. """
        return self.is_on

    @property
    def state(self):
        """ Returns the state of the node. """
        if self._states:
            return self._states[0] if self.is_on else self._states[1]
        return self.value

    def turn_on(self, **kwargs):
        """ Turns the device on. """
        # BUGFIX: the original used `is not 'sensor'` — identity comparison
        # on string literals depends on CPython interning and is not a
        # reliable (in)equality test; use != instead.
        if self.domain != 'sensor':
            attrs = [kwargs.get(name) for name in self._onattrs]
            self.node.on(*attrs)
        else:
            _LOGGER.error('ISY cannot turn on sensors.')

    def turn_off(self, **kwargs):
        """ Turns the device off. """
        # BUGFIX: same `is not` -> `!=` correction as in turn_on.
        if self.domain != 'sensor':
            self.node.off()
        else:
            _LOGGER.error('ISY cannot turn off sensors.')

    @property
    def unit_of_measurement(self):
        """ Returns the defined units of measurement or None. """
        try:
            return self.node.units
        except AttributeError:
            return None
| alexkolar/home-assistant | homeassistant/components/isy994.py | Python | mit | 7,199 | [
"VisIt"
] | 90afccd63114b811d23643e39a9f079ca3bdcfec98a0ad28663a4586cc3be0c4 |
""" The Job Scheduling Executor takes the information gained from all previous
optimizers and makes a scheduling decision for the jobs.
Subsequent to this jobs are added into a Task Queue and pilot agents can be submitted.
All issues preventing the successful resolution of a site candidate are discovered
here where all information is available.
This Executor will fail affected jobs meaningfully.
"""
__RCSID__ = "$Id: $"
import random
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Utilities.Time import fromString, toEpoch
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient, getFilesToStage
from DIRAC.WorkloadManagementSystem.Executor.Base.OptimizerExecutor import OptimizerExecutor
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
class JobScheduling(OptimizerExecutor):
"""
The specific Optimizer must provide the following methods:
- optimizeJob() - the main method called for each job
and it can provide:
- initializeOptimizer() before each execution cycle
"""
@classmethod
def initializeOptimizer(cls):
    """ Initialization of the optimizer.

    Called once per executor; creates the shared site-status client and the
    JobDB handle used while optimizing every job.
    """
    cls.siteClient = SiteStatus()
    cls.__jobDB = JobDB()
    return S_OK()
def optimizeJob(self, jid, jobState):
    """ 1. Banned sites are removed from the destination list.
        2. Get input files
        3. Production jobs are sent directly to TQ
        4. Check if staging is necessary

    :param jid: job identifier (unused directly; jobState carries it)
    :param jobState: job state object used for all attribute access/updates
    :return: S_OK/S_ERROR structure
    """
    # Reschedule delay: freshly rescheduled jobs wait an escalating delay
    # before being considered again.
    result = jobState.getAttributes(['RescheduleCounter', 'RescheduleTime', 'ApplicationStatus'])
    if not result['OK']:
        return result
    attDict = result['Value']
    try:
        reschedules = int(attDict['RescheduleCounter'])
    except (ValueError, KeyError):
        return S_ERROR("RescheduleCounter has to be an integer")
    if reschedules != 0:
        delays = self.ex_getOption('RescheduleDelays', [60, 180, 300, 600])
        delay = delays[min(reschedules, len(delays) - 1)]
        waited = toEpoch() - toEpoch(fromString(attDict['RescheduleTime']))
        if waited < delay:
            return self.__holdJob(jobState, 'On Hold: after rescheduling %s' % reschedules, delay)
    # Get the job manifest for the later checks
    result = jobState.getManifest()
    if not result['OK']:
        return S_ERROR("Could not retrieve job manifest: %s" % result['Message'])
    jobManifest = result['Value']
    # Get site requirements
    result = self.__getSitesRequired(jobManifest)
    if not result['OK']:
        return result
    userSites, userBannedSites = result['Value']
    # Get job type
    result = jobState.getAttribute("JobType")
    if not result['OK']:
        return S_ERROR("Could not retrieve job type")
    jobType = result['Value']
    # Get banned sites from DIRAC
    result = self.siteClient.getSites('Banned')
    if not result['OK']:
        return S_ERROR("Cannot retrieve banned sites from JobDB")
    wmsBannedSites = result['Value']
    # If the user has selected any site, filter them and hold the job if not able to run
    if userSites:
        if jobType not in self.ex_getOption('ExcludedOnHoldJobTypes', []):
            result = self.siteClient.getUsableSites(userSites)
            if not result['OK']:
                return S_ERROR("Problem checking userSites for tuple of active/banned/invalid sites")
            usableSites = set(result['Value'])
            bannedSites = []
            invalidSites = []
            for site in userSites:
                if site in wmsBannedSites:
                    bannedSites.append(site)
                elif site not in usableSites:
                    invalidSites.append(site)
            if invalidSites:
                self.jobLog.debug("Invalid site(s) requested: %s" % ','.join(invalidSites))
                if not self.ex_getOption('AllowInvalidSites', True):
                    return self.__holdJob(jobState, "Requested site(s) %s are invalid" % ",".join(invalidSites))
            if bannedSites:
                self.jobLog.debug("Banned site(s) %s ignored" % ",".join(bannedSites))
                if not usableSites:
                    return self.__holdJob(jobState, "Requested site(s) %s are inactive" % ",".join(bannedSites))
            if not usableSites:
                return self.__holdJob(jobState, "No requested site(s) are active/valid")
            userSites = list(usableSites)
    checkPlatform = self.ex_getOption('CheckPlatform', False)
    jobPlatform = jobManifest.getOption("Platform", None)
    # First check that the platform is valid (in OSCompatibility list)
    if checkPlatform and jobPlatform:
        result = gConfig.getOptionsDict('/Resources/Computing/OSCompatibility')
        if not result['OK']:
            return S_ERROR("Unable to get OSCompatibility list")
        allPlatforms = result['Value']
        if jobPlatform not in allPlatforms:
            self.jobLog.error("Platform not supported", jobPlatform)
            return S_ERROR("Platform %s is not supported" % jobPlatform)
    # Filter the userSites by the platform selection (if there is one)
    if checkPlatform and userSites:
        if jobPlatform:
            result = self.__filterByPlatform(jobPlatform, userSites)
            if not result['OK']:
                self.jobLog.error("Failed to filter job sites by platform", result['Message'])
                return S_ERROR("Failed to filter job sites by platform")
            userSites = result['Value']
            if not userSites:
                # No sites left after filtering -> Invalid platform/sites combination
                self.jobLog.error("No selected sites match platform", jobPlatform)
                return S_ERROR("No selected sites match platform '%s'" % jobPlatform)
    # Check if there is input data
    result = jobState.getInputData()
    if not result['OK']:
        self.jobLog.error("Cannot get input data", result['Message'])
        return S_ERROR("Failed to get input data from JobDB")
    if not result['Value']:
        # No input data? Just send to TQ
        return self.__sendToTQ(jobState, jobManifest, userSites, userBannedSites)
    self.jobLog.verbose("Has an input data requirement")
    inputData = result['Value']
    # ===================================================================================
    # Production jobs are sent to TQ, but first we have to verify if staging is necessary
    # ===================================================================================
    if jobType in Operations().getValue('Transformations/DataProcessing', []):
        self.jobLog.info("Production job: sending to TQ, but first checking if staging is requested")
        res = getFilesToStage(inputData,
                              jobState=jobState,
                              checkOnlyTapeSEs=self.ex_getOption('CheckOnlyTapeSEs', True),
                              jobLog=self.jobLog)
        if not res['OK']:
            return self.__holdJob(jobState, res['Message'])
        if res['Value']['absentLFNs']:
            # Some files do not exist at all... set the job Failed
            # Reverse errors
            reasons = {}
            for lfn, reason in res['Value']['absentLFNs'].iteritems():
                reasons.setdefault(reason, []).append(lfn)
            for reason, lfns in reasons.iteritems():
                # Some files are missing in the FC or in SEs, fail the job
                self.jobLog.error(reason, ','.join(lfns))
            error = ','.join(reasons)
            return S_ERROR(error)
        if res['Value']['failedLFNs']:
            return self.__holdJob(jobState, "Couldn't get storage metadata of some files")
        stageLFNs = res['Value']['offlineLFNs']
        if stageLFNs:
            res = self.__checkStageAllowed(jobState)
            if not res['OK']:
                return res
            if not res['Value']:
                return S_ERROR("Stage not allowed")
            self.__requestStaging(jobState, stageLFNs)
            return S_OK()
        else:
            # No staging required
            onlineSites = res['Value']['onlineSites']
            if onlineSites:
                # Set the online site(s) first
                userSites = set(userSites)
                onlineSites &= userSites
                userSites = list(onlineSites) + list(userSites - onlineSites)
            return self.__sendToTQ(jobState, jobManifest, userSites, userBannedSites, onlineSites=onlineSites)
    # ===================================================
    # From now on we know it's a user job with input data
    # ===================================================
    idAgent = self.ex_getOption('InputDataAgent', 'InputData')
    result = self.retrieveOptimizerParam(idAgent)
    if not result['OK']:
        self.jobLog.error("Could not retrieve input data info", result['Message'])
        return S_ERROR("Could not retrieve input data info")
    opData = result['Value']
    if 'SiteCandidates' not in opData:
        return S_ERROR("No possible site candidates")
    # Filter input data sites with user requirement
    siteCandidates = list(opData['SiteCandidates'])
    self.jobLog.info("Site candidates are %s" % siteCandidates)
    if userSites:
        siteCandidates = list(set(siteCandidates) & set(userSites))
    siteCandidates = self._applySiteFilter(siteCandidates, banned=userBannedSites)
    if not siteCandidates:
        return S_ERROR("Impossible InputData * Site requirements")
    idSites = {}
    for site in siteCandidates:
        idSites[site] = opData['SiteCandidates'][site]
    # Check if sites have correct count of disk+tape replicas
    numData = len(inputData)
    errorSites = set()
    for site in idSites:
        if numData != idSites[site]['disk'] + idSites[site]['tape']:
            self.jobLog.error("Site candidate %s does not have all the input data" % site)
            errorSites.add(site)
    for site in errorSites:
        idSites.pop(site)
    if not idSites:
        return S_ERROR("Site candidates do not have all the input data")
    # Check if staging is required
    stageRequired, siteCandidates = self.__resolveStaging(inputData, idSites)
    if not siteCandidates:
        return S_ERROR("No destination sites available")
    # Is any site active?
    stageSites = self._applySiteFilter(siteCandidates, banned=wmsBannedSites)
    if not stageSites:
        return self.__holdJob(jobState, "Sites %s are inactive or banned" % ", ".join(siteCandidates))
    # If no staging is required send to TQ
    if not stageRequired:
        # Use siteCandidates and not stageSites because active and banned sites
        # will be taken into account on matching time
        return self.__sendToTQ(jobState, jobManifest, siteCandidates, userBannedSites)
    # Check if the user is allowed to stage
    if self.ex_getOption("RestrictDataStage", False):
        res = self.__checkStageAllowed(jobState)
        if not res['OK']:
            return res
        if not res['Value']:
            return S_ERROR("Stage not allowed")
    # Get stageSites[0] because it has already been randomized and it's as good as any in stageSites
    stageSite = stageSites[0]
    self.jobLog.verbose(" Staging site will be %s" % (stageSite))
    stageData = idSites[stageSite]
    # Set as if everything has already been staged
    stageData['disk'] += stageData['tape']
    stageData['tape'] = 0
    # Set the site info back to the original dict to save afterwards
    opData['SiteCandidates'][stageSite] = stageData
    stageRequest = self.__preRequestStaging(jobManifest, stageSite, opData)
    if not stageRequest['OK']:
        return stageRequest
    stageLFNs = stageRequest['Value']
    result = self.__requestStaging(jobState, stageLFNs)
    if not result['OK']:
        return result
    stageLFNs = result['Value']
    self.__updateSharedSESites(jobManifest, stageSite, stageLFNs, opData)
    # Save the optimizer data again
    self.jobLog.verbose('Updating %s Optimizer Info:' % (idAgent), opData)
    result = self.storeOptimizerParam(idAgent, opData)
    if not result['OK']:
        return result
    return self.__setJobSite(jobState, stageSites)
def _applySiteFilter(self, sites, banned=False):
""" Filters out banned sites
"""
if not sites:
return sites
filtered = set(sites)
if banned and isinstance(banned, (list, set, dict)):
filtered -= set(banned)
return list(filtered)
def __holdJob(self, jobState, holdMsg, delay=0):
    """Put the job on hold: freeze the optimizer task and record the reason.

    :param holdMsg: human-readable hold reason stored as application status
    :param delay: explicit freeze time in seconds; 0 means use the
                  configured HoldTime option (default 300)
    """
    freezeTime = delay if delay else self.ex_getOption("HoldTime", 300)
    self.freezeTask(freezeTime)
    self.jobLog.info("On hold -> %s" % holdMsg)
    return jobState.setAppStatus(holdMsg, source=self.ex_optimizerName())
def __getSitesRequired(self, jobManifest):
    """Returns any candidate sites specified by the job or sites that have been
    banned and could affect the scheduling decision.

    :return: S_OK((sites, bannedSites)) or S_ERROR when the requested sites
             are all banned
    """
    # "BannedSite" (singular) is the legacy spelling of the option.
    vetoedSites = jobManifest.getOption("BannedSites", []) or jobManifest.getOption("BannedSite", [])
    if vetoedSites:
        self.jobLog.info("Banned %s sites" % ", ".join(vetoedSites))
    # TODO: Only accept known sites after removing crap like ANY set in the original manifest
    requestedSites = [site for site in jobManifest.getOption("Site", [])
                      if site.strip().lower() not in ("any", "")]
    if requestedSites:
        if len(requestedSites) == 1:
            self.jobLog.info("Single chosen site %s specified" % (requestedSites[0]))
        else:
            self.jobLog.info("Multiple sites requested: %s" % ','.join(requestedSites))
        requestedSites = self._applySiteFilter(requestedSites, banned=vetoedSites)
        if not requestedSites:
            return S_ERROR("Impossible site requirement")
    return S_OK((requestedSites, vetoedSites))
def __filterByPlatform(self, jobPlatform, userSites):
    """ Filters out sites that have no CE with a matching platform.

    :param jobPlatform: OS/platform string requested by the job
    :param userSites: candidate site names
    :return: S_OK(list of sites with at least one matching CE)
    """
    basePath = "/Resources/Sites"
    matchingSites = set()
    for siteName in userSites:
        if "." not in siteName:
            # Valid site names are of the form <grid>.<name>.<country>
            self.jobLog.info("Skipped invalid site name: %s" % siteName)
            continue
        gridName = siteName.split('.')[0]
        cesPath = cfgPath(basePath, gridName, siteName, "CEs")
        result = gConfig.getSections(cesPath)
        if not result['OK']:
            self.jobLog.info("Failed to get CEs at site %s." % siteName)
            continue
        for ceName in result['Value']:
            # Site qualifies if any of its CEs declares the requested OS
            if jobPlatform == gConfig.getValue(cfgPath(cesPath, ceName, "OS")):
                matchingSites.add(siteName)
    return S_OK(list(matchingSites))
def __sendToTQ(self, jobState, jobManifest, sites, bannedSites, onlineSites=None):
    """This method sends jobs to the task queue agent and if candidate sites
    are defined, updates job JDL accordingly.

    :param jobState: job state object to update
    :param jobManifest: the job manifest; requirements are read from it and
                        the JobRequirements section is written back
    :param sites: candidate site names (may be empty)
    :param bannedSites: site names the job must not run on
    :param onlineSites: sites where all input data is already on disk
    :return: result of setNextOptimizer, or S_ERROR
    """
    # Generate Tags from specific requirements
    tagList = []
    if "MaxRAM" in jobManifest:
        maxRAM = jobManifest.getOption("MaxRAM", 0)
        if maxRAM:
            tagList.append("%dGB" % maxRAM)
    if "NumberOfProcessors" in jobManifest:
        nProcessors = jobManifest.getOption("NumberOfProcessors", 0)
        if nProcessors:
            tagList.append("%dProcessors" % nProcessors)
            tagList.append("MultiProcessor")
    if "WholeNode" in jobManifest:
        if jobManifest.getOption("WholeNode", "").lower() in ["1", "yes", "true"]:
            tagList.append("WholeNode")
            tagList.append("MultiProcessor")
    if "Tags" in jobManifest:
        tagList.extend(jobManifest.getOption("Tags", []))
    if "Tag" in jobManifest:
        tagList.extend(jobManifest.getOption("Tag", []))
    if tagList:
        jobManifest.setOption("Tags", ", ".join(tagList))
    reqSection = "JobRequirements"
    if reqSection in jobManifest:
        result = jobManifest.getSection(reqSection)
    else:
        result = jobManifest.createSection(reqSection)
    if not result['OK']:
        # BUGFIX: the original did `"Cannot create %s: %s" % reqSection`,
        # feeding a single argument to a two-placeholder format, which
        # raises TypeError instead of logging the error.
        self.jobLog.error("Cannot create %s: %s" % (reqSection, result['Message']))
        return S_ERROR("Cannot create %s in the manifest" % reqSection)
    reqCfg = result['Value']
    if sites:
        reqCfg.setOption("Sites", ", ".join(sites))
    if bannedSites:
        reqCfg.setOption("BannedSites", ", ".join(bannedSites))
    # Job multivalue requirement keys are specified as singles in the job descriptions
    # but for backward compatibility can be also plurals
    for key in ('SubmitPools', "SubmitPool", "GridMiddleware", "PilotTypes", "PilotType",
                "JobType", "GridRequiredCEs", "GridCE", "Tags"):
        reqKey = key
        if key == "JobType":
            reqKey = "JobTypes"
        elif key == "GridRequiredCEs" or key == "GridCE":
            reqKey = "GridCEs"
        elif key == "SubmitPools" or key == "SubmitPool":
            reqKey = "SubmitPools"
        elif key == "PilotTypes" or key == "PilotType":
            reqKey = "PilotTypes"
        if key in jobManifest:
            reqCfg.setOption(reqKey, ", ".join(jobManifest.getOption(key, [])))
    result = self.__setJobSite(jobState, sites, onlineSites=onlineSites)
    if not result['OK']:
        return result
    self.jobLog.info("Done")
    return self.setNextOptimizer(jobState)
def __resolveStaging(self, inputData, idSites):
    """Decide whether staging is needed.

    :param inputData: list of input LFNs
    :param idSites: dict site -> {'disk': n, 'tape': n} replica counts
    :return: (False, sites holding everything on disk) when no staging is
             needed, otherwise (True, shuffled sites with most disk replicas)
    """
    fullyOnDisk = []
    bestDiskCount = 0
    bestDiskSites = []
    for siteName in idSites:
        tapeReplicas = idSites[siteName]['tape']
        diskReplicas = idSites[siteName]['disk']
        if tapeReplicas > 0:
            self.jobLog.verbose("%s tape replicas on site %s" % (tapeReplicas, siteName))
        if diskReplicas > 0:
            self.jobLog.verbose("%s disk replicas on site %s" % (diskReplicas, siteName))
            if diskReplicas == len(inputData):
                fullyOnDisk.append(siteName)
        if diskReplicas > bestDiskCount:
            bestDiskCount = diskReplicas
            bestDiskSites = [siteName]
        elif diskReplicas == bestDiskCount:
            bestDiskSites.append(siteName)
    # Any site holding every file on disk wins outright: no staging needed.
    if fullyOnDisk:
        self.jobLog.info("No staging required")
        return (False, fullyOnDisk)
    self.jobLog.info("Staging required")
    if len(bestDiskSites) > 1:
        # Shuffle so no single site is systematically preferred for staging.
        random.shuffle(bestDiskSites)
    return (True, bestDiskSites)
def __preRequestStaging(self, jobManifest, stageSite, opData):
    """Work out, per tape SE, which LFNs must be staged at *stageSite*.

    :param jobManifest: job manifest (VO and InputDataPolicy are read)
    :param stageSite: site chosen for staging
    :param opData: InputData optimizer output holding the replica map
    :return: S_OK({seName: [lfns]}) or S_ERROR
    """
    tapeSEs = []
    diskSEs = []
    vo = jobManifest.getOption('VirtualOrganization')
    inputDataPolicy = jobManifest.getOption('InputDataPolicy', 'Protocol')
    connectionLevel = 'DOWNLOAD' if 'download' in inputDataPolicy.lower() else 'PROTOCOL'
    # Allow staging from SEs accessible by protocol
    result = DMSHelpers(vo=vo).getSEsForSite(stageSite, connectionLevel=connectionLevel)
    if not result['OK']:
        return S_ERROR('Could not determine SEs for site %s' % stageSite)
    siteSEs = result['Value']
    # Partition the site's readable SEs into tape and disk
    for seName in siteSEs:
        se = StorageElement(seName, vo=vo)
        seStatus = se.getStatus()
        if not seStatus['OK']:
            return seStatus
        seStatus = seStatus['Value']
        if seStatus['Read'] and seStatus['TapeSE']:
            tapeSEs.append(seName)
        if seStatus['Read'] and seStatus['DiskSE']:
            diskSEs.append(seName)
    if not tapeSEs:
        return S_ERROR("No Local SEs for site %s" % stageSite)
    self.jobLog.verbose("Tape SEs are %s" % (", ".join(tapeSEs)))
    # I swear this is horrible DM code it's not mine.
    # Eternity of hell to the inventor of the Value of Value of Success of...
    inputData = opData['Value']['Value']['Successful']
    stageLFNs = {}
    lfnToStage = []
    for lfn in inputData:
        replicas = inputData[lfn]
        # Check SEs
        seStage = []
        for seName in replicas:
            if seName in diskSEs:
                # This lfn is in disk. Skip it
                seStage = []
                break
            if seName not in tapeSEs:
                # This lfn is not in this tape SE. Check next SE
                continue
            seStage.append(seName)
        for seName in seStage:
            if seName not in stageLFNs:
                stageLFNs[seName] = []
            stageLFNs[seName].append(lfn)
            if lfn not in lfnToStage:
                lfnToStage.append(lfn)
    if not stageLFNs:
        return S_ERROR("Cannot find tape replicas")
    # Check if any LFN is in more than one SE
    # If that's the case, try to stage from the SE that has more LFNs to stage to group the request
    # 1.- Get the SEs ordered by ascending replicas
    # NOTE(review): reversed() yields a one-shot iterator; it is exhausted
    # after the first pass of the outer loop, so subsequent LFNs are not
    # de-duplicated — confirm whether this is intended.
    sortedSEs = reversed(sorted([(len(stageLFNs[seName]), seName) for seName in stageLFNs]))
    for lfn in lfnToStage:
        found = False
        # 2.- Traverse the SEs
        for _stageCount, seName in sortedSEs:
            if lfn in stageLFNs[seName]:
                # 3.- If first time found, just mark as found. Next time delete the replica from the request
                if found:
                    stageLFNs[seName].remove(lfn)
                else:
                    found = True
            # 4.-If empty SE, remove
            if not stageLFNs[seName]:
                stageLFNs.pop(seName)
    return S_OK(stageLFNs)
  def __requestStaging(self, jobState, stageLFNs):
    """ Actual request for staging LFNs through the StorageManagerClient.

        :param jobState: job state object used to update the job status/attributes
        :param stageLFNs: dict { seName : [ lfn, ... ] } of replicas to bring online
        :return: S_OK( stageLFNs ) on success, S_ERROR otherwise
    """
    self.jobLog.verbose("Stage request will be \n\t%s" % "\n\t".join(
        ["%s:%s" % (lfn, stageLFNs[lfn]) for lfn in stageLFNs]))
    stagerClient = StorageManagerClient()
    # Mark the job as Staging *before* submitting the request, so a failure
    # between the two calls leaves the job in a consistent "to be sent" state.
    result = jobState.setStatus(self.ex_getOption('StagingStatus', 'Staging'),
                                self.ex_getOption('StagingMinorStatus', 'Request To Be Sent'),
                                appStatus="",
                                source=self.ex_optimizerName())
    if not result['OK']:
      return result
    # The stager will call back updateJobFromStager once replicas are online.
    result = stagerClient.setRequest(stageLFNs, 'WorkloadManagement',
                                     'updateJobFromStager@WorkloadManagement/JobStateUpdate',
                                     int(jobState.jid))
    if not result['OK']:
      self.jobLog.error("Could not send stage request: %s" % result['Message'])
      return S_ERROR("Problem sending staging request")
    rid = str(result['Value'])
    self.jobLog.info("Stage request %s sent" % rid)
    # Persist the request id so later optimizer cycles can track this request.
    self.storeOptimizerParam('StageRequest', rid)
    result = jobState.setStatus(self.ex_getOption('StagingStatus', 'Staging'),
                                self.ex_getOption('StagingMinorStatus', 'Request Sent'),
                                appStatus="",
                                source=self.ex_optimizerName())
    if not result['OK']:
      return result
    return S_OK(stageLFNs)
  def __updateSharedSESites(self, jobManifest, stageSite, stagedLFNs, opData):
    """ Credit other candidate sites that share the SEs being staged to.

        For every candidate site other than the staging site, if a staged LFN
        lands on an SE that is also "close" (disk-accessible) for that site,
        bump the site's disk replica counter and decrement its tape counter.

        :param jobManifest: manifest used to read the VO of the job
        :param stageSite: site for which staging was requested (skipped here)
        :param stagedLFNs: dict { seName : [ lfn, ... ] } that was sent to the stager
        :param opData: optimizer data holding 'SiteCandidates' and the replica info
    """
    siteCandidates = opData['SiteCandidates']
    # Cache of SE status lookups, shared across all candidate sites.
    seStatus = {}
    vo = jobManifest.getOption('VirtualOrganization')
    for siteName in siteCandidates:
      if siteName == stageSite:
        continue
      self.jobLog.verbose("Checking %s for shared SEs" % siteName)
      siteData = siteCandidates[siteName]
      result = getSEsForSite(siteName)
      if not result['OK']:
        continue
      closeSEs = result['Value']
      diskSEs = []
      for seName in closeSEs:
        # If we don't have the SE status get it and store it
        if seName not in seStatus:
          seStatus[seName] = StorageElement(seName, vo=vo).status()
        # get the SE status from mem and add it if its disk
        status = seStatus[seName]
        if status['Read'] and status['DiskSE']:
          diskSEs.append(seName)
      self.jobLog.verbose("Disk SEs for %s are %s" % (siteName, ", ".join(diskSEs)))
      # Replica info is nested as Value/Value/Successful by the upstream DM call.
      lfnData = opData['Value']['Value']['Successful']
      for seName in stagedLFNs:
        # If the SE is not close then skip it
        if seName not in closeSEs:
          continue
        for lfn in stagedLFNs[seName]:
          self.jobLog.verbose("Checking %s for %s" % (seName, lfn))
          # LFNs sent to the stager should always be in the replica data.
          if lfn not in lfnData:
            continue
          # Check if it's already on disk at the site
          onDisk = False
          for siteSE in lfnData[lfn]:
            if siteSE in diskSEs:
              self.jobLog.verbose("%s on disk for %s" % (lfn, siteSE))
              onDisk = True
          # If not on disk, then update!
          if not onDisk:
            self.jobLog.verbose("Setting LFN to disk for %s" % (seName))
            siteData['disk'] += 1
            siteData['tape'] -= 1
def __setJobSite(self, jobState, siteList, onlineSites=None):
""" Set the site attribute
"""
if onlineSites is None:
onlineSites = []
numSites = len(siteList)
if numSites == 0:
self.jobLog.info("Any site is candidate")
return jobState.setAttribute("Site", "ANY")
elif numSites == 1:
self.jobLog.info("Only site %s is candidate" % siteList[0])
return jobState.setAttribute("Site", siteList[0])
# If the job has input data, the online sites are hosting the data
if len(onlineSites) == 1:
siteName = "Group.%s" % ".".join(list(onlineSites)[0].split(".")[1:])
self.jobLog.info("Group %s is candidate" % siteName)
elif onlineSites:
# More than one site with input
siteName = "MultipleInput"
self.jobLog.info("Several input sites are candidate: %s" % ','.join(onlineSites))
else:
# No input site reported (could be a user job)
siteName = "Multiple"
self.jobLog.info("Multiple sites are candidate")
return jobState.setAttribute("Site", siteName)
def __checkStageAllowed(self, jobState):
"""Check if the job credentials allow to stage date """
result = jobState.getAttribute("OwnerGroup")
if not result['OK']:
self.jobLog.error("Cannot retrieve OwnerGroup from DB: %s" % result['Message'])
return S_ERROR("Cannot get OwnerGroup")
group = result['Value']
return S_OK(Properties.STAGE_ALLOWED in Registry.getPropertiesForGroup(group))
| chaen/DIRAC | WorkloadManagementSystem/Executor/JobScheduling.py | Python | gpl-3.0 | 25,924 | [
"DIRAC"
] | ae4d34652f5ef797830e21bcee976cd5330d1bded783f5ed2caff36d9fae73f1 |
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
import dill
import copy
import hyperspy.api as hs
from hyperspy.samfire_utils.samfire_kernel import multi_kernel
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.samfire_utils.samfire_worker import create_worker
class Mock_queue(object):
    """Minimal queue stand-in used by the worker tests.

    Every value handed to :meth:`put` is recorded, in call order, on the
    public ``var`` list so tests can inspect what was enqueued.
    """

    def __init__(self):
        # Public record of everything put on the "queue".
        self.var = list()

    def put(self, value):
        # In-place extend keeps any external references to ``var`` valid.
        self.var += [value]
def generate_test_model():
    """Build a synthetic SAMFire test fixture.

    Creates a 15x15 map of spectra made of two Lorentzians and one Gaussian
    (with a circular region where the parameters differ), adds Poisson noise,
    and returns a fresh, unfitted model over a 15x7 slice together with the
    three ground-truth components: ``(model, lor1, gauss, lor2)``.
    """
    # import hyperspy.api as hs
    from hyperspy.signals import Signal1D
    from hyperspy.components1d import (Gaussian, Lorentzian)
    import numpy as np
    from scipy.ndimage import gaussian_filter
    total = None
    # blurs = [0., 0.5, 1., 2.,5.]
    blurs = [1.5]
    # Fixed seed so the fixture is reproducible across test runs.
    rnd = np.random.RandomState(17)
    radius = 5
    domain = 15
    # Boolean mask of a circle centred in the domain; parameters are altered
    # inside this region to give SAMFire a spatial structure to discover.
    cent = (domain // 2, domain // 2)
    y, x = np.ogrid[-cent[0]:domain - cent[0], -cent[1]:domain - cent[1]]
    mask = x * x + y * y <= radius * radius
    lor_map = None
    for blur in blurs:
        s = Signal1D(np.ones((domain, domain, 1024)))
        cent = tuple([int(0.5 * i) for i in s.data.shape[:-1]])
        m0 = s.create_model()
        # Ground-truth component 1: broad Lorentzian, centre shifted in circle.
        gs01 = Lorentzian()
        m0.append(gs01)
        gs01.gamma.map['values'][:] = 50
        gs01.gamma.map['is_set'][:] = True
        gs01.centre.map['values'][:] = 300
        gs01.centre.map['values'][mask] = 400
        gs01.centre.map['values'] = gaussian_filter(
            gs01.centre.map['values'],
            blur)
        gs01.centre.map['is_set'][:] = True
        gs01.A.map['values'][:] = 100 * \
            rnd.rand(domain, domain) + 300000
        gs01.A.map['values'][mask] *= 0.75
        gs01.A.map['values'] = gaussian_filter(gs01.A.map['values'], blur)
        gs01.A.map['is_set'][:] = True
        # Ground-truth component 2: Gaussian with the opposite centre shift.
        gs02 = Gaussian()
        m0.append(gs02)
        gs02.sigma.map['values'][:] = 15
        gs02.sigma.map['is_set'][:] = True
        gs02.centre.map['values'][:] = 400
        gs02.centre.map['values'][mask] = 300
        gs02.centre.map['values'] = gaussian_filter(
            gs02.centre.map['values'],
            blur)
        gs02.centre.map['is_set'][:] = True
        gs02.A.map['values'][:] = 50000
        gs02.A.map['is_set'][:] = True
        # Ground-truth component 3: narrow Lorentzian, zero amplitude in the
        # circle -- the "optional" component SAMFire should switch off there.
        gs03 = Lorentzian()
        m0.append(gs03)
        gs03.gamma.map['values'][:] = 20
        gs03.gamma.map['is_set'][:] = True
        gs03.centre.map['values'][:] = 100
        gs03.centre.map['values'][mask] = 900
        gs03.centre.map['is_set'][:] = True
        gs03.A.map['values'][:] = 100 * \
            rnd.rand(domain, domain) + 50000
        gs03.A.map['values'][mask] *= 0.
        gs03.A.map['is_set'][:] = True
        s11 = m0.as_signal(show_progressbar=False, parallel=False)
        if total is None:
            total = s11.data.copy()
            lor_map = gs01.centre.map['values'].copy()
        else:
            total = np.concatenate((total, s11.data), axis=1)
            lor_map = np.concatenate(
                (lor_map, gs01.centre.map['values'].copy()), axis=1)
    s = Signal1D(total)
    # Poisson counting noise with a small offset to avoid exact zeros.
    s.data = rnd.poisson(lam=s.data) + 0.1
    s.estimate_poissonian_noise_variance()
    # Fresh model over a slice of the map, seeded near (not at) the truth.
    m = s.inav[:, :7].create_model()
    g = Gaussian()
    l1 = Lorentzian()
    l2 = Lorentzian()
    g.sigma.value = 50
    g.centre.value = 400
    g.A.value = 50000
    l1.gamma.value = 40
    l1.centre.value = 300
    l1.A.value = 300000
    l2.gamma.value = 15
    l2.centre.value = 100
    l2.A.value = 50000
    # Bounds for the optional component, used by bounded fitting in the tests.
    l2.centre.bmin = 0
    l2.centre.bmax = 200
    l2.A.bmin = 30000
    l2.A.bmax = 100000
    l2.gamma.bmin = 0
    l2.gamma.bmax = 60
    m.extend([g, l1, l2])
    m.assign_current_values_to_all()
    l2.active_is_multidimensional = True
    return m, gs01, gs02, gs03
class TestSamfireEmpty:
    """Tests of SAMFire bookkeeping on a model that has not been fitted."""
    def setup_method(self, method):
        # Flat signal (all ones + 3) -- the tests here never rely on a fit.
        self.shape = (7, 15)
        s = hs.signals.Signal1D(np.ones(self.shape + (1024,)) + 3.)
        s.estimate_poissonian_noise_variance()
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m.append(hs.model.components1D.Lorentzian())
        m.append(hs.model.components1D.Lorentzian())
        self.model = m
    @pytest.mark.parallel
    def test_setup(self):
        # _setup should create the test dump and the worker pool.
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        assert samf.metadata._gt_dump is None
        assert samf.pool is None
        samf._setup(ipyparallel=False)
        assert samf.metadata._gt_dump is not None
        assert samf.pool is not None
    def test_samfire_init_marker(self):
        # The pixel marker starts out all-zero (nothing fitted, nothing queued).
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        np.testing.assert_array_almost_equal(samf.metadata.marker,
                                             np.zeros(self.shape))
    def test_samfire_init_model(self):
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        assert samf.model is m
    def test_samfire_init_metadata(self):
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        assert isinstance(samf.metadata, DictionaryTreeBrowser)
    def test_samfire_init_strategy_list(self):
        from hyperspy.samfire import StrategyList
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        assert isinstance(samf.strategies, StrategyList)
    def test_samfire_init_strategies(self):
        # Default strategies: local reduced-chi-squared then global histogram.
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        from hyperspy.samfire_utils.local_strategies import ReducedChiSquaredStrategy
        from hyperspy.samfire_utils.global_strategies import HistogramStrategy
        assert isinstance(samf.strategies[0],
                          ReducedChiSquaredStrategy)
        assert isinstance(samf.strategies[1], HistogramStrategy)
    def test_samfire_init_fig(self):
        m = self.model
        samf = m.create_samfire(workers=1, setup=False)
        assert samf._figure is None
    def test_samfire_init_default(self):
        # Default worker count is one fewer than the CPU count.
        m = self.model
        from multiprocessing import cpu_count
        samf = m.create_samfire(setup=False)
        assert samf._workers == cpu_count() - 1
        assert np.allclose(samf.metadata.marker, np.zeros(self.shape))
    def test_optional_components(self):
        # Optional components are normalized to integer indices and enabled.
        m = self.model
        m[-1].active_is_multidimensional = False
        samf = m.create_samfire(setup=False)
        samf.optional_components = [m[0], 1]
        samf._enable_optional_components()
        assert m[0].active_is_multidimensional
        assert m[1].active_is_multidimensional
        assert np.all([isinstance(a, int)
                       for a in samf.optional_components])
        np.testing.assert_equal(samf.optional_components, [0, 1])
    def test_swap_dict_and_model(self):
        # _swap_dict_and_model exchanges one pixel's values with a result dict.
        m = self.model
        for i in range(len(m)):
            for ip, p in enumerate(m[i].parameters):
                p.map['values'][0, 0] = 3.0 + i + ip
                p.map['std'][0, 0] = 2.44 + i + ip
                p.map['is_set'][0, 0] = True
        m[1].active_is_multidimensional = True
        m[1]._active_array[0, 0] = False
        assert m[1]._active_array[1, 0]
        m.chisq.data[0, 0] = 1200.
        m.dof.data[0, 0] = 1.
        small_m = m.inav[0, 0]
        d = {'chisq.data': np.array(small_m.chisq.data[0]),
             'dof.data': np.array(small_m.dof.data[0]),
             'components': {component.name: {parameter.name: parameter.map for
                                             parameter in component.parameters}
                            for component in small_m if component.active}
             }
        d = copy.deepcopy(d)
        samf = m.create_samfire(setup=False)
        samf._swap_dict_and_model((1, 0), d)
        # Pixel (1, 0) now carries the dict's values; the dict got the old ones.
        assert m.chisq.data[1, 0] == 1200.
        assert m.dof.data[1, 0] == 1.
        assert d['dof.data'] == 0.
        assert np.isnan(d['chisq.data'])
        assert np.all(~m[1]._active_array[:2, 0])
        for c in m:
            if c.active:
                for p in c.parameters:
                    assert (
                        p.map['values'][
                            0, 0] == p.map['values'][
                            1, 0])
                    assert p.map['std'][0, 0] == p.map['std'][1, 0]
                    assert (
                        p.map['is_set'][
                            0, 0] == p.map['is_set'][
                            1, 0])
    def test_next_pixels(self):
        # _next_pixels returns the highest-marker pixels, up to the given count.
        m = self.model
        samf = m.create_samfire(setup=False)
        ans = samf._next_pixels(3)
        assert len(ans) == 0
        ind_list = [(1, 2), (0, 1), (3, 3), (4, 6)]
        for ind in ind_list:
            samf.metadata.marker[ind] += 2.
        ans = samf._next_pixels(10)
        assert len(ans) == 4
        for ind in ans:
            assert ind in ind_list
        for n, ind in enumerate(ind_list):
            samf.metadata.marker[ind] += n
        ans = samf._next_pixels(10)
        assert ans == [(4, 6), ]
    def test_change_strategy(self):
        # Switching strategies re-encodes done-pixel markers for the new index.
        m = self.model
        samf = m.create_samfire(setup=False)
        from hyperspy.samfire_utils.local_strategies import ReducedChiSquaredStrategy
        from hyperspy.samfire_utils.global_strategies import HistogramStrategy
        ind = (0, 0)
        samf.metadata.marker[ind] = -2
        samf.strategies.append(ReducedChiSquaredStrategy())
        samf.change_strategy(2)
        assert samf.metadata.marker[ind] == -1
        assert samf._active_strategy_ind == 2
        samf.change_strategy(samf.strategies[1])
        assert samf._active_strategy_ind == 1
        assert samf.metadata.marker[ind] == -2
        new_strat = HistogramStrategy()
        samf.strategies.append(new_strat)
        samf.change_strategy(3)
        assert samf._active_strategy_ind == 3
        assert samf.active_strategy is new_strat
        assert samf.metadata.marker[ind] == -2
class TestSamfireMain:
    """End-to-end SAMFire run against the synthetic ground-truth model."""
    def setup_method(self, method):
        self.model, self.lor1, self.g, self.lor2 = generate_test_model()
        self.shape = (7, 15)
    @pytest.mark.xfail(
        reason="Sometimes it fails in CirCleCI for no know reason.")
    def test_multiprocessed(self, mpl_cleanup):
        # Full multiprocessing run; recovered parameters should match the
        # ground truth to within 30% on all successfully fitted pixels.
        self.model.fit()
        samf = self.model.create_samfire(ipyparallel=False)
        samf.plot_every = np.nan
        samf.strategies[0].radii = 1.
        samf.strategies.remove(1)
        samf.optional_components = [self.model[2]]
        samf.start(bounded=True)
        # let at most 3 pixels to fail randomly.
        fitmask = samf.metadata.marker == -np.ones(self.shape)
        print('number of pixels failed: {}'.format(
            np.prod(self.shape) - np.sum(fitmask)))
        assert np.sum(fitmask) >= np.prod(self.shape) - 5
        for o_c, n_c in zip([self.g, self.lor1, self.lor2], self.model):
            for p, p1 in zip(o_c.parameters, n_c.parameters):
                # Only compare where the component is active AND the fit worked.
                if n_c._active_array is not None:
                    mask = np.logical_and(n_c._active_array, fitmask)
                else:
                    mask = fitmask
                print(o_c._id_name, n_c._id_name, p1._id_name, p._id_name)
                print(p.map['values'][:4, :4])
                print('----------------------------')
                print(p1.map['values'][:4, :4])
                print('ooooooooooooooooooooooooooooooooooooooooooo')
                np.testing.assert_allclose(
                    p1.map['values'][mask],
                    p.map['values'][:7, :15][mask],
                    rtol=0.3)
def test_create_worker_defaults():
    """A freshly created worker has no queues and pristine best-result state."""
    worker = create_worker('worker')
    assert worker.identity == 'worker'
    assert worker.shared_queue is None
    assert worker.result_queue is None
    assert worker.individual_queue is None
    # No fit yet: best AICc/dof are infinite so any real fit will beat them.
    np.testing.assert_equal(worker.best_AICc, np.inf)
    np.testing.assert_equal(worker.best_values, [])
    np.testing.assert_equal(worker.best_dof, np.inf)
    np.testing.assert_equal(worker.last_time, 1)
class TestSamfireWorker:
    """Tests of a single SAMFire worker fitting one pixel of a stored model."""
    def setup_method(self, method):
        # Build a 2-pixel signal out of one Gaussian + two Lorentzians, then a
        # 6-component model (3 real + 3 decoy) the worker must prune down.
        np.random.seed(17)
        ax = np.arange(250)
        self.widths = [5, 10, 15]
        self.centres = [50, 105, 180]
        self.areas = [5000, 10000, 20000]
        g = hs.model.components1D.Gaussian()
        g.sigma.value = self.widths[0]
        g.A.value = self.areas[0]
        l = hs.model.components1D.Lorentzian()
        l.gamma.value = self.widths[1]
        l.A.value = self.areas[1]
        l1 = hs.model.components1D.Lorentzian()
        l1.gamma.value = self.widths[2]
        l1.A.value = self.areas[2]
        d = g.function(ax - self.centres[0]) + \
            l.function(ax - self.centres[1]) + \
            l1.function(ax - self.centres[2])
        s = hs.signals.Signal1D(np.array([d, d]))
        s.add_poissonian_noise()
        s.metadata.Signal.set_item("Noise_properties.variance",
                                   s.deepcopy() + 1.)
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m[-1].name = 'g1'
        m.append(hs.model.components1D.Lorentzian())
        m[-1].name = 'l1'
        m.append(hs.model.components1D.Lorentzian())
        m[-1].name = 'l2'
        m.append(hs.model.components1D.Gaussian())
        m[-1].name = 'g2'
        m.append(hs.model.components1D.Gaussian())
        m[-1].name = 'g3'
        m.append(hs.model.components1D.Lorentzian())
        m[-1].name = 'l3'
        for c in m:
            c.active_is_multidimensional = True
        # Candidate starting values per component/parameter for the worker.
        vals = {'g1': {},
                'g2': {},
                'g3': {},
                'l1': {},
                'l2': {},
                'l3': {},
                }
        vals['g1']['centre'] = [50, 150]
        vals['g1']['sigma'] = [5]
        vals['g1']['A'] = [10000]
        vals['l1']['centre'] = [43]
        vals['l1']['gamma'] = [25]
        vals['l1']['A'] = [10000]
        vals['l2']['centre'] = [125]
        vals['l2']['gamma'] = [8]
        vals['l2']['A'] = [10000]
        vals['g2']['centre'] = [105]
        vals['g2']['sigma'] = [20]
        vals['g2']['A'] = [10000]
        vals['l3']['centre'] = [185]
        vals['l3']['gamma'] = [11]
        vals['l3']['A'] = [10000]
        vals['g3']['centre'] = [175]
        vals['g3']['sigma'] = [12]
        vals['g3']['A'] = [10000]
        self.vals = vals
        self.model = m
        self.q = Mock_queue()
        self.ind = (1,)
        self.args = {}
        self.model_letter = 'sldkfjg'
        # Goodness-of-fit test the worker applies, serialized as it would be
        # when shipped to a separate process.
        from hyperspy.samfire_utils.fit_tests import red_chisq_test as rct
        self._gt_dump = dill.dumps(rct(tolerance=1.0))
        # Store a one-pixel slice of the model under self.model_letter so the
        # worker can reconstruct it from the dictionary alone.
        m_slice = m.inav[self.ind[::-1]]
        m_slice.store(self.model_letter)
        m_dict = m_slice.signal._to_dictionary(False)
        m_dict['models'] = m_slice.signal.models._models.as_dictionary()
        self.model_dictionary = m_dict
        self.optional_comps = [1, 2, 3, 4, 5]
    def test_add_model(self):
        # Reconstructed model is single-pixel: all components active, none
        # multidimensional.
        worker = create_worker('worker')
        worker.create_model(self.model_dictionary, self.model_letter)
        from hyperspy.model import BaseModel
        assert isinstance(worker.model, BaseModel)
        for component in worker.model:
            assert not component.active_is_multidimensional
            assert component.active
    def test_main_result(self):
        # Full pixel run: the worker should keep exactly the three true
        # components (one Gaussian, two Lorentzians) with accurate parameters.
        worker = create_worker('worker')
        worker.create_model(self.model_dictionary, self.model_letter)
        worker.setup_test(self._gt_dump)
        worker.set_optional_names({self.model[comp].name for comp in
                                   self.optional_comps})
        self.vals.update({
            'signal.data': self.model.signal(),
            'fitting_kwargs': {},
            'variance.data':
            self.model.signal.metadata.Signal.Noise_properties.variance()
        })
        keyword, (_id, _ind, result, found_solution) = \
            worker.run_pixel(self.ind, self.vals)
        assert _id == 'worker'
        assert _ind == self.ind
        assert found_solution
        assert result['dof.data'][()] == 9
        lor_components = [key for key in result['components'].keys() if
                          key.find('l') == 0]
        assert len(result['components']) == 3
        assert len(lor_components) == 2
        gauss_name = list(set(result['components'].keys()) -
                          set(lor_components))[0]
        gauss = result['components'][gauss_name]
        np.testing.assert_allclose(gauss['A'][0]['values'], self.areas[0],
                                   rtol=0.05)
        np.testing.assert_allclose(gauss['sigma'][0]['values'], self.widths[0],
                                   rtol=0.05)
        np.testing.assert_allclose(gauss['centre'][0]['values'],
                                   self.centres[0], rtol=0.05)
        # The two Lorentzians may come back in either order, so accept either
        # assignment of the two ground-truth parameter triples.
        lor1 = result['components'][lor_components[0]]
        lor1_values = tuple(lor1[par][0]['values'] for par in ['A', 'gamma',
                                                              'centre'])
        lor2 = result['components'][lor_components[1]]
        lor2_values = tuple(lor2[par][0]['values'] for par in ['A', 'gamma',
                                                              'centre'])
        possible_values1 = (self.areas[1], self.widths[1], self.centres[1])
        possible_values2 = (self.areas[2], self.widths[2], self.centres[2])
        assert (np.allclose(lor1_values, possible_values1, rtol=0.05)
                or
                np.allclose(lor1_values, possible_values2, rtol=0.05))
        assert (np.allclose(lor2_values, possible_values1, rtol=0.05)
                or
                np.allclose(lor2_values, possible_values2, rtol=0.05))
| magnunor/hyperspy | hyperspy/tests/samfire/test_samfire.py | Python | gpl-3.0 | 18,333 | [
"Gaussian"
] | e490ccebb25b2202b91e227b80ae5dbd9bf3b3d1c63fd63ae09bb0a24d956c5e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV
from pymatgen.io.abinitio.abiobjects import *
import warnings
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
def cif_paths():
cifpaths = []
print(test_dir)
for fname in os.listdir(test_dir):
fname = os.path.join(test_dir, fname)
if os.path.isfile(fname) and fname.endswith(".cif"):
cifpaths.append(fname)
assert cifpaths
return cifpaths
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertPMGSONable(polarized)
self.assertPMGSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
print(fd1ev)
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:"+ str(1.0/Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertPMGSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
print(algo.to_abivars())
# Test pickle
self.serialize_with_pickle(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol==2)
self.assertTrue(default_electrons.nspinor==1)
self.assertTrue(default_electrons.nspden==2)
print(default_electrons.to_abivars())
#new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertPMGSONable(monkhorst)
self.assertPMGSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertPMGSONable(atoms_and_cell)
self.assertPMGSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
print(godby)
print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:"+ str(12.0/Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertPMGSONable(godby)
if __name__ == '__main__':
    # Allow running this test module directly; pytest is the normal entry point.
    import unittest
    unittest.main()
| sonium0/pymatgen | pymatgen/io/abinitio/tests/test_abiobjects.py | Python | mit | 4,167 | [
"pymatgen"
] | f43ea2e90bb69f6695a4d54190c91b446c37c61f073062af052158460ee9f756 |
"""Comparator objects relevant to particles with adsorbates."""
from ase import Atoms
def count_ads(atoms, adsorbate):
    """Count how many copies of *adsorbate* appear in *atoms*, by symbols only.

    *atoms* must be an Atoms object; *adsorbate* may be an Atoms object or a
    chemical-formula string. The count is naive: each adsorbate element must
    occur the same number of times in *atoms*, otherwise the count is
    ambiguous and NotImplementedError is raised.
    """
    symbols = atoms.get_chemical_symbols()
    try:
        ads_symbols = adsorbate.get_chemical_symbols()
    except AttributeError:
        # Hopefully a formula string; let Atoms parse it into symbols.
        ads_symbols = Atoms(adsorbate).get_chemical_symbols()
    per_element = [symbols.count(sym) for sym in ads_symbols]
    if len(set(per_element)) != 1:
        # Element counts disagree -- cannot infer a unique adsorbate count.
        raise NotImplementedError
    return per_element[0]
class AdsorbateCountComparator(object):
    """Compare two particles by their adsorbate counts.

    ``looks_like`` returns True when both particles carry the same number of
    every adsorbate, False otherwise.

    Parameters:

    adsorbate: list or string
        the adsorbates to compare by; a single adsorbate may be given as a
        plain string
    """

    def __init__(self, adsorbate):
        # Duck-type the string case: only string-likes concatenate with ''.
        try:
            adsorbate + ''
        except TypeError:
            self.adsorbate = adsorbate
        else:
            self.adsorbate = [adsorbate]

    def looks_like(self, a1, a2):
        """Return True iff a1 and a2 hold equal counts of every adsorbate."""
        for ads in self.adsorbate:
            ads_atoms = Atoms(ads)
            if count_ads(a1, ads_atoms) != count_ads(a2, ads_atoms):
                return False
        return True
class AdsorptionSitesComparator(object):
    """Compare the metal-atom composition of occupied adsorption sites.

    ``looks_like`` tallies, per sorted site composition, how often each
    composition occurs on a1 minus how often it occurs on a2, and returns
    True when the summed absolute mismatch is below
    ``min_diff_adsorption_sites``.

    Ex:
    a1.info['data']['adsorbates_site_atoms'] =
    [('Cu','Ni'),('Cu','Ni'),('Ni'),('Ni')]

    a2.info['data']['adsorbates_site_atoms'] =
    [('Cu','Ni'),('Ni','Ni', 'Ni'),('Ni'),('Ni')]

    will have a difference of 2:
    (2*('Cu','Ni')-1*('Cu','Ni')=1, 1*('Ni','Ni','Ni')=1, 2*('Ni')-2*('Ni')=0)
    """

    def __init__(self, min_diff_adsorption_sites=2):
        self.min_diff_adsorption_sites = min_diff_adsorption_sites

    def looks_like(self, a1, a2):
        key = 'adsorbates_site_atoms'
        # Both particles must carry a non-empty site list to be comparable.
        for a in (a1, a2):
            if key not in a.info['data'] or a.info['data'][key] == []:
                return False
        # Signed tally per (order-insensitive) site composition.
        tally = {}
        for site in a1.info['data'][key]:
            composition = tuple(sorted(site))
            tally[composition] = tally.get(composition, 0) + 1
        for site in a2.info['data'][key]:
            composition = tuple(sorted(site))
            tally[composition] = tally.get(composition, 0) - 1
        mismatch = sum(abs(count) for count in tally.values())
        return mismatch < self.min_diff_adsorption_sites
class AdsorptionMetalsComparator(object):
    """Compare the number of adsorbate-metal bonds between two particles.

    ``looks_like`` returns True when, for at least one metal, the bond counts
    of a1 and a2 differ by less than ``same_adsorption_number``.

    Ex:
    a1.info['data']['adsorbates_bound_to'] = {'Cu':1, 'Ni':3}
    a2.info['data']['adsorbates_bound_to'] = {'Cu':.5, 'Ni':3.5}
    will have a difference of .5 in both elements:
    """

    def __init__(self, same_adsorption_number):
        self.same_adsorption_number = same_adsorption_number

    def looks_like(self, a1, a2):
        key = 'adsorbates_bound_to'
        # Both particles need the bookkeeping entry with at least one bond.
        for a in (a1, a2):
            data = a.info['data']
            if key not in data or not any(data[key].values()):
                return False
        first = a1.info['data'][key]
        second = a2.info['data'][key]
        return any(abs(first[metal] - second[metal]) <
                   self.same_adsorption_number for metal in first)
| suttond/MODOI | ase/ga/adsorbate_comparators.py | Python | lgpl-3.0 | 4,037 | [
"ASE"
] | a422aef1a3229673f73a7c45ab093ecb163f7217745480cf8c1b36f86ab57e4c |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import gLogger
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
from DIRAC.DataManagementSystem.Service.FTS3ManagerHandler import FTS3ManagerHandlerMixin
sLog = gLogger.getSubLogger(__name__)
class TornadoFTS3ManagerHandler(FTS3ManagerHandlerMixin, TornadoService):
    """Tornado handler for the FTS3Manager.

    All RPC logic lives in FTS3ManagerHandlerMixin; this class only binds it
    to the Tornado-based service transport.
    """

    # Class-level logger shared by all requests served by this handler.
    log = sLog
| ic-hep/DIRAC | src/DIRAC/DataManagementSystem/Service/TornadoFTS3ManagerHandler.py | Python | gpl-3.0 | 491 | [
"DIRAC"
] | a4f394681a0124edc1a24c851054d87d07174d2280cbb48dc538bad3d5db2463 |
#encoding=utf-8
############################################
# [config.py]
# CONFIGURATION SETTINGS FOR PROSODIC
#
# Here you may change the runtime settings for prosodic.
# For more help on this file, please see the README in this folder,
# or visit it online: <https://github.com/quadrismegistus/prosodic>.
# If you have any questions, please email Ryan <heuser@stanford.edu>.
#
############################################
############################################
# PATHS
# Relative paths are relative to dir_prosodic_home, which defaults to ~/prosodic_data
# and which can be changed by editing the hidden variable ~/.path_prosodic_data
path_corpora = 'corpora'
path_meters = 'meters'
path_results = 'results'
path_tagged_samples = 'tagged_samples'
path_nlp_data = 'nlp_libraries'
############################################
# METRICAL PARSING
#
# Set the Meter ID: the filename to its configuration file
# in the "meters" subdirectory, e.g. "kiparskyhanson_shakespeare"
# (omit the .py from the filename).
#
meter = 'meter_ryan'
#
# If no Meter ID is provided, PROSODIC will ask you to set the meter
# in its interactive mode. As a python module, you will have to
# create the meter first and pass it to the Text object to parse.
#
# meter = 'meter_arto'
############################################
############################################
# SELECT THE LANGUAGE
#
# Select the language that will be used in PROSODIC,
# when typing text directly or loading text.
#
# All text is English:
lang='en'
#
# All text is Finnish:
#lang='fi'
#
# Detect language from first two characters of filename:
# e.g. "en.[filename].txt" is English, "fi.[filename].txt" is Finnish
#lang='**'
############################################
############################################
# CONFIGURE TEXT-TO-SPEECH ENGINE (for English)
#
# To parse unknown English words, you'll need a TTS engine installed.
# For instructions, please see the README.
#
# Use espeak for TTS (recommended):
# [Note: syllabification done with NLTK]
en_TTS_ENGINE = 'espeak'
#
# Use OpenMary for TTS:
#en_TTS_ENGINE = 'openmary'
#
# Do not use TTS:
# [Lines with unknown words will be skipped during metrical parsing]
# en_TTS_ENGINE = 'none'
#
# Cache results of TTS for an unknown word so it's not necessary
# to use TTS for that word again [Change to 0 to be false]
en_TTS_cache = 1
############################################
############################################
# CONFIGURE METRICALTREE
#
# Parse text using metrical tree? (Only for English).
parse_using_metrical_tree = 0
############################################
############################################
# OPTIONS ABOUT PRINTING TO SCREEN
#
# Print loaded words, parses, etc. to screen:
#print_to_screen=True
#
# Do not print loaded words, parses, etc. to screen:
# Although hidden, you may still save any output to disk
# using the /save command.
print_to_screen=True
#
# The default length for the line used by printing
linelen=60
############################################
############################################
# OPTIONS ABOUT LINES
#
######
# [Line SIZE]
#
# The maximum size of the line to parse:
# [others will be skipped during parsing]
# [PROSODIC can parse lines of up to approximately 20 syllables
# before the number of possibilities become too large,
# slowing the algorithm down to a halt.]
line_maxsylls=60
#
# The minimum size of the line to parse:
# [useful if lines are determined by punctuation,
# because sometimes they can be very very short
# and so pointless for metrical parsing.]
#line_minsylls=9
#
# Alternatively, after how many seconds should Prosodic give up
# when trying to parse a (long or ambiguous) line?
parse_maxsec = 30
#
#
######
# [Line DIVISIONS]
#
# Here you may decide how texts divide into lines.
# This is significant only because the line,
# with its words and syllables, is the unit passed
# to the metrical parser for parsing.
#
# Linebreaks occur only at actual linebreaks in the
# processed text file (good for metrical poetry):
linebreak='line'
#
# Linebreaks occur only upon encountering any of these
# punctuation marks (good for prose):
#linebreak=',;:.?!()[]{}<>'
#
# Linebreaks occur both at linebreaks in the text,
# *and* at any of these punctuation marks (good for
# prose and free-verse poetry):
#linebreak='line,;:.?!()[]{}<>'
#
#
######
# [MISCELLANEOUS line options]
#
# Headedness [optional]
# If there are multiple parses tied for the lowest score,
# break the tie by preferring lines that begin with this pattern:
line_headedness='ws'
#line_headedness='sw'
#line_headedness='wws'
#line_headedness='ssw'
############################################
############################################
# OPTIONS ABOUT WORDS
#
######
# [Tokenization]
#
# How are lines of text split into words? Define the regular
# expression that is applied to a string of text in order
# to split it into a list of words.
#
# Words are tokenized against [^] white-spaces [\s+] and hyphens [-]
#tokenizer='[^\s+-]+'
#
# Words are tokenized against [^] white-spaces [\s+]
tokenizer='[^\s+]+'
#
######
# [Resolving stress ambiguity]
#
# Some words are multiple stress profiles: ambiguous polysyllabic
# words, and also ambiguous monosyllabic words. Words in the
# "maybestressed.txt" file of a language folder (e.g. dicts/en)
# will be given two stress profiles, one stressed and the other
# unstressed. The CMU also has multiple stress profiles for words.
#
# Allow the metrical parser to parse all stress profiles for all
# words in the line, thus choosing the stress profile for each
# word that best fit the metrical parse:
resolve_optionality=1
#resolve_optionality=0
#
#
######
# [ELISIONS of Syllables: English only]
#
# Some syllables are elided in English verse, e.g.
#      e.g. with music sweet as love, which overflows her bower
# --> with|MU|sic|SWEET|as|LOVE|which|OV|er|FLOWS|her|BOW'R
# or e.g. scattering unbeholden
# --> SCAT|tring|UN|be|HOLD|en
#
# Add pronunciations for words that could have elided syllables:
add_elided_pronunciations=1
#add_elided_pronunciations=0
#
#
######
# [Output formatting]
#
# Here you may change the format under which the syllabified,
# phonetic output will appear. The options are:
# - ipa
# - cmu (the formatting used in the CMU Pronunciation Dictionary)
# - orth (the orthography itself [good for Finnish])
#
# The default phonetic output for all languages:
output='ipa'
#
# The phonetic output for English:
output_en='ipa'
#
# The phonetic output for Finnish:
output_fi='orth' # since finnish pronunciation is essentially identical to its orthography
############################################
# ############################################
# @DEPRECATED
# # PATHS USED BY PROSODIC
# #
# # If these are relative paths (no leading /),
# # they are defined from the point of view of
# # the root directory of PROSODIC.
# #
# # Folder used as the folder of corpora:
# # [it should contain folders, each of which contains text files]
# folder_corpora='corpora/'
# #
# # Folder to store results within (statistics, etc)
# folder_results='results/'
# #
# # Folder in which tagged samples (hand-parsed lines) are stored:
folder_tagged_samples = 'tagged_samples/' # adding back temporarily? 1/23/2020
# ############################################
############################################
# MAXIMUM ENTROPY settings
#
# Should negative weights be allowed?
maxent_negative_weights_allowed = False
#
# How many epochs should it run for at most?
maxent_max_epochs = 10000
#
# What should the step size be?
maxent_step_size = 0.1
#
# How small does the gradient have to be before
# we consider it converged?
maxent_gradient_norm_tolerance = 1e-6
############################################
####
# MEMORY DECISIONS
#
num_bounded_parses_to_store = 100
#
###
| quadrismegistus/prosodic | prosodic/config.py | Python | gpl-3.0 | 7,803 | [
"VisIt"
] | 2f2f295f0294ac41248ba3d870fa43bfca5c880a3b9a23725a8d66b3db8a1023 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAN commands to Lenovo Switches
# Overloading aspect of vlan creation in a range is pending
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlan
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage VLAN resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with VLAN related configurations. The
operators used are overloaded to ensure control over switch VLAN
configurations. The first level of VLAN configuration allows to set up the
VLAN range, the VLAN tag persistence, a VLAN access map and access map
filter. After passing this level, there are five VLAN arguments that will
perform further configurations. They are vlanArg1, vlanArg2, vlanArg3,
vlanArg4, and vlanArg5. The value of vlanArg1 will determine the way
following arguments will be evaluated. For more details on how to use these
arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlan.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlanArg1:
description:
            - This is an overloaded vlan first argument. Usage of this argument can be found in the User Guide referenced above.
required: true
choices: [access-map, dot1q, filter, <1-3999> VLAN ID 1-3999 or range]
vlanArg2:
description:
            - This is an overloaded vlan second argument. Usage of this argument can be found in the User Guide referenced above.
choices: [VLAN Access Map name,egress-only,name, flood,state, ip]
vlanArg3:
description:
            - This is an overloaded vlan third argument. Usage of this argument can be found in the User Guide referenced above.
choices: [action, match, statistics, enter VLAN id or range of vlan, ascii name for the VLAN, ipv4 or ipv6, active or suspend, fast-leave,
last-member-query-interval, mrouter, querier, querier-timeout, query-interval, query-max-response-time, report-suppression,
robustness-variable, startup-query-count, startup-query-interval, static-group]
vlanArg4:
description:
            - This is an overloaded vlan fourth argument. Usage of this argument can be found in the User Guide referenced above.
choices: [drop or forward or redirect, ip or mac,Interval in seconds,ethernet, port-aggregation, Querier IP address,
Querier Timeout in seconds, Query Interval in seconds, Query Max Response Time in seconds, Robustness Variable value,
Number of queries sent at startup, Query Interval at startup]
vlanArg5:
description:
            - This is an overloaded vlan fifth argument. Usage of this argument can be found in the User Guide referenced above.
choices: [access-list name, Slot/chassis number, Port Aggregation Number]
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_vlan. These are written in the main.yml file of the tasks directory.
---
- name: Test Vlan - Create a vlan, name it
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "name"
vlanArg3: "Anil"
- name: Test Vlan - Create a vlan, Flood configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "flood"
vlanArg3: "ipv4"
- name: Test Vlan - Create a vlan, State configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "state"
vlanArg3: "active"
- name: Test Vlan - VLAN Access map1
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "statistics"
- name: Test Vlan - VLAN Access Map2
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "action"
vlanArg4: "forward"
- name: Test Vlan - ip igmp snooping query interval
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "query-interval"
vlanArg4: 1313
- name: Test Vlan - ip igmp snooping mrouter interface port-aggregation 23
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "mrouter"
vlanArg4: "port-aggregation"
vlanArg5: 23
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "VLAN configuration is accomplished"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point for the cnos_vlan Ansible module.

    Reads the module parameters, opens an interactive SSH shell to the
    Lenovo CNOS switch, pushes the (overloaded) VLAN configuration
    arguments through ``cnos.vlanConfig``, appends the CLI transcript to
    ``outputfile`` and reports success or failure back to Ansible.
    """
    #
    # Define parameters for vlan creation entry
    #
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            vlanArg1=dict(required=True),
            vlanArg2=dict(required=False),
            vlanArg3=dict(required=False),
            vlanArg4=dict(required=False),
            vlanArg5=dict(required=False),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    vlanArg1 = module.params['vlanArg1']
    vlanArg2 = module.params['vlanArg2']
    vlanArg3 = module.params['vlanArg3']
    vlanArg4 = module.params['vlanArg4']
    vlanArg5 = module.params['vlanArg5']
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    deviceType = module.params['deviceType']
    output = ""
    if not HAS_PARAMIKO:
        module.fail_json(msg='paramiko is required for this module')
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in
    # your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + \
        cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0
    output = output + \
        cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Go to config mode
    output = output + \
        cnos.waitForDeviceResponse("configure device\n", "(config)#", 2, remote_conn)
    # Send the CLI command
    output = output + \
        cnos.vlanConfig(
            remote_conn, deviceType, "(config)#", 2, vlanArg1, vlanArg2,
            vlanArg3, vlanArg4, vlanArg5)
    # Persist the transcript.  Using 'with' guarantees the handle is
    # closed even if the write fails (the original leaked the handle on
    # error and shadowed the 'file' builtin).
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    # need to add logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if(errorMsg is None):
        module.exit_json(changed=True, msg="VLAN configuration is accomplished")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| fxfitz/ansible | lib/ansible/modules/network/cnos/cnos_vlan.py | Python | gpl-3.0 | 11,596 | [
"VisIt"
] | 42be4786e8a78f1ceee9fcda721b3d9d3116a7824b1340c903fd67415be0c36a |
#!/usr/bin/python
# Copyright (C) 2012 Sibi <sibi@psibi.in>
#
# This file is part of Neuron.
#
# Neuron is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neuron is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neuron. If not, see <http://www.gnu.org/licenses/>.
from pyfann import libfann
import dbm
import sys
import gtk
import shutil
import os
# Pull the mandatory network description out of the configuration store
# persisted by the GUI; a missing key means the setup dialog was never
# completed, so show an error dialog and exit.
try:
    db=dbm.open('config.dat','c')
    talgo=db['Training Algorithm']
    bpn_type=db['Network Type']
    numl=int(db['Number of Layers'])
    num_input=int(db['Input Neurons'])
    num_output=int(db['Output Neurons'])
    num_neurons_hidden=db['Hidden Neurons']
    num_hlay=int(db['Number of Hidden Layers'])
    tfile=db['Training File']
except KeyError as key:
    dlg=gtk.MessageDialog(None,gtk.DIALOG_DESTROY_WITH_PARENT,gtk.MESSAGE_ERROR,gtk.BUTTONS_OK, str(key)+ " Uninitialized")
    dlg.run()
    dlg.destroy()
    # NOTE(review): db is closed here *and* again in the finally block
    # below; the second close on an already-closed dbm may itself raise.
    db.close()
    sys.exit(1)
finally:
    db.close()
class bpn:
"""Class for training the BPN Network."""
def __init__(self):
print "Training Initialization taking place\n"
self.ann = libfann.neural_net()
self.network_file=""
def train(self):
"""Trains the BPN network."""
db=dbm.open('config.dat','c')
connection_rate=float(db['Connection Rate'])
learning_rate=float(db['Learning Rate'])
desired_error=float(db['Desired Error'])
max_iterations=int(db['Maximum Iterations'])
iterations_between_reports=int(db['Iteration Between Reports'])
ol_act_fun=db['Output Layer Activation Function']
db.close()
hidden_neurons_list = [num_input]
lay_neurons = tuple(num_neurons_hidden.split(",")) #Hidden Neurons in String
for hid_neuron in lay_neurons:
hidden_neurons_list.append(int(hid_neuron))
hidden_neurons_list.append(num_output)
hnt = tuple(hidden_neurons_list)
hiddenn = num_neurons_hidden.split(",")
if bpn_type=="SPR":
self.ann.create_sparse_array(connection_rate, hnt)
elif bpn_type=="STD":
self.ann.create_standard_array(hnt)
elif bpn_type=="SRT":
self.ann.create_shortcut_array(hnt)
if talgo=="FANN_TRAIN_INCREMENTAL":
self.ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
elif talgo=="FANN_TRAIN_BATCH":
self.ann.set_training_algorithm(libfann.TRAIN_BATCH)
elif talgo=="FANN_TRAIN_RPROP":
self.ann.set_training_algorithm(libfann.TRAIN_RPROP)
try:
db=dbm.open('config.dat','c')
inc_factor=float(db['Increase Factor'])
dec_factor=float(db['Decrease Factor'])
delta_min=float(db['Delta Minimum'])
delta_max=float(db['Delta Maximum'])
delta_zero=float(db['Delta Zero'])
db.close()
except KeyError:
pass
else:
self.ann.set_rprop_increase_factor(inc_factor)
self.ann.set_rprop_decrease_factor(dec_factor)
self.ann.set_rprop_delta_min(delta_min)
self.ann.set_rprop_delta_max(delta_max)
elif talgo=="FANN_TRAIN_QUICKPROP":
self.ann.set_training_algorithm(libfann.TRAIN_QUICKPROP)
try:
db=dbm.open('config.dat','c')
decay_val=float(db['Decay Value'])
mu_val=float(db['Mu Value'])
db.close()
except KeyError:
pass
else:
self.ann.set_quickprop_decay(decay_val)
self.ann.set_quickprop_mu(mu_val)
self.ann.set_learning_rate(learning_rate)
if ol_act_fun=="LINEAR":
self.ann.set_activation_function_output(libfann.LINEAR)
elif ol_act_fun=="THRESHOLD":
self.ann.set_activation_function_output(libfann.THRESHOLD)
elif ol_act_fun=="THRESHOLD SYMMETRIC":
self.ann.set_activation_function_output(libfann.THRESHOLD_SYMMETRIC)
elif ol_act_fun=="SIGMOID":
self.ann.set_activation_function_output(libfann.SIGMOID)
elif ol_act_fun=="SIGMOID STEPWISE":
self.ann.set_activation_function_output(libfann.SIGMOID_STEPWISE)
elif ol_act_fun=="SIGMOID SYMMETRIC":
self.ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC)
elif ol_act_fun=="GAUSSIAN":
self.ann.set_activation_function_output(libfann.GAUSSIAN)
elif ol_act_fun=="GAUSSIAN SYMMETRIC":
self.ann.set_activation_function_output(libfann.GAUSSIAN_SYMMETRIC)
elif ol_act_fun=="ELLIOT":
self.ann.set_activation_function_output(libfann.ELLIOT)
elif ol_act_fun=="ELLIOT SYMMETRIC":
self.ann.set_activation_function_output(libfann.ELLIOT_SYMMETRIC)
elif ol_act_fun=="LINEAR PIECE":
self.ann.set_activation_function_output(libfann.LINEAR_PIECE)
elif ol_act_fun=="LINEAR PIECE SYMMETRIC":
self.ann.set_activation_function_output(libfann.LINEAR_PIECE_SYMMETRIC)
elif ol_act_fun=="SIN SYMMETRIC":
self.ann.set_activation_function_output(libfann.SIN_SYMMETRIC)
elif ol_act_fun=="COS SYMMETRIC":
self.ann.set_activation_function_output(libfann.COS_SYMMETRIC)
elif ol_act_fun=="SIN":
self.ann.set_activation_function_output(libfann.SIN)
elif ol_act_fun=="COS":
self.ann.set_activation_function_output(libfann.COS)
elif ol_act_fun=="SIGMOID SYMMETRIC STEPWISE":
self.ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
#For Advanced Parameters related to Fixed Topology
try:
db=dbm.open('config.dat','c')
lmomentum=float(db['Learning Momentum'])
af_neuron_number=db['AF for Neuron']
af_n=db['AF Neuron']
af_layer_number=int(db['AF for layer'])
af_l=db['AF Layer']
asn=db['Activation Steepness for Neuron']
asl=db['Activation Steepness for layer']
tef=db['Train Error Function']
tsf=db['Train Stop Function']
bfl=float(db['Bit Fail Limit'])
db.close()
except KeyError:
pass
else:
self.ann.set_learning_momentum(lmomentum)
temp_list=af_neuron_number.split(",")
layer_no=int(temp_list[0])
neuron_no=int(temp_list[1])
steepness_list=asn.split(",")
svalue=float(steepness_list[0])
layer=int(steepness_list[1])
neuron=int(steepness_list[2])
steep_layer_list=asl.split(",")
vsteep=float(steep_layer_list[0])
vslayer=int(steep_layer_list[1])
if af_n=="LINEAR":
self.ann.set_activation_function(libfann.LINEAR,layer_no,neuron_no)
elif af_n=="THRESHOLD":
self.ann.set_activation_function(libfann.THRESHOLD,layer_no,neuron_no)
elif af_n=="THRESHOLD SYMMETRIC":
self.ann.set_activation_function(libfann.THRESHOLD_SYMMETRIC,layer_no,neuron_no)
elif af_n=="SIGMOID":
self.ann.set_activation_function(libfann.SIGMOID,layer_no,neuron_no)
elif af_n=="SIGMOID STEPWISE":
self.ann.set_activation_function(libfann.SIGMOID_STEPWISE,layer_no,neuron_no)
elif af_n=="SIGMOID SYMMETRIC":
self.ann.set_activation_function(libfann.SIGMOID_SYMMETRIC,layer_no,neuron_no)
elif af_n=="GAUSSIAN":
self.ann.set_activation_function(libfann.GAUSSIAN,layer_no,neuron_no)
elif af_n=="GAUSSIAN SYMMETRIC":
self.ann.set_activation_function(libfann.GAUSSIAN_SYMMETRIC,layer_no,neuron_no)
elif af_n=="ELLIOT":
self.ann.set_activation_function(libfann.ELLIOT,layer_no,neuron_no)
elif af_n=="ELLIOT SYMMETRIC":
self.ann.set_activation_function(libfann.ELLIOT_SYMMETRIC,layer_no,neuron_no)
elif af_n=="LINEAR PIECE":
self.ann.set_activation_function(libfann.LINEAR_PIECE,layer_no,neuron_no)
elif af_n=="LINEAR PIECE SYMMETRIC":
self.ann.set_activation_function(libfann.LINEAR_PIECE_SYMMETRIC,layer_no,neuron_no)
elif af_n=="SIN SYMMETRIC":
self.ann.set_activation_function(libfann.SIN_SYMMETRIC,layer_no,neuron_no)
elif af_n=="COS SYMMETRIC":
self.ann.set_activation_function(libfann.COS_SYMMETRIC,layer_no,neuron_no)
elif af_n=="SIN":
self.ann.set_activation_function(libfann.SIN,layer_no,neuron_no)
elif af_n=="COS":
self.ann.set_activation_function(libfann.COS,layer_no,neuron_no)
if af_l=="LINEAR":
self.ann.set_activation_function_layer(libfann.LINEAR,af_layer_number)
elif af_l=="THRESHOLD":
self.ann.set_activation_function(libfann.THRESHOLD,layer_no,neuron_no)
elif af_l=="THRESHOLD SYMMETRIC":
self.ann.set_activation_function(libfann.THRESHOLD_SYMMETRIC,layer_no,neuron_no)
elif af_l=="SIGMOID":
self.ann.set_activation_function(libfann.SIGMOID,layer_no,neuron_no)
elif af_l=="SIGMOID STEPWISE":
self.ann.set_activation_function(libfann.SIGMOID_STEPWISE,layer_no,neuron_no)
elif af_l=="SIGMOID SYMMETRIC":
self.ann.set_activation_function(libfann.SIGMOID_SYMMETRIC,layer_no,neuron_no)
elif af_l=="GAUSSIAN":
self.ann.set_activation_function(libfann.GAUSSIAN,layer_no,neuron_no)
elif af_l=="GAUSSIAN SYMMETRIC":
self.ann.set_activation_function(libfann.GAUSSIAN_SYMMETRIC,layer_no,neuron_no)
elif af_l=="ELLIOT":
self.ann.set_activation_function(libfann.ELLIOT,layer_no,neuron_no)
elif af_l=="ELLIOT SYMMETRIC":
self.ann.set_activation_function(libfann.ELLIOT_SYMMETRIC,layer_no,neuron_no)
elif af_l=="LINEAR PIECE":
self.ann.set_activation_function(libfann.LINEAR_PIECE,layer_no,neuron_no)
elif af_l=="LINEAR PIECE SYMMETRIC":
self.ann.set_activation_function(libfann.LINEAR_PIECE_SYMMETRIC,layer_no,neuron_no)
elif af_l=="SIN SYMMETRIC":
self.ann.set_activation_function(libfann.SIN_SYMMETRIC,layer_no,neuron_no)
elif af_l=="COS SYMMETRIC":
self.ann.set_activation_function(libfann.COS_SYMMETRIC,layer_no,neuron_no)
elif af_l=="SIN":
self.ann.set_activation_function(libfann.SIN,layer_no,neuron_no)
elif af_l=="COS":
self.ann.set_activation_function(libfann.COS,layer_no,neuron_no)
self.ann.set_activation_steepness(svalue,layer,neuron)
self.ann.set_activation_steepness_layer(vsteep,vslayer)
if tef=="LINEAR":
self.ann.set_train_error_function(libfann.ERRORFUNC_LINEAR)
elif tef=="TANH ERROR FUNCTION":
self.ann.set_train_error_function(libfann.ERRORFUNC_TANH)
if tsf=="MSE":
self.ann.set_train_stop_function(libfann.STOPFUNC_MSE)
elif tsf=="BIT FAIL":
self.ann.set_train_stop_function(libfann.STOPFUNC_BIT)
self.ann.set_bit_fail_limit(bfl)
finally:
db.close()
#Find Out Whether it is Evolving topology or Fixed Topology
try:
db=dbm.open('config.dat','c')
max_neurons=db['Maximum Neurons']
ncascade=True
db.close()
except KeyError:
ncascade=False
finally:
db.close()
if ncascade:
db=dbm.open('config.dat','c')
max_neurons=int(db['Maximum Neurons'])
neurons_between_reports=int(db['Neurons Between Reports'])
cdesired_error=float(db['Desired Error'])
db.close()
#For Advanced Cascade Parameters
try:
db=dbm.open('config.dat','c')
ocf=db['Output Change Fraction']
db.close()
tcascade=True
except KeyError:
tcascade=False
if tcascade:
db=dbm.open('config.dat','c')
ocf=float(db['Output Change Fraction'])
ose=int(db['Output Stagnation Epochs'])
ccf=float(db['Candidate Change Fraction'])
cse=int(db['Candidate Stagnation Epochs'])
wm=float(db['Weight Multiplier'])
cl=float(db['Candidate Limit'])
max_oe=int(db['Maximum Out Epochs'])
min_oe=int(db['Minimum Out Epochs'])
max_ce=int(db['Maximum Candidate Epochs'])
min_ce=int(db['Minimum Candidate Epochs'])
num_cgroup=int(db['Number Candidate Groups'])
db.close()
self.ann.set_cascade_output_change_fraction(ocf)
self.ann.set_cascade_output_stagnation_epochs(ose)
self.ann.set_cascade_candidate_change_fraction(ccf)
self.ann.set_cascade_candidate_stagnation_epochs(cse)
self.ann.set_cascade_weight_multiplier(wm)
self.ann.set_cascade_candidate_limit(cl)
self.ann.set_cascade_max_out_epochs(max_oe)
#self.ann.set_cascade_min_out_epochs(min_oe)
self.ann.set_cascade_max_cand_epochs(max_ce)
#self.ann.set_cascade_min_cand_epochs(min_ce)
self.ann.set_cascade_num_candidate_groups(num_cgroup)
if ncascade:
self.ann.cascadetrain_on_file(tfile,max_neurons,neurons_between_reports,cdesired_error)
else:
self.ann.train_on_file(tfile, max_iterations, iterations_between_reports, desired_error)
fileparts=tfile.split('/')
fileparts.reverse()
name=fileparts[0]
temp=name.split('.')
self.network_file=temp[0]+".net"
network_fname="./dataset/"+temp[0]+".net"
print "Neuron Network Also saved at "+ network_fname
self.ann.save(self.network_file)
print "\nBPN Network Connection:\n"
self.ann.print_connections()
self.move_network_file()
def move_network_file(self):
"""Move the Network file under the dataset folder."""
filename="./dataset/"+self.network_file
if os.path.isfile(filename):
os.remove(filename)
src=self.network_file
dest="./dataset/"
shutil.move(src,dest)
if __name__=="__main__":
network=bpn()
network.train()
| psibi/Neuron | train.py | Python | gpl-3.0 | 15,183 | [
"Gaussian",
"NEURON"
] | ad4a0e97578192d619a137acc4c0d6953c81be1abdfd7bb3700da0bd9c04f1a1 |
#!/usr/bin/env python
# encoding: utf-8
"""
plot_hs.py - Plot the half-sarcomere with mayavi
Created by Dave Williams on 2010-10-4
"""
import numpy as np
from enthought.mayavi import mlab
# Configure the graph
fig = mlab.figure(1, bgcolor=(0, 0, 0), size=(350, 350))
mlab.clf()
fig.scene.parallel_projection = True
mlab.view(-4.0, 84.5, 2423.2, (625.0, 21.4, -3.4))
fil_rad = 2
fil_seg = 12
fil_color = 'jet'
fil_lims = (-1.0, 1.0)
class plot_hs:
def update_locs(self):
# Get needed info from the half-sarcomere
self.thick_xlocs = [t.axial for t in self.hs.thick]
self.thin_xlocs = [t.axial for t in self.hs.thin]
self.thick_s = [t.axialforce() for t in self.hs.thick]
self.thin_s = [t.axialforce() for t in self.hs.thin]
self.z_line = self.hs.z_line
ls = self.hs.lattice_spacing
# Calculate y and z locations of fils
ls_g = np.sqrt(3)/2 * ls
ls_d = 0.5 * ls
self.thick_yzlocs = [(0, 0),
(0 + 2*ls_g, 0),
(0 + ls_g, 0 - 3*ls_d),
(0 + 3*ls_g, 0 - 3*ls_d)]
act_a = lambda y,z: (y - ls_g, z + ls_d)
act_b = lambda y,z: (y, z + 2*ls_d)
act_c = lambda y,z: (y + ls_g, z + ls_d)
act_d = lambda y,z: (y + ls_g, z - ls_d)
act_e = lambda y,z: (y, z - 2*ls_d)
act_f = lambda y,z: (y - ls_g, z - ls-d)
self.thin_yzlocs = [act_c(self.thick_yzlocs[1]),
act_b(self.thick_yzlocs[0]),
act_a(self.thick_yzlocs[1]),
act_b(self.thick_yzlocs[1]),
act_b(self.thick_yzlocs[3]),
act_c(self.thick_yzlocs[3]),
act_b(self.thick_yzlocs[2]),
act_c(self.thick_yzlocs[2])]
def update_ends(self):
"""Update the effective forces at filament ends"""
self.thick_end = [t.effective_axial_force() for t in self.hs.thick]
self.thin_end = [t.effective_axial_force() for t in self.hs.thin]
def update_bound(self):
"""Update which cross-bridges are bound and their states"""
self.bound = []
for thick in self.hs.thick:
self.bound.append([])
for face in thick.thick_faces:
self.bound[-1].append([])
for xb in face.xb:
if xb.numeric_state != 0:
self.bound[-1][-1].append(
(xb.face_index ,
xb.bound_to,
xb.numeric_state))
    def __init__(self, hs):
        """Plot the half-sarcomere *hs* with mayavi.

        Builds the persistent mlab actors (thick/thin filament tubes,
        end-force cubes) once here; `update()` then mutates their
        mlab_source data in place on later frames.
        """
        self.hs = hs
        # Trigger an update of location data
        self.update_locs()
        # Do initial plotting of the thick and thin fils, one tube per
        # filament, colored by axial force
        self.thick_tubes = []
        for x, yz, s in zip(self.thick_xlocs, self.thick_yzlocs,
                            self.thick_s):
            y = np.repeat(yz[0], len(x))
            z = np.repeat(yz[1], len(x))
            self.thick_tubes.append(mlab.plot3d(x, y, z, s,
                tube_radius=fil_rad, tube_sides=fil_seg,
                colormap=fil_color, vmin=fil_lims[0], vmax=fil_lims[1]))
        self.thin_tubes = []
        for x, yz, s in zip(self.thin_xlocs, self.thin_yzlocs,
                            self.thin_s):
            y = np.repeat(yz[0], len(x))
            z = np.repeat(yz[1], len(x))
            # thin filaments are drawn at 60% of the thick-filament radius
            self.thin_tubes.append(mlab.plot3d(x, y, z, s,
                tube_radius=0.6*fil_rad, tube_sides=fil_seg,
                colormap=fil_color, vmin=fil_lims[0], vmax=fil_lims[1]))
        # Plot the total force at the end of each filament: thick ends at
        # x=0 (M-line side), thin ends at the z-line
        self.update_ends()
        self.thick_end_cube = []
        for yz, s in zip(self.thick_yzlocs, self.thick_end):
            x = [0]
            y = [yz[0]]
            z = [yz[1]]
            s = [s]
            self.thick_end_cube.append(mlab.points3d(x,y,z,s,
                mode='cube', scale_mode='none', scale_factor=1.5*fil_rad,
                colormap=fil_color, vmin=-50, vmax=50))
        self.thin_end_cube = []
        for yz, s in zip(self.thin_yzlocs, self.thin_end):
            x = [self.z_line]
            y = [yz[0]]
            z = [yz[1]]
            s = [s]
            self.thin_end_cube.append(mlab.points3d(x,y,z,s,
                mode='cube', scale_mode='none', scale_factor=1.5*fil_rad,
                colormap=fil_color, vmin=-50, vmax=50))
        # Plot the cross-bridges (not implemented yet, see TODO below)
        self.update_bound()
        self.cross_bridges = []
        for fil, x, yz in zip(self.bound, self.thick_xlocs,
                              self.thick_yzlocs):
            y = [yz[0]]
            z = [yz[1]]
            for face in fil:
                # TODO: THIS IS WHERE I LEFT OFF ON IMPLEMENTING
                # CROSS-BRIDGE PLOTTING. THIS IS A BIT OF A STICKY FELLOW IN
                # THAT IT REQUIRES THE CROSS-BRIDGES BE EITHER ALL SHOWN
                # (WHICH IS VISUALLY CLUTTERED) OR DELETED AS NEEDED WITH
                # EACH REDISPLAY (WHICH IS COMPLICATED). THE WAY TO GO IS
                # PROBABLY TO DELETE ALL WITH EACH REDISPLAY AND THEN PLOT
                # ALL CROSS-BRIDGES ANEW EACH TIME.
                pass
    def update(self):
        """Update the visualization in place.

        Refreshes the cached geometry/forces, then pushes the new data
        into the existing mlab pipeline sources rather than re-plotting,
        with rendering disabled so the scene redraws only once.
        """
        self.update_locs()
        self.update_ends()
        self.disable_rendering()
        for tube, x, yz, s in zip(self.thick_tubes, self.thick_xlocs,
                                  self.thick_yzlocs, self.thick_s):
            y = np.repeat(yz[0], len(x))
            z = np.repeat(yz[1], len(x))
            tube.mlab_source.set(x = x, y = y, z = z, scalars = s)
        for tube, x, yz, s in zip(self.thin_tubes, self.thin_xlocs,
                                  self.thin_yzlocs, self.thin_s):
            y = np.repeat(yz[0], len(x))
            z = np.repeat(yz[1], len(x))
            ts = tube.mlab_source
            ts.set(x = x, y = y, z = z, scalars = s)
        # End cubes only change color (scalar = effective end force)
        for cube, s in zip(self.thick_end_cube, self.thick_end):
            s = [s]
            cube.mlab_source.set(scalars = s)
        for cube, s in zip(self.thin_end_cube, self.thin_end):
            s = [s]
            cube.mlab_source.set(scalars = s)
        self.enable_rendering()
def disable_rendering(self):
"""Kill rendering of the scene objects
This makes things vastly faster if done when re-rendering
things, as the whole scene will only be re-rendered once,
rather than as each """
disable = lambda x: x.scene.set(disable_render=True)
map(disable, self.thick_tubes)
map(disable, self.thin_tubes)
map(disable, self.thick_end_cube)
map(disable, self.thin_end_cube)
def enable_rendering(self):
"""Unkill rendering of the scene objects"""
enable = lambda x: x.scene.set(disable_render=False)
map(enable, self.thick_tubes)
map(enable, self.thin_tubes)
map(enable, self.thick_end_cube)
map(enable, self.thin_end_cube)
| cdw/multifil | multifil/plot_hs.py | Python | mit | 7,128 | [
"Mayavi"
] | 02bd85a5b81112442a945a992fd0f9b8feea5b10e99ffd3ea851ce2207e0405b |
# mako/pyparser.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
import operator
if util.py3k:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None', 'print'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('arg')
else:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('id')
try:
import _ast
util.restore__ast(_ast)
import _ast_util
except ImportError:
_ast = None
from compiler import parse as compiler_parse
from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
    """Parse an expression into AST"""
    # Prefer the native _ast parser when available; otherwise fall back
    # to the old (pre-2.5) compiler package, which needs byte input.
    try:
        if _ast:
            return _ast_util.parse(code, '<unknown>', mode)
        else:
            if isinstance(code, unicode):
                code = code.encode('ascii', 'backslashreplace')
            return compiler_parse(code, mode)
    except Exception, e:
        # Wrap any parser failure in a Mako SyntaxException, quoting the
        # first 50 characters of the offending source for context.
        raise exceptions.SyntaxException(
                    "(%s) %s (%r)" % (
                        e.__class__.__name__,
                        e,
                        code[0:50]
                    ), **exception_kwargs)
if _ast:
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = set()
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
if util.py3k:
# ExceptHandler is in Python 2, but this block only works in
# Python 3 (and is required there)
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.listener.undeclared_identifiers.add(node.type.id)
for statement in node.body:
self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _expand_tuples(self, args):
for arg in args:
if isinstance(arg, _ast.Tuple):
for n in arg.elts:
yield n
else:
yield arg
def _visit_function(self, node, islambda):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union([
arg_id(arg) for arg in self._expand_tuples(node.args.args)
])
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
# this is eqiuvalent to visit_AssName in
# compiler
self._add_declared(node.id)
elif node.id not in reserved and node.id \
not in self.listener.declared_identifiers and node.id \
not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split('.')[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == '*':
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(
p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg)
if node.args.kwarg:
argnames.append(node.args.kwarg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def value(self):
return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.local_ident_stack = set()
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitLambda(self, node, *args):
self._visit_function(node, args)
def visitFunction(self, node, *args):
self._add_declared(node.name)
self._visit_function(node, args)
def _expand_tuples(self, args):
for arg in args:
if isinstance(arg, tuple):
for n in arg:
yield n
else:
yield arg
def _visit_function(self, node, args):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union([
arg for arg in self._expand_tuples(node.argnames)
])
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(self, node, *args):
if node.name not in reserved and node.name \
not in self.listener.declared_identifiers and node.name \
not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for mod, alias in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for mod, alias in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs)
self._add_declared(mod)
def visit(self, expr):
visitor.walk(expr, self) # , walker=walker())
class FindTuple(object):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visitTuple(self, node, *args):
for n in node.nodes:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(
p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
p.undeclared_identifiers)
def visit(self, expr):
visitor.walk(expr, self) # , walker=walker())
class ParseFunc(object):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visitFunction(self, node, *args):
self.listener.funcname = node.name
self.listener.argnames = node.argnames
self.listener.defaults = node.defaults
self.listener.varargs = node.varargs
self.listener.kwargs = node.kwargs
def visit(self, expr):
visitor.walk(expr, self)
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python
expression."""
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) # , walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write('(')
self.visit(node.left, *args)
self.buf.write(' %s ' % op)
self.visit(node.right, *args)
self.buf.write(')')
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(' ' + op + ' ')
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator('*', node, *args)
def visitAnd(self, node, *args):
self.booleanop('and', node, *args)
def visitOr(self, node, *args):
self.booleanop('or', node, *args)
def visitBitand(self, node, *args):
self.booleanop('&', node, *args)
def visitBitor(self, node, *args):
self.booleanop('|', node, *args)
def visitBitxor(self, node, *args):
self.booleanop('^', node, *args)
def visitAdd(self, node, *args):
self.operator('+', node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write('.%s' % node.attrname)
def visitSub(self, node, *args):
self.operator('-', node, *args)
def visitNot(self, node, *args):
self.buf.write('not ')
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator('/', node, *args)
def visitFloorDiv(self, node, *args):
self.operator('//', node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
[self.visit(x) for x in node.subs]
self.buf.write(']')
def visitUnarySub(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
if node.lower is not None:
self.visit(node.lower)
self.buf.write(':')
if node.upper is not None:
self.visit(node.upper)
self.buf.write(']')
def visitDict(self, node):
self.buf.write('{')
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(': ')
self.visit(c[i + 1])
if i < len(c) - 2:
self.buf.write(', ')
self.buf.write('}')
def visitTuple(self, node):
self.buf.write('(')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(')')
def visitList(self, node):
self.buf.write('[')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(']')
def visitListComp(self, node):
self.buf.write('[')
self.visit(node.expr)
self.buf.write(' ')
for n in node.quals:
self.visit(n)
self.buf.write(']')
def visitListCompFor(self, node):
self.buf.write(' for ')
self.visit(node.assign)
self.buf.write(' in ')
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(' if ')
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write('(')
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(', ')
self.visit(a)
self.buf.write(')')
class walker(visitor.ASTVisitor):
def dispatch(self, node, *args):
print 'Node:', str(node)
# print "dir:", dir(node)
return visitor.ASTVisitor.dispatch(self, node, *args)
| alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/mako/pyparser.py | Python | agpl-3.0 | 18,596 | [
"VisIt"
] | 7aecb8487b6b01b4abc0f156cb97dd9af2fcc3840d3c89fc0d7d432d63e1fd5a |
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import sys
import unittest
import importlib
import espressomd
from unittest.mock import MagicMock
def _id(x):
return x
# global variable: if one import failed, all subsequent imports will be skipped,
# see skip_future_imports_dependency()
skip_future_imports = False
def configure_and_import(filepath,
gpu=False,
substitutions=lambda x: x,
cmd_arguments=None,
script_suffix=None,
move_to_script_dir=True,
random_seeds=True,
mock_visualizers=True,
**parameters):
"""
Copy a Python script to a new location and alter some lines of code:
- change global variables and local variables (up to 1 indentation level)
- pass command line arguments during import to emulate shell execution
- disable the OpenGL/Mayavi modules if they are not compiled
- disable the matplotlib GUI using a text-based backend
- use random seeds for the RNG in NumPy and ESPResSo
- temporarily move to the directory where the script is located
Parameters
----------
filepath : str
python script to import
gpu : bool
whether GPU is necessary or not
substitutions function
custom text replacement operation (useful to edit out calls to the
OpenGL or Mayavi visualizers' ``run()`` method)
cmd_arguments : list
command line arguments, i.e. sys.argv without the script path
script_suffix : str
suffix to append to the configured script (useful when a single
module is being tested by multiple tests in parallel)
random_seeds : bool
if ``True``, use random seeds in RNGs
mock_visualizers : bool
if ``True``, substitute ES visualizers with `Mock()` classes in case
of `ImportError()` (use ``False`` if an `ImportError()` is relevant
to your test)
move_to_script_dir : bool
if ``True``, move to the script's directory (useful when the script
needs to load files hardcoded as relative paths, or when files are
generated and need cleanup); this is enabled by default
\*\*parameters :
global variables to replace
"""
if skip_future_imports:
module = MagicMock()
skipIfMissingImport = skip_future_imports_dependency(filepath)
return module, skipIfMissingImport
if gpu and not espressomd.gpu_available():
skip_future_imports_dependency(filepath)
skipIfMissingGPU = unittest.skip("gpu not available, skipping test!")
module = MagicMock()
return module, skipIfMissingGPU
filepath = os.path.abspath(filepath)
# load original script
# read in binary mode, then decode as UTF-8 to avoid this python3.5 error:
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 915:
# ordinal not in range(128)
with open(filepath, "rb") as f:
code = f.read().decode(encoding="utf-8")
# custom substitutions
code = substitutions(code)
assert code.strip()
# substitute global variables
code = substitute_variable_values(code, **parameters)
# substitute command line arguments
if cmd_arguments is not None:
code, old_sys_argv = set_cmd(code, filepath, cmd_arguments)
# disable matplotlib GUI using the Agg backend
code = disable_matplotlib_gui(code)
# disable OpenGL/Mayavi GUI using MagicMock()
if mock_visualizers:
code = mock_es_visualization(code)
# use random seeds for ES and NumPy RNGs
if random_seeds:
code = set_random_seeds(code)
# save changes to a new file
if script_suffix:
if script_suffix[0] != "_":
script_suffix = "_" + script_suffix
else:
script_suffix = ""
script_suffix += "_processed.py"
output_filepath = os.path.splitext(filepath)[0] + script_suffix
assert os.path.isfile(output_filepath) is False, \
"File {} already processed, cannot overwrite".format(output_filepath)
with open(output_filepath, "wb") as f:
f.write(code.encode(encoding="utf-8"))
# import
dirname, basename = os.path.split(output_filepath)
if move_to_script_dir:
os.chdir(dirname)
sys.path.insert(0, dirname)
module_name = os.path.splitext(basename)[0]
try:
module = importlib.import_module(module_name)
except espressomd.FeaturesError as err:
skip_future_imports_dependency(filepath)
skipIfMissingFeatures = unittest.skip(str(err) + ", skipping test!")
module = MagicMock()
else:
skipIfMissingFeatures = _id
if cmd_arguments is not None:
# restore original command line arguments
sys.argv = old_sys_argv
return module, skipIfMissingFeatures
def set_cmd(code, filepath, cmd_arguments):
assert isinstance(cmd_arguments, list) \
or isinstance(cmd_arguments, tuple)
sys_argv = list(map(str, cmd_arguments))
sys_argv.insert(0, os.path.basename(filepath))
re_import_sys = re.compile("^import[\t\ ]+sys[\t\ ]*$", re.M)
re_import_argparse = re.compile("^import[\t\ ]+argparse[\t\ ]*$", re.M)
if re_import_sys.search(code) is not None:
code = re_import_sys.sub("\g<0>\nsys.argv = " + str(sys_argv), code, 1)
elif re_import_argparse.search(code) is not None:
code = re_import_argparse.sub("\g<0>\nimport sys\nsys.argv = "
+ str(sys_argv), code, 1)
else:
raise AssertionError("module sys (or argparse) is not imported")
old_sys_argv = list(sys.argv)
return code, old_sys_argv
def substitute_variable_values(code, strings_as_is=False, keep_original=True,
**parameters):
"""
Substitute values of global variables.
Parameters
----------
code : str
Source code to edit.
strings_as_is : bool
If ``True``, consider all values in \*\*parameters are strings and
substitute them in-place without formatting by ``repr()``.
keep_original : bool
Keep the original value (e.g. ``N = 10; _N__original = 1000``), helps
with debugging. If ``False``, make sure the original value is not a
multiline statement, because removing its first line would lead to
a syntax error.
\*\*parameters :
Variable names and their new value.
"""
for variable, value in parameters.items():
assert variable in code, "variable {} not found".format(variable)
re_var = re.compile("^(\t|\ {,4})(" + variable + ")(?= *=[^=])", re.M)
assert re_var.search(code) is not None, \
"variable {} has no assignment".format(variable)
val = strings_as_is and value or repr(value)
code = re_var.sub(r"\g<1>\g<2> = " + val + r"; _\g<2>__original", code)
if not keep_original:
code = re.sub(r"; _" + variable + "__original.+", "", code)
return code
def set_random_seeds(code):
# delete explicit ESPResSo seed
aliases = re.findall(r"([^\s;]+) *= *(?:espressomd\.)?System *\(", code)
pattern = r"(?<=[\s;]){}\.(?:seed|random_number_generator_state)(?= *=[^=])"
subst = "{}.set_random_state_PRNG(); _random_seed_es__original"
for varname in set(aliases):
code = re.sub(pattern.format(varname), subst.format(varname), code)
# delete explicit NumPy seed
code = re.sub(r"(?<=[\s;])(?:numpy|np)\.random\.seed *(?=\()",
"_random_seed_np = (lambda *args, **kwargs: None)", code)
return code
def disable_matplotlib_gui(code):
"""
Use the matplotlib Agg backend (no GUI).
"""
# find under which name matplotlib was imported
re_mpl_aliases = [
re.compile(r"^[\t\ ]*import[\t\ ]+(matplotlib)[\t\ ]*$", re.M),
re.compile(r"^[\t\ ]*import[\t\ ]+matplotlib[\t\ ]+as[\t\ ]+([^\s;]+)",
re.M)]
aliases = set(x for re_mpl in re_mpl_aliases for x in re_mpl.findall(code))
# remove any custom backend
for alias in aliases:
code = re.sub(r"^[\t\ ]*" + alias + r"\.use\(([\"']+).+?\1[\t\ ]*\)",
"", code, 0, re.M)
# use the Agg backend
code = re.sub(r"^([\t\ ]*)(?=(?:from|import)[\t\ ]+matplotlib[\.\s])",
r"\g<1>import matplotlib as _mpl;_mpl.use('Agg');",
code, 1, re.M)
return code
def mock_es_visualization(code):
"""
Replace `import espressomd.visualization_<backend>` by a `MagicMock()` when
the visualization module is not installed, by catching the `ImportError()`
exception. Please note that `espressomd.visualization` is deferring the
exception, thus requiring additional checks. Import aliases are supported,
however please don't use `from espressomd.visualization import *` because
it hides the namespace of classes to be mocked.
"""
# consider all legal import statements in Python3
# (the ordering follows regex precedence rules)
re_es_vis_import = re.compile(r"""
^from\ espressomd\ import\ (?:visualization(?:_opengl|_mayavi)?)\ as\ (\S+)
|^from\ espressomd\ import\ (visualization(?:_opengl|_mayavi)?)
|^from\ espressomd\.visualization(?:_opengl|_mayavi)?\ import\ ([^\n]+)
|^import\ espressomd\.visualization(?:_opengl|_mayavi)?\ as\ (\S+)
|^import\ (espressomd\.visualization(?:_opengl|_mayavi)?)
""".replace(r"\ ", r"[\t\ ]+"), re.VERBOSE | re.M)
# replacement template
r_es_vis_mock = r"""
try:
{0}{1}
except ImportError:
from unittest.mock import MagicMock
import espressomd
{2} = MagicMock()
""".lstrip()
# cannot handle "from espressomd.visualization import *"
re_es_vis_import_namespace = re.compile(
r"^from\ espressomd\.visualization(?:_opengl|_mayavi)?\ import\ \*"
.replace(r"\ ", r"[\t\ ]+"), re.M)
m = re_es_vis_import_namespace.search(code)
assert m is None, "cannot use MagicMock() at line '" + m.group(0) + "'"
def check_for_deferred_ImportError(line, alias):
if "_opengl" not in line and "_mayavi" not in line:
if "openGLLive" in line or "mayaviLive" in line:
return """
if hasattr({0}, 'deferred_ImportError'):
raise {0}.deferred_ImportError""".format(alias)
else:
return """
if hasattr({0}.mayaviLive, 'deferred_ImportError') or \\
hasattr({0}.openGLLive, 'deferred_ImportError'):
raise ImportError()""".format(alias)
else:
return ""
def substitution_es_vis_import(m):
aliases = [x for x in m.groups() if x is not None][0].split(',')
guards = []
for alias in aliases:
line = m.group(0)
if len(aliases) >= 2 and 'from espressomd.visualization' in line:
line = line.split('import')[0] + 'import ' + alias.strip()
if ' as ' in alias:
alias = alias.split(' as ')[1]
alias = alias.strip()
checks = check_for_deferred_ImportError(line, alias)
s = r_es_vis_mock.format(line, checks, alias)
guards.append(s)
return '\n'.join(guards)
# handle deferred ImportError
code = re_es_vis_import.sub(substitution_es_vis_import, code)
return code
def skip_future_imports_dependency(filepath):
"""
If an import failed, all subsequent imports will be skipped. The
fixture message provides the name of the module that failed.
"""
global skip_future_imports
if not skip_future_imports:
module_name = os.path.splitext(os.path.basename(filepath))[0]
assert module_name != ""
skip_future_imports = module_name
return unittest.skip("failed to import {}, skipping test!"
.format(skip_future_imports))
| mkuron/espresso | testsuite/scripts/importlib_wrapper.py | Python | gpl-3.0 | 12,576 | [
"ESPResSo",
"Mayavi"
] | 48faad193edb4b24d355e97e812f379e58b98cd386643cff691f0562fbecd005 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from tempfile import mkdtemp
from os.path import join
from io import StringIO
import shutil
import logging
import scipy.sparse
import numpy as np
import pandas as pd
from numpy.testing import assert_array_almost_equal
import calour as ca
from calour._testing import Tests
from calour.io import _create_biom_table_from_exp, _iter_fasta
class IOTests(Tests):
def setUp(self):
super().setUp()
def _validate_read(self, exp, validate_sample_metadata=True):
# number of bacteria is 12 in biom table
self.assertEqual(exp.data.shape[1], 12)
# number of samples is 21 (should not read the samples only in mapping file)
self.assertEqual(exp.data.shape[0], 21)
# test an OTU/sample to see it is in the right place
fid = 'GG'
sid = 'S12'
# test sample and sequence are in the table
self.assertIn(fid, exp.feature_metadata.index)
self.assertIn(sid, exp.sample_metadata.index)
# test the location in the sample/feature metadata corresponds to the data
spos = exp.sample_metadata.index.get_loc(sid)
fpos = exp.feature_metadata.index.get_loc(fid)
# there is only one cell with value of 1200
self.assertEqual(exp.data[spos, fpos], 1200)
# test the taxonomy is loaded correctly
self.assertEqual('Unknown', exp.feature_metadata['taxonomy'][fid])
# test the sample metadata is loaded correctly
if validate_sample_metadata:
self.assertEqual(exp.sample_metadata['id'][spos], 12)
def test_iter_fasta(self):
seqs = []
heads = []
for chead, cseq in _iter_fasta(self.seqs1_fasta):
seqs.append(cseq)
heads.append(chead)
self.assertListEqual(heads, ['real_seq_6', 'not real seq'])
self.assertListEqual(seqs, ['TT', 'AACGGAGGATGCGAGCGTTATCTGGAATCATTGGGTTTAAAGGGTCCGTAGGCGGGTTGATAAGTCAGAGGTGAAAGCGCTTAGCTCAACTAAGCAACTGCCTTTGAAACTGTCAGTCTTGAATGATTGTGAAGTAGTTGGAATGTGTAG'])
def test_read_metadata(self):
# test it's ok to read the IDs of numbers as str
f = StringIO('''SampleID foo
0100.02 a
100.030 b
''')
try:
ca.io._read_metadata(['0100.02', '100.030'], f, None)
except:
self.fail('Should not raise exception while reading metadata.')
def test_read(self):
# re-enable logging because it is disabled in setUp
logging.disable(logging.NOTSET)
with self.assertLogs(level='INFO') as cm:
# load the simple dataset as sparse
exp = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
# test the log messages are correct
self.assertRegex(cm.output[0], 'loaded 21 samples, 12 features')
self.assertRegex(cm.output[1], "dropped \\(1\\): {'SAMPLE_NOT_EXIST'}")
self.assertRegex(cm.output[2], "These have data but do not have metadata: {'badsample'}")
self.assertRegex(cm.output[3], "dropped \\(1\\): {'FEATURE_NOT_EXIST'}")
self.assertRegex(cm.output[4], "These have data but do not have metadata: {'badfeature'}")
self.assertTrue(scipy.sparse.issparse(exp.data))
self._validate_read(exp)
def test_read_not_sparse(self):
logging.disable(logging.NOTSET)
with self.assertLogs(level='INFO') as cm:
# load the simple dataset as dense
exp = ca.read(self.test1_biom, self.test1_samp, sparse=False, normalize=None)
self.assertFalse(scipy.sparse.issparse(exp.data))
self._validate_read(exp, cm.output)
def test_read_sample_kwargs(self):
# re-enable logging because it is disabled in setUp
logging.disable(logging.NOTSET)
with self.assertLogs(level='INFO') as cm:
# load the simple dataset as sparse
exp = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None,
sample_metadata_kwargs={'parse_dates': ['collection_date']})
# test the log messages are correct
self.assertRegex(cm.output[0], 'loaded 21 samples, 12 features')
self.assertRegex(cm.output[1], "dropped \\(1\\): {'SAMPLE_NOT_EXIST'}")
self.assertRegex(cm.output[2], "These have data but do not have metadata: {'badsample'}")
self.assertRegex(cm.output[3], "dropped \\(1\\): {'FEATURE_NOT_EXIST'}")
self.assertRegex(cm.output[4], "These have data but do not have metadata: {'badfeature'}")
self.assertTrue(scipy.sparse.issparse(exp.data))
self._validate_read(exp)
obs_dates = exp.sample_metadata['collection_date'].tolist()
# the last sample in OTU table does not have metadata, so NaT
exp_dates = [pd.Timestamp('2017-8-1')] * 20 + [pd.NaT]
self.assertListEqual(obs_dates, exp_dates)
def test_read_feature_kwargs(self):
# re-enable logging because it is disabled in setUp
logging.disable(logging.NOTSET)
with self.assertLogs(level='INFO') as cm:
# load the simple dataset as sparse
exp = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None,
feature_metadata_kwargs={'dtype': {'ph': str}})
# test the log messages are correct
self.assertRegex(cm.output[0], 'loaded 21 samples, 12 features')
self.assertRegex(cm.output[1], "dropped \\(1\\): {'SAMPLE_NOT_EXIST'}")
self.assertRegex(cm.output[2], "These have data but do not have metadata: {'badsample'}")
self.assertRegex(cm.output[3], "dropped \\(1\\): {'FEATURE_NOT_EXIST'}")
self.assertRegex(cm.output[4], "These have data but do not have metadata: {'badfeature'}")
self.assertTrue(scipy.sparse.issparse(exp.data))
self._validate_read(exp)
# read as str not float
self.assertEqual(exp.feature_metadata.loc['AA', 'ph'], '4.0')
def test_read_no_metadata(self):
logging.disable(logging.NOTSET)
with self.assertLogs(level='INFO') as cm:
# test loading without a mapping file
exp = ca.read(self.test1_biom, normalize=None)
self.assertRegex(cm.output[0], 'loaded 21 samples, 12 features')
self._validate_read(exp, validate_sample_metadata=False)
def test_read_amplicon(self):
# test loading a taxonomy biom table and filtering/normalizing
exp1 = ca.read_amplicon(self.test1_biom, min_reads=1000, normalize=10000)
exp2 = ca.read(self.test1_biom, normalize=None)
exp2.filter_by_data('abundance', axis=0, cutoff=1000, inplace=True, mean_or_sum='sum')
exp2.normalize(inplace=True)
self.assert_experiment_equal(exp1, exp2)
self.assertIn('taxonomy', exp1.feature_metadata.columns)
def test_read_openms_bucket_table(self):
# load the openms bucket table with no metadata
exp = ca.read(self.openms_csv, data_file_type='csv', sparse=False, normalize=None)
self.assertEqual(len(exp.sample_metadata), 9)
self.assertEqual(len(exp.feature_metadata), 10)
self.assertEqual(exp.shape, (9, 10))
self.assertEqual(exp.data[0, :].sum(), 8554202)
self.assertEqual(exp.data[:, 1].sum(), 13795540)
self.assertEqual(exp.sparse, False)
def test_read_openms_bucket_table_samples_are_rows(self):
# load the openms bucket table with no metadata
exp = ca.read(self.openms_samples_rows_csv, data_file_type='csv', sample_in_row=True, sparse=False, normalize=None)
self.assertEqual(len(exp.sample_metadata), 9)
self.assertEqual(len(exp.feature_metadata), 10)
self.assertEqual(exp.shape, (9, 10))
self.assertEqual(exp.data[0, :].sum(), 8554202)
self.assertEqual(exp.data[:, 1].sum(), 13795540)
self.assertEqual(exp.sparse, False)
def test_read_open_ms(self):
exp = ca.read_ms(self.openms_csv, normalize=None, data_file_type='openms')
# test we get the MZ and RT correct
self.assertIn('MZ', exp.feature_metadata)
self.assertIn('RT', exp.feature_metadata)
self.assertIn('mz_rt', exp.feature_metadata)
self.assertEqual(exp.feature_metadata['MZ'].iloc[1], 118.0869)
self.assertEqual(exp.feature_metadata['RT'].iloc[1], 23.9214)
self.assertEqual(exp.feature_metadata['mz_rt'].iloc[1], '118.0869_23.92')
# test normalizing
exp = ca.read_ms(self.openms_csv, normalize=10000, data_file_type='openms')
assert_array_almost_equal(exp.data.sum(axis=1), np.ones(exp.shape[0]) * 10000)
# test load sparse
exp = ca.read_ms(self.openms_csv, sparse=True, normalize=None, data_file_type='openms')
self.assertEqual(exp.sparse, True)
def test_read_biom_ms(self):
# load a biom table with MZ/RT in featureID, and associated gnps clusterinfo file
exp = ca.read_ms(self.ms_biom_table, sample_metadata_file=self.gnps_map,
data_file_type='biom', use_gnps_id_from_AllFiles=False, normalize=None)
self.assertIn('MZ', exp.feature_metadata)
self.assertIn('RT', exp.feature_metadata)
self.assertEqual(exp.feature_metadata['MZ'].iloc[1], 899.53)
self.assertEqual(exp.feature_metadata['RT'].iloc[0], 314)
def test_read_mzmine2_ms(self):
# load an mzmine2 metabolomics table, and associated gnps clusterinfo file
exp = ca.read_ms(self.mzmine2_csv, sample_metadata_file=self.gnps_map,
data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)
self.assertIn('MZ', exp.feature_metadata)
self.assertIn('RT', exp.feature_metadata)
self.assertEqual(exp.feature_metadata['MZ'].iloc[1], 200)
self.assertEqual(exp.feature_metadata['RT'].iloc[0], 1)
self.assertEqual(exp.data[2, 1], 35900)
def test_read_mzmine2_ms_with_idstr(self):
# load an mzmine2 metabolomics table with the sampleids inflated with additional info
exp = ca.read_ms(self.mzmine2_with_idstr_csv, sample_metadata_file=self.gnps_map,
use_gnps_id_from_AllFiles=False, cut_sample_id_sep='_', normalize=None)
self.assertEqual(exp.feature_metadata['MZ'].iloc[1], 200)
self.assertEqual(exp.feature_metadata['RT'].iloc[0], 1)
self.assertEqual(exp.sample_metadata['field2'][0], 'f')
self.assertEqual(exp.data[2, 1], 35900)
self.assertEqual(exp.data.shape, (6, 6))
    def test_read_gnps_ms(self):
        """read_ms on a gnps-ms2 export: plain load, normalized load, and sparse load."""
        # load the gnps exported table with associated sample metadata and cluster info
        exp = ca.read_ms(self.gnps_table, sample_metadata_file=self.gnps_map,
                         data_file_type='gnps-ms2', normalize=None)
        # verify the load extracts required fields to metadata
        self.assertEqual(exp.data[2, 3], 139692)
        # test normalizing: every sample should sum to the requested total
        exp = ca.read_ms(self.gnps_table, sample_metadata_file=self.gnps_map,
                         data_file_type='gnps-ms2', normalize=10000)
        assert_array_almost_equal(exp.data.sum(axis=1), np.ones(exp.shape[0]) * 10000)
        # test load sparse
        exp = ca.read_ms(self.gnps_table, sample_metadata_file=self.gnps_map,
                         data_file_type='gnps-ms2', normalize=None, sparse=True)
        self.assertEqual(exp.sparse, True)
    def test_read_open_ms_samples_rows(self):
        """sample_in_row=True should still parse MZ/RT for a transposed openms table."""
        exp = ca.read_ms(self.openms_samples_rows_csv, normalize=None, sample_in_row=True, data_file_type='openms')
        # test we get the MZ and RT correct
        self.assertIn('MZ', exp.feature_metadata)
        self.assertIn('RT', exp.feature_metadata)
        self.assertAlmostEqual(exp.feature_metadata['MZ'].iloc[1], 118.0869)
        self.assertAlmostEqual(exp.feature_metadata['RT'].iloc[1], 23.9214)
    def test_read_qiime2(self):
        """read_qiime2 should load plain and hashed qza tables (with rep-seqs/taxonomy)."""
        # test the non-hashed table
        exp = ca.read_qiime2(self.qiime2table, normalize=None, min_reads=None)
        self.assertEqual(exp.shape, (104, 658))
        # and the hashed table with rep seqs and taxonomy files
        exp = ca.read_qiime2(self.q2_cfs_table, sample_metadata_file=self.q2_cfs_map,
                             rep_seq_file=self.q2_cfs_repseqs, taxonomy_file=self.q2_cfs_taxonomy,
                             normalize=None, min_reads=None)
        self.assertEqual(exp.shape, (87, 2130))
        # test if the index is indeed sequences, and taxonomy is loaded correctly
        test_seq = 'TACGTAGGGAGCAAGCGTTGTCCGGAATTACTGGGTGTAAAGGGTGCGTAGGCGGGTATGCAAGTCATATGTGAAATACCGGGGCTCAACTCCGGGGCTGCATAAGAAACTGTATATCTTGAGTACAGGAGAGGTAAGCGGAATTCCTAG'
        self.assertEqual(exp.feature_metadata['Taxon'][test_seq], 'k__Bacteria; p__Firmicutes; c__Clostridia; o__Clostridiales; f__Ruminococcaceae; g__; s__')
    def test_create_biom_table_from_exp(self):
        """the exported biom table must mirror ids, data (transposed) and taxonomy."""
        exp = ca.read(self.test1_biom, self.test1_samp, normalize=None)
        table = _create_biom_table_from_exp(exp)
        self.assertCountEqual(table.ids(axis='observation'), exp.feature_metadata.index.values)
        self.assertCountEqual(table.ids(axis='sample'), exp.sample_metadata.index.values)
        # biom stores observations x samples, hence the transpose
        assert_array_almost_equal(table.matrix_data.toarray(), exp.get_data(sparse=False).transpose())
        metadata = table.metadata(id=exp.feature_metadata.index[1], axis='observation')
        self.assertEqual(metadata['taxonomy'], exp.feature_metadata['taxonomy'].iloc[1])
    def test_save_fasta(self):
        """save_fasta should write one record per feature sequence."""
        exp = ca.read(self.test1_biom, self.test1_samp, normalize=None)
        d = mkdtemp()
        f = join(d, 'test1.fasta')
        exp.save_fasta(f)
        seqs = []
        for chead, cseq in _iter_fasta(f):
            seqs.append(cseq)
        self.assertCountEqual(seqs, exp.feature_metadata.index.values)
        shutil.rmtree(d)
    def test_save_biom(self):
        """round-trip the experiment through the hdf5, txt and metadata-less biom formats."""
        # NOTE: Currently not testing the save biom hdf with taxonomy
        # as there is a bug there!
        exp = ca.read_amplicon(self.test1_biom, self.test1_samp, normalize=None, min_reads=None)
        d = mkdtemp()
        f = join(d, 'test1.save.biom')
        # test the hdf5 biom format
        exp.save_biom(f, fmt='hdf5')
        newexp = ca.read_amplicon(f, self.test1_samp, normalize=None, min_reads=None)
        self.assert_experiment_equal(newexp, exp)
        # test the txt biom format (taxonomy is not preserved there)
        exp.save_biom(f, fmt='txt')
        newexp = ca.read_amplicon(f, self.test1_samp, normalize=None, min_reads=None)
        self.assert_experiment_equal(newexp, exp, ignore_md_fields=['taxonomy'])
        # test the hdf5 biom format with no taxonomy
        exp.save_biom(f, add_metadata=None)
        newexp = ca.read(f, self.test1_samp, normalize=None)
        self.assertTrue('taxonomy' not in newexp.feature_metadata)
        self.assert_experiment_equal(newexp, exp, ignore_md_fields=['taxonomy'])
        shutil.rmtree(d)
    def test_save(self):
        """save() should emit a loadable .biom + _sample.txt pair."""
        exp = ca.read(self.test2_biom, self.test2_samp, normalize=None)
        d = mkdtemp()
        f = join(d, 'test1.save')
        # test the json biom format
        exp.save(f, fmt='json')
        newexp = ca.read(f+'.biom', f+'_sample.txt', normalize=None)
        self.assert_experiment_equal(newexp, exp, ignore_md_fields=['#SampleID.1'])
        shutil.rmtree(d)
# Allow running this test module directly with "python test_io.py".
if __name__ == "__main__":
    main()
| RNAer/Calour | calour/tests/test_io.py | Python | bsd-3-clause | 15,777 | [
"OpenMS"
] | 6154720206c19e67a728f16e679be95f1cc66f80d5b53e8b3285168355b9da3b |
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from ansible.compat.six import with_metaclass
try:
    # Reuse the Display object created by the CLI entry point when
    # Ansible is running as a program.
    from __main__ import display
except ImportError:
    # Fallback for library/test usage where no CLI __main__ exists:
    # create a fresh Display instance.
    from ansible.utils.display import Display
    display = Display()
class BaseCacheModule(with_metaclass(ABCMeta, object)):
    """Abstract base class for Ansible cache plugins.

    Concrete plugins must implement the dict-like interface below
    (get/set/keys/contains/delete/flush/copy) against their backing
    store.
    """

    # Backwards compat only. Just import the global display instead
    _display = display

    @abstractmethod
    def get(self, key):
        """Return the cached value stored under ``key``."""
        pass

    @abstractmethod
    def set(self, key, value):
        """Store ``value`` under ``key``."""
        pass

    @abstractmethod
    def keys(self):
        """Return all keys currently in the cache."""
        pass

    @abstractmethod
    def contains(self, key):
        """Return True when ``key`` is present in the cache."""
        pass

    @abstractmethod
    def delete(self, key):
        """Remove ``key`` (and its value) from the cache."""
        pass

    @abstractmethod
    def flush(self):
        """Remove every entry from the cache."""
        pass

    @abstractmethod
    def copy(self):
        """Return a (shallow) dict copy of the whole cache contents."""
        pass
| kaarolch/ansible | lib/ansible/plugins/cache/base.py | Python | gpl-3.0 | 1,564 | [
"Brian"
] | 0321d5f34ed5f740c8ba6aa333264bbfade497bba7dd6ceafd3e785551e556bb |
# -*- coding: utf-8 -*-
import logging
import os
import subprocess
from galaxy.datatypes.data import get_file_peek, Text
from galaxy.datatypes.metadata import MetadataElement
log = logging.getLogger(__name__)
def count_special_lines(word, filename, invert=False):
    """
    Count the lines of ``filename`` matching the pattern ``word``.

    grep is used to speed up the searching and counting on large files.

    :param word: pattern handed to ``grep`` (a POSIX regular expression)
    :param filename: path of the file to scan
    :param invert: when True, count the lines that do *not* match
    :return: number of (non-)matching lines, or 0 when counting fails
    """
    cmd = ["grep", "-c"]
    if invert:
        cmd.append('-v')
    cmd.extend([word, filename])
    try:
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        # grep -c prints a single integer; communicate() also reaps the
        # child process so we do not leave zombies behind.
        return int(out.communicate()[0].split()[0])
    except Exception:
        # Preserve the historical best-effort contract (return 0 when e.g.
        # grep is missing or the file is unreadable), but leave a trace in
        # the log instead of swallowing the error silently.
        log.exception("counting lines matching %r in %r failed", word, filename)
    return 0
class Stockholm_1_0(Text):
    """Multiple sequence alignments in Stockholm 1.0 format (Pfam/Rfam).

    Files start with a ``# STOCKHOLM 1.0`` header line and every
    alignment record is terminated by a line containing only ``//``.
    """
    file_ext = "stockholm"

    MetadataElement( name="number_of_alignments", default=0, desc="Number of multiple alignments", readonly=True, visible=True, optional=True, no_value=0 )

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the peek text and an "N alignments" blurb on the dataset."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            # BUG FIX: this previously read ``dataset.metadata.number_of_models``,
            # which is not declared for this datatype; the registered metadata
            # element (and the one set_meta fills in) is ``number_of_alignments``.
            if (dataset.metadata.number_of_alignments == 1):
                dataset.blurb = "1 alignment"
            else:
                dataset.blurb = "%s alignments" % dataset.metadata.number_of_alignments
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disc'

    def sniff( self, filename ):
        """Detect the ``# STOCKHOLM 1.0`` header line anywhere in the file."""
        # NOTE(review): ``[[:space:]+]`` is a bracket expression matching a
        # whitespace character OR a literal '+'; kept as-is for backward
        # compatibility -- confirm whether ``[[:space:]]+`` was intended.
        return count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', filename) > 0

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of alignments in dataset.
        """
        # BUG FIX: store the count under the declared metadata element name
        # (``number_of_alignments``) instead of the undeclared
        # ``number_of_models``.
        dataset.metadata.number_of_alignments = count_special_lines('^#[[:space:]+]STOCKHOLM[[:space:]+]1.0', dataset.file_name)

    def split( cls, input_datasets, subdir_generator_function, split_params):
        """
        Split the input file into parts, each holding whole alignment records.

        :param input_datasets: exactly one dataset to split
        :param subdir_generator_function: callable returning a new part directory
        :param split_params: dict with 'split_mode' ('to_size') and 'split_size'
        """
        if split_params is None:
            return None
        if len(input_datasets) > 1:
            raise Exception("STOCKHOLM-file splitting does not support multiple files")
        input_files = [ds.file_name for ds in input_datasets]

        chunk_size = None
        if split_params['split_mode'] == 'number_of_parts':
            raise Exception('Split mode "%s" is currently not implemented for STOCKHOLM-files.' % split_params['split_mode'])
        elif split_params['split_mode'] == 'to_size':
            chunk_size = int(split_params['split_size'])
        else:
            raise Exception('Unsupported split mode %s' % split_params['split_mode'])

        def _read_stockholm_records( filename ):
            # Yield one alignment record (list of lines) at a time; a record
            # ends at the '//' terminator line.
            lines = []
            with open(filename) as handle:
                for line in handle:
                    lines.append( line )
                    if line.strip() == '//':
                        yield lines
                        lines = []

        def _write_part_stockholm_file( accumulated_lines ):
            part_dir = subdir_generator_function()
            part_path = os.path.join( part_dir, os.path.basename( input_files[0] ) )
            # Use a context manager so the part file is closed even on error.
            with open( part_path, 'w' ) as part_file:
                part_file.writelines( accumulated_lines )

        try:
            stockholm_records = _read_stockholm_records( input_files[0] )
            stockholm_lines_accumulated = []
            for counter, stockholm_record in enumerate( stockholm_records, start=1):
                stockholm_lines_accumulated.extend( stockholm_record )
                if counter % chunk_size == 0:
                    _write_part_stockholm_file( stockholm_lines_accumulated )
                    stockholm_lines_accumulated = []
            # Flush the remainder that did not fill a whole chunk.
            if stockholm_lines_accumulated:
                _write_part_stockholm_file( stockholm_lines_accumulated )
        except Exception as e:
            log.error('Unable to split files: %s' % str(e))
            raise
    split = classmethod(split)
| SANBI-SA/tools-iuc | datatypes/msa/stockholm_1_0/stockholm_1_0.py | Python | mit | 4,052 | [
"Galaxy"
] | ded758045a42c9e59c8758124540444a6bf46c307832a757dec3c76eee34f5a8 |
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Main interface definition for pos application. """
from decimal import Decimal
import logging
import pango
import gtk
from kiwi import ValueUnset
from kiwi.currency import currency
from kiwi.datatypes import converter, ValidationError
from kiwi.python import Settable
from kiwi.ui.objectlist import Column
from kiwi.ui.widgets.contextmenu import ContextMenu, ContextMenuItem
from storm.expr import And, Lower
from stoqdrivers.enum import UnitType
from stoqlib.api import api
from stoqlib.domain.devices import DeviceSettings
from stoqlib.domain.payment.group import PaymentGroup
from stoqlib.domain.product import StorableBatch
from stoqlib.domain.sale import Sale, Delivery
from stoqlib.domain.sellable import Sellable
from stoqlib.drivers.scale import read_scale_info
from stoqlib.exceptions import StoqlibError, TaxError
from stoqlib.gui.events import POSConfirmSaleEvent, CloseLoanWizardFinishEvent
from stoqlib.lib.barcode import parse_barcode, BarcodeInfo
from stoqlib.lib.decorators import cached_property, public
from stoqlib.lib.defaults import quantize
from stoqlib.lib.formatters import (format_sellable_description,
format_quantity, get_formatted_price)
from stoqlib.lib.message import warning, info, yesno, marker
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.pluginmanager import get_plugin_manager
from stoqlib.lib.translation import stoqlib_gettext as _
from stoqlib.gui.base.dialogs import push_fullscreen, pop_fullscreen
from stoqlib.gui.base.gtkadds import button_set_image_with_label
from stoqlib.gui.dialogs.batchselectiondialog import BatchDecreaseSelectionDialog
from stoqlib.gui.dialogs.sellableimage import SellableImageViewer
from stoqlib.gui.editors.deliveryeditor import CreateDeliveryEditor
from stoqlib.gui.editors.serviceeditor import ServiceItemEditor
from stoqlib.gui.fiscalprinter import FiscalPrinterHelper
from stoqlib.gui.search.deliverysearch import DeliverySearch
from stoqlib.gui.search.personsearch import ClientSearch
from stoqlib.gui.search.productsearch import ProductSearch
from stoqlib.gui.search.salesearch import (SaleWithToolbarSearch,
SoldItemsByBranchSearch)
from stoqlib.gui.search.sellablesearch import SaleSellableSearch
from stoqlib.gui.search.servicesearch import ServiceSearch
from stoqlib.gui.search.paymentreceivingsearch import PaymentReceivingSearch
from stoqlib.gui.search.workordersearch import WorkOrderFinishedSearch
from stoqlib.gui.utils.keybindings import get_accels
from stoqlib.gui.utils.logo import render_logo_pixbuf
from stoqlib.gui.wizards.loanwizard import CloseLoanWizard
from stoqlib.gui.wizards.salereturnwizard import SaleTradeWizard
from stoq.gui.shell.shellapp import ShellApp
log = logging.getLogger(__name__)
@public(since="1.5.0")
class TemporarySaleItem(object):
    """In-memory representation of one item of the sale being built.

    Nothing is persisted until the sale is confirmed at checkout; until
    then the POS manipulates these lightweight objects only.
    """

    def __init__(self, sellable, quantity, price=None,
                 notes=None, can_remove=True, quantity_decreased=0, batch=None):
        # Use only 3 decimal places for the quantity
        self.quantity = Decimal('%.3f' % quantity)
        self.quantity_decreased = quantity_decreased
        self.batch = batch
        self.sellable = sellable
        self.description = sellable.get_description()
        self.unit = sellable.unit_description
        self.code = sellable.code
        self.can_remove = can_remove
        # NOTE(review): a price of 0 is falsy too and gets replaced by the
        # sellable's default price -- confirm this is intended.
        if not price:
            price = sellable.price
        self.base_price = sellable.base_price
        self.price = price
        self.deliver = False
        self.estimated_fix_date = None
        self.notes = notes

    @property
    def full_description(self):
        # Description decorated with the batch number, when there is one.
        return format_sellable_description(self.sellable, self.batch)

    # FIXME: Single joins dont cache de value if its None, and we use that a lot
    # here. Add a cache until we fix SingleJoins to cache de value properly.
    @cached_property(ttl=0)
    def service(self):
        return self.sellable.service

    @property
    def total(self):
        # Sale items are suposed to have only 2 digits, but the value price
        # * quantity may have more than 2, so we need to round it.
        return quantize(currency(self.price * self.quantity))

    @property
    def quantity_unit(self):
        # Render the quantity without a fractional part when it is integral,
        # followed by the unit description (e.g. "2 Kg").
        qtd_string = ''
        if (self.quantity * 100 % 100) == 0:
            qtd_string = '%.0f' % self.quantity
        else:
            qtd_string = '%s' % self.quantity.normalize()
        return '%s %s' % (qtd_string, self.unit)
class PosApp(ShellApp):
app_title = _('Point of Sales')
gladefile = "pos"
    def __init__(self, window, store=None):
        # State carried over from trade/loan flows (used at checkout).
        self._suggested_client = None
        self._current_store = None
        self._trade = None
        self._trade_infobar = None
        # The sellable and batch selected, in case the parameter
        # CONFIRM_QTY_ON_BARCODE_ACTIVATE is used.
        self._sellable = None
        self._batch = None
        ShellApp.__init__(self, window, store=store)
        self._delivery = None
        self._coupon = None
        # Cant use self._coupon to verify if there is a sale, since
        # CONFIRM_SALES_ON_TILL doesnt create a coupon
        self._sale_started = False
        self._scale_settings = DeviceSettings.get_scale_settings(self.store)
#
# Application
#
    def create_actions(self):
        """Register all menu/toolbar actions of the POS app with their accels."""
        group = get_accels('app.pos')
        actions = [
            # File
            ('NewTrade', None, _('Trade...'),
             group.get('new_trade')),
            ('PaymentReceive', None, _('Payment Receival...'),
             group.get('payment_receive')),
            ("TillOpen", None, _("Open Till..."),
             group.get('till_open')),
            ("TillClose", None, _("Close Till..."),
             group.get('till_close')),
            ("TillVerify", None, _("Verify Till..."),
             group.get('till_verify')),
            ("LoanClose", None, _("Close loan...")),
            ("WorkOrderClose", None, _("Close work order...")),

            # Order
            ("OrderMenu", None, _("Order")),
            ('ConfirmOrder', None, _('Confirm...'),
             group.get('order_confirm')),
            ('CancelOrder', None, _('Cancel...'),
             group.get('order_cancel')),
            ('NewDelivery', None, _('Create delivery...'),
             group.get('order_create_delivery')),

            # Search
            ("Sales", None, _("Sales..."),
             group.get('search_sales')),
            ("SoldItemsByBranchSearch", None, _("Sold Items by Branch..."),
             group.get('search_sold_items')),
            ("Clients", None, _("Clients..."),
             group.get('search_clients')),
            ("ProductSearch", None, _("Products..."),
             group.get('search_products')),
            ("ServiceSearch", None, _("Services..."),
             group.get('search_services')),
            ("DeliverySearch", None, _("Deliveries..."),
             group.get('search_deliveries')),
        ]
        self.pos_ui = self.add_ui_actions('', actions,
                                          filename='pos.xml')

        toggle_actions = [
            ('DetailsViewer', None, _('Details viewer'),
             group.get('toggle_details_viewer')),
        ]
        self.add_ui_actions('', toggle_actions, 'ToggleActions', 'toggle')

        self.set_help_section(_("POS help"), 'app-pos')
    def create_ui(self):
        """Wire the widget tree: columns, focus chains, printer, proxies."""
        self.sale_items.set_columns(self.get_columns())
        self.sale_items.set_selection_mode(gtk.SELECTION_BROWSE)
        # Setting up the widget groups
        self.main_vbox.set_focus_chain([self.pos_vbox])

        self.pos_vbox.set_focus_chain([self.list_header_hbox, self.list_vbox])
        self.list_vbox.set_focus_chain([self.footer_hbox])
        self.footer_hbox.set_focus_chain([self.toolbar_vbox])

        # Setting up the toolbar area
        self.toolbar_vbox.set_focus_chain([self.toolbar_button_box])
        self.toolbar_button_box.set_focus_chain([self.checkout_button,
                                                 self.delivery_button,
                                                 self.edit_item_button,
                                                 self.remove_item_button])

        # Setting up the barcode area
        self.item_hbox.set_focus_chain([self.barcode, self.quantity,
                                        self.item_button_box])
        self.item_button_box.set_focus_chain([self.add_button,
                                              self.advanced_search])
        self._setup_printer()
        self._setup_widgets()
        self._setup_proxies()
        self._clear_order()
    def activate(self, refresh=True):
        # The POS app has nothing to print/export, so hide those actions.
        for widget in (self.window.Print, self.window.ExportSpreadSheet):
            widget.set_visible(False)

        # Hides or shows sellable description
        self._confirm_quantity = sysparam.get_bool('CONFIRM_QTY_ON_BARCODE_ACTIVATE')
        self.sellable_description.set_visible(self._confirm_quantity)

        # Hide toolbar specially for pos
        self.uimanager.get_widget('/toolbar').hide()
        self.uimanager.get_widget('/menubar/ViewMenu/ToggleToolbar').hide()

        self.check_open_inventory()
        self._update_parameter_widgets()
        self._update_widgets()
        # This is important to do after the other calls, since
        # it emits signals that disable UI which might otherwise
        # be enabled.
        self._printer.run_initial_checks()
        CloseLoanWizardFinishEvent.connect(self._on_CloseLoanWizardFinishEvent)
    def deactivate(self):
        # Persist the details-viewer toggle so the next session restores it.
        api.user_settings.set('pos-show-details-viewer',
                              self.DetailsViewer.get_active())
        self.uimanager.remove_ui(self.pos_ui)

        # Re enable toolbar
        self.uimanager.get_widget('/toolbar').show()
        self.uimanager.get_widget('/menubar/ViewMenu/ToggleToolbar').show()

        # one PosApp is created everytime the pos is opened. If we dont
        # disconnect, the callback from this instance would still be called, but
        # its no longer valid.
        CloseLoanWizardFinishEvent.disconnect(self._on_CloseLoanWizardFinishEvent)
    def setup_focus(self):
        # Start every interaction at the barcode entry.
        self.barcode.grab_focus()
    def can_change_application(self):
        # Block POS application if we are in the middle of a sale.
        can_change_application = not self._sale_started
        if not can_change_application:
            # Offer to cancel the ongoing sale; cancelling unblocks the switch.
            if yesno(_('You must finish the current sale before you change to '
                       'another application.'),
                     gtk.RESPONSE_NO, _("Cancel sale"), _("Finish sale")):
                self._cancel_order(show_confirmation=False)
                return True

        return can_change_application
    def can_close_application(self):
        # Same policy as can_change_application: a started sale blocks closing
        # unless the user chooses to cancel it.
        can_close_application = not self._sale_started
        if not can_close_application:
            if yesno(_('You must finish or cancel the current sale before you '
                       'can close the POS application.'),
                     gtk.RESPONSE_NO, _("Cancel sale"), _("Finish sale")):
                self._cancel_order(show_confirmation=False)
                return True
        return can_close_application
    def get_columns(self):
        """Return the kiwi ObjectList columns shown for the sale items."""
        return [Column('code', title=_('Reference'),
                       data_type=str, width=130, justify=gtk.JUSTIFY_RIGHT),
                Column('full_description',
                       title=_('Description'), data_type=str, expand=True,
                       searchable=True, ellipsize=pango.ELLIPSIZE_END),
                Column('price', title=_('Price'), data_type=currency,
                       width=110, justify=gtk.JUSTIFY_RIGHT),
                Column('quantity_unit', title=_('Quantity'), data_type=unicode,
                       width=110, justify=gtk.JUSTIFY_RIGHT),
                Column('total', title=_('Total'), data_type=currency,
                       justify=gtk.JUSTIFY_RIGHT, width=100)]
    def set_open_inventory(self):
        # While an inventory is open, selling must be blocked.
        self.set_sensitive(self._inventory_widgets, False)
    @public(since="1.5.0")
    def add_sale_item(self, item):
        """Add a TemporarySaleItem item to the sale.

        :param item: a `temporary item <TemporarySaleItem>` to add to the sale.
          If the caller wants to store extra information about the sold items,
          it can create a subclass of TemporarySaleItem and pass that class
          here. This information will propagate when <POSConfirmSaleEvent> is
          emitted.
        """
        assert isinstance(item, TemporarySaleItem)
        self._update_added_item(item)
#
# Private
#
    def _setup_printer(self):
        """Create the fiscal printer helper and hook its status signals."""
        self._printer = FiscalPrinterHelper(self.store,
                                            parent=self)
        self._printer.connect('till-status-changed',
                              self._on_PrinterHelper__till_status_changed)
        self._printer.connect('ecf-changed',
                              self._on_PrinterHelper__ecf_changed)
        self._printer.setup_midnight_check()
    def _setup_proxies(self):
        # Bind the quantity spinbutton to a Settable model; each new item
        # starts with quantity 1.
        self.sellableitem_proxy = self.add_proxy(
            Settable(quantity=Decimal(1)), ['quantity'])
    def _update_parameter_widgets(self):
        """Apply system-parameter driven UI tweaks (delivery, fullscreen, till)."""
        self.delivery_button.props.visible = sysparam.get_bool('HAS_DELIVERY_MODE')

        window = self.get_toplevel()
        if sysparam.get_bool('POS_FULL_SCREEN'):
            window.fullscreen()
            push_fullscreen(window)
        else:
            pop_fullscreen(window)
            window.unfullscreen()

        # With a separate cashier the till actions live elsewhere.
        for widget in [self.TillOpen, self.TillClose, self.TillVerify]:
            widget.set_visible(not sysparam.get_bool('POS_SEPARATE_CASHIER'))

        if sysparam.get_bool('CONFIRM_SALES_ON_TILL'):
            confirm_label = _("_Close")
        else:
            confirm_label = _("_Checkout")
        button_set_image_with_label(self.checkout_button,
                                    gtk.STOCK_APPLY, confirm_label)
    def _setup_widgets(self):
        """Create extra widgets and sensitivity groups not defined in glade."""
        # Widgets that must be disabled while an inventory is open.
        self._inventory_widgets = [self.barcode, self.quantity,
                                   self.sale_items, self.advanced_search,
                                   self.checkout_button, self.NewTrade,
                                   self.LoanClose, self.WorkOrderClose]
        self.register_sensitive_group(self._inventory_widgets,
                                      lambda: not self.has_open_inventory())

        self.stoq_logo.set_from_pixbuf(render_logo_pixbuf('pos'))

        self.order_total_label.set_size('xx-large')
        self.order_total_label.set_bold(True)
        self._create_context_menu()

        self.quantity.set_digits(3)

        self._image_slave = SellableImageViewer(size=(175, 175))
        self.attach_slave('image_holder', self._image_slave)

        self.details_lbl.set_ellipsize(pango.ELLIPSIZE_END)
        self.extra_details_lbl.set_ellipsize(pango.ELLIPSIZE_END)
        self.details_box.set_visible(False)
        self.DetailsViewer.set_active(
            api.user_settings.get('pos-show-details-viewer', True))
    def _create_context_menu(self):
        """Build the right-click add/remove menu for the sale items list."""
        menu = ContextMenu()

        item = ContextMenuItem(gtk.STOCK_ADD)
        item.connect('activate', self._on_context_add__activate)
        menu.append(item)

        item = ContextMenuItem(gtk.STOCK_REMOVE)
        item.connect('activate', self._on_context_remove__activate)
        item.connect('can-disable', self._on_context_remove__can_disable)
        menu.append(item)

        self.sale_items.set_context_menu(menu)
        menu.show_all()
def _update_totals(self):
subtotal = self._get_subtotal()
text = _(u"Total: %s") % converter.as_string(currency, subtotal)
self.order_total_label.set_text(text)
    def _update_added_item(self, sale_item, new_item=True):
        """Insert or update a klist item according with the new_item
        argument
        """
        if new_item:
            # -1 from the coupon means the fiscal coupon rejected the item;
            # abort without touching the list.
            if self._coupon_add_item(sale_item) == -1:
                return
            self.sale_items.append(sale_item)
        else:
            self.sale_items.update(sale_item)

        self.sale_items.select(sale_item)

        # Reset all the widgets for adding a new sellable.
        self.barcode.set_text('')
        self.barcode.grab_focus()
        self._reset_quantity_proxy()
        self._update_totals()
        self._sellable = None
        self._batch = None
        if self._confirm_quantity:
            self.sellable_description.set_text('')
    def _update_list(self, sellable, batch=None):
        """Dispatch the sellable to the product or service add path."""
        assert isinstance(sellable, Sellable)
        try:
            sellable.check_taxes_validity()
        except TaxError as strerr:
            # If the sellable icms taxes are not valid, we cannot sell it.
            warning(str(strerr))
            return

        quantity = self.sellableitem_proxy.model.quantity

        if sellable.product:
            self._add_product_sellable(sellable, quantity, batch=batch)
        elif sellable.service:
            self._add_service_sellable(sellable, quantity)
    def _add_service_sellable(self, sellable, quantity):
        """Add a service item, letting the user edit it (notes, fix date) first."""
        sale_item = TemporarySaleItem(sellable=sellable,
                                      quantity=quantity)
        with api.new_store() as store:
            rv = self.run_dialog(ServiceItemEditor, store, sale_item)
        if not rv:
            # Dialog was cancelled; nothing gets added.
            return
        self._update_added_item(sale_item)
    def _add_product_sellable(self, sellable, quantity, batch=None):
        """Add a product item, resolving the storable batch when needed."""
        product = sellable.product
        if product.storable and not batch and product.storable.is_batch:
            available_batches = list(product.storable.get_available_batches(
                api.get_current_branch(self.store)))
            # The trivial case, where there's just one batch, use it directly
            if len(available_batches) == 1:
                batch = available_batches[0]
                sale_item = TemporarySaleItem(sellable=sellable,
                                              quantity=quantity,
                                              batch=batch)
                self._update_added_item(sale_item)
                return

            # Several batches: ask the user how to split the quantity.
            rv = self.run_dialog(BatchDecreaseSelectionDialog, self.store,
                                 model=sellable.product_storable,
                                 quantity=quantity)
            if not rv:
                return
            # One sale item per chosen (batch, quantity) pair.
            for batch, b_quantity in rv.items():
                sale_item = TemporarySaleItem(sellable=sellable,
                                              quantity=b_quantity,
                                              batch=batch)
                self._update_added_item(sale_item)
        else:
            sale_item = TemporarySaleItem(sellable=sellable,
                                          quantity=quantity,
                                          batch=batch)
            self._update_added_item(sale_item)
def _get_subtotal(self):
return currency(sum([item.total for item in self.sale_items]))
    def _get_sellable_and_batch(self):
        """Resolve the barcode entry into a (sellable, batch) pair.

        Lookup order: scale barcode decoding, sellable barcode, sellable
        code, then storable batch number. Either element of the returned
        tuple may be None.
        """
        text = self.barcode.get_text()
        # There is already a sellable selected and codebar not changed.
        # Return it instead.
        if self._sellable and not text:
            return self._sellable, self._batch
        if not text:
            raise StoqlibError("_get_sellable_and_batch needs a barcode")
        text = unicode(text)

        fmt = api.sysparam.get_int('SCALE_BARCODE_FORMAT')

        # Check if this barcode is from a scale
        barinfo = parse_barcode(text, fmt)
        if barinfo:
            text = barinfo.code
            weight = barinfo.weight

        batch = None
        query = Sellable.status == Sellable.STATUS_AVAILABLE
        # FIXME: Put this logic for getting the sellable based on
        # barcode/code/batch_number on domain. Note that something very
        # simular is done on abstractwizard.py
        sellable = self.store.find(
            Sellable, And(query, Lower(Sellable.barcode) == text.lower())).one()
        # If the barcode didnt match, maybe the user typed the product code
        if not sellable:
            sellable = self.store.find(
                Sellable, And(query, Lower(Sellable.code) == text.lower())).one()
        # If none of the above found, try to get the batch number
        if not sellable:
            query = Lower(StorableBatch.batch_number) == text.lower()
            batch = self.store.find(StorableBatch, query).one()
            if batch:
                sellable = batch.storable.product.sellable
                if not sellable.is_available:
                    # If the sellable is not available, reset both
                    sellable = None
                    batch = None

        # The user can't add the parent product of a grid directly to the sale.
        # TODO: Display a dialog to let the user choose an specific grid product.
        if sellable and sellable.product and sellable.product.is_grid:
            sellable = None

        # If the barcode has the price information, we need to calculate the
        # corresponding weight.
        if barinfo and sellable and barinfo.mode == BarcodeInfo.MODE_PRICE:
            weight = barinfo.price / sellable.price
        if barinfo and sellable:
            self.quantity.set_value(weight)
        return sellable, batch
    def _select_first_item(self):
        # Select the first row of the sale items list, if any.
        if len(self.sale_items):
            # XXX Probably kiwi should handle this for us. Waiting for
            # support
            self.sale_items.select(self.sale_items[0])
    def _set_sale_sensitive(self, value):
        # Enable/disable the part of the ui that is used for sales,
        # usually manipulated when printer information changes.
        widgets = [self.barcode, self.quantity, self.sale_items,
                   self.advanced_search, self.PaymentReceive]
        self.set_sensitive(widgets, value)

        if value:
            self.barcode.grab_focus()
    def _disable_printer_ui(self):
        # No working fiscal printer: block sales and all till operations.
        self._set_sale_sensitive(False)
        widgets = [self.TillOpen, self.TillClose, self.TillVerify]
        self.set_sensitive(widgets, False)
        text = _(u"POS operations requires a connected fiscal printer.")
        self.till_status_label.set_text(text)
    def _till_status_changed(self, closed, blocked):
        """Update the status banner and action sensitivities for the till state."""
        def large(s):
            # Bold, extra-large pango markup for the banner headline.
            return '<span weight="bold" size="xx-large">%s</span>' % (
                api.escape(s), )

        if closed:
            text = large(_("Till closed"))
            if not blocked:
                # Offer an inline "open till" link when the till can be opened.
                text += '\n\n<span size="large"><a href="open-till">%s</a></span>' % (
                    api.escape(_('Open till')))
        elif blocked:
            text = large(_("Till blocked"))
        else:
            text = large(_("Till open"))
        self.till_status_label.set_use_markup(True)
        self.till_status_label.set_justify(gtk.JUSTIFY_CENTER)
        self.till_status_label.set_markup(text)

        self.set_sensitive([self.TillOpen], closed)
        self.set_sensitive([self.TillVerify, self.NewTrade,
                            self.LoanClose, self.WorkOrderClose],
                           not closed and not blocked)
        self.set_sensitive([self.TillClose],
                           not closed or blocked)

        # Selling is only possible with an open, unblocked till.
        self._set_sale_sensitive(not closed and not blocked)
    def _update_widgets(self):
        """Recompute every sensitivity/visibility that depends on the sale state."""
        has_sale_items = len(self.sale_items) >= 1
        self.set_sensitive((self.checkout_button, self.remove_item_button,
                            self.NewDelivery,
                            self.ConfirmOrder), has_sale_items)
        # We can cancel an order whenever we have a coupon opened.
        self.set_sensitive([self.CancelOrder, self.DetailsViewer],
                           self._sale_started)
        has_products = False
        has_services = False
        for sale_item in self.sale_items:
            if sale_item and sale_item.sellable.product:
                has_products = True
            if sale_item and sale_item.service:
                has_services = True
            if has_products and has_services:
                break
        self.set_sensitive([self.delivery_button], has_products)
        self.set_sensitive([self.NewDelivery], has_sale_items)

        sale_item = self.sale_items.get_selected()
        if sale_item is not None and sale_item.service:
            # We are fetching DELIVERY_SERVICE into the sale_items' store
            # instead of the default store to avoid accidental commits.
            can_edit = not sysparam.compare_object('DELIVERY_SERVICE', sale_item.service)
        else:
            can_edit = False
        self.set_sensitive([self.edit_item_button], can_edit)
        self.set_sensitive([self.remove_item_button],
                           sale_item is not None and sale_item.can_remove)

        self.set_sensitive((self.checkout_button,
                            self.ConfirmOrder), has_products or has_services)
        # Show the till banner when idle, the items pane during a sale.
        self.till_status_box.set_visible(not self._sale_started)
        self.sale_items_pane.set_visible(self._sale_started)

        self._update_totals()
        self._update_buttons()
        self._update_sellable_details()
    def _update_sellable_details(self):
        """Fill the details viewer (image + markup) for the selected sale item."""
        sale_item = self.sale_items.get_selected()
        sellable = sale_item and sale_item.sellable
        self._image_slave.set_sellable(sellable)

        if sale_item:
            markup = '<b>%s</b>\n%s x %s' % (
                api.escape(sale_item.description),
                api.escape(format_quantity(sale_item.quantity)),
                api.escape(get_formatted_price(sale_item.price)))
            if sellable.service:
                fix_date = (sale_item.estimated_fix_date.strftime('%x')
                            if sale_item.estimated_fix_date else '')
                extra_markup_parts = [
                    (_("Estimated fix date"), fix_date),
                    (_("Notes"), sale_item.notes)]
            elif sellable.product:
                product = sellable.product
                manufacturer = (product.manufacturer.name if
                                product.manufacturer else '')
                extra_markup_parts = [
                    (_("Manufacturer"), manufacturer),
                    (_("Brand"), product.brand),
                    (_("Family"), product.family),
                    (_("Model"), product.model),
                    (_("Width"), product.width or ''),
                    (_("Height"), product.height or ''),
                    (_("Depth"), product.depth or ''),
                    (_("Weight"), product.weight or '')]

            # NOTE(review): a sellable that is neither service nor product
            # would leave extra_markup_parts undefined here -- presumably
            # that cannot happen; confirm.
            # Empty values are skipped so the details pane stays compact.
            extra_markup = '\n'.join(
                '<b>%s</b>: %s' % (api.escape(label), api.escape(str(text)))
                for label, text in extra_markup_parts if text)
        else:
            markup = ''
            extra_markup = ''

        self.details_lbl.set_markup(markup)
        self.details_lbl.set_tooltip_markup(markup)
        self.extra_details_lbl.set_markup(extra_markup)
        self.extra_details_lbl.set_tooltip_markup(extra_markup)
def _has_sellable(self):
return bool(self.barcode.get_text().strip() != '' or self._sellable)
    def _update_buttons(self):
        """Enable add/search buttons based on quantity and barcode state."""
        has_quantity = self._read_quantity() > 0
        has_sellable = self._has_sellable()
        self.set_sensitive([self.add_button], has_sellable and has_quantity)
        self.set_sensitive([self.advanced_search], has_quantity)
def _read_quantity(self):
try:
quantity = self.quantity.read()
except ValidationError:
quantity = 0
return quantity
    def _read_scale(self, sellable):
        # NOTE(review): ``sellable`` is unused here; the weight read from the
        # scale is applied to the quantity entry regardless -- confirm.
        data = read_scale_info(self.store)
        self.quantity.set_value(data.weight)
def _run_advanced_search(self, message=None, confirm_quantity=False):
    """Open the sellable search dialog seeded with the typed barcode.

    :param message: optional info message shown inside the search dialog
    :param confirm_quantity: when True, select the sellable and move focus
        to the quantity entry instead of adding it immediately
    """
    search_str = self.barcode.get_text()
    sellable_view_item = self.run_dialog(
        SaleSellableSearch,
        self.store,
        search_str=search_str,
        sale_items=self.sale_items,
        quantity=self.sellableitem_proxy.model.quantity,
        info_message=message)
    # Dialog cancelled: just give focus back to the barcode entry.
    if not sellable_view_item:
        self.barcode.grab_focus()
        return
    sellable = sellable_view_item.sellable
    if confirm_quantity:
        self._set_selected_sellable(sellable)
        self.quantity.grab_focus()
    else:
        self._add_sellable(sellable)
def _reset_quantity_proxy(self):
    # Reset the quantity proxy model to its defaults: quantity 1 and no
    # price override (None means "use the sellable's own price").
    self.sellableitem_proxy.model.quantity = Decimal(1)
    self.sellableitem_proxy.update('quantity')
    self.sellableitem_proxy.model.price = None
def _get_deliverable_items(self):
    """Returns a list of sale items which can be delivered"""
    # Only product items can be delivered; services are excluded.
    deliverable = []
    for item in self.sale_items:
        if item.sellable.product is not None:
            deliverable.append(item)
    return deliverable
def _check_delivery_removed(self, sale_item):
    # If a delivery was removed, we need to remove all
    # the references to it eg self._delivery
    if (sale_item.sellable ==
            sysparam.get_object(self.store, 'DELIVERY_SERVICE').sellable):
        self._delivery = None
#
# Sale Order operations
#
def _add_sale_item(self, confirm_quantity):
    """Try to create a sale_item based on the barcode field.

    :param confirm_quantity: When True, instead of adding the sellable, we
        will move to the quantity field. Otherwise, we will just add the
        sellable.
    """
    sellable, batch = self._get_sellable_and_batch()
    if not sellable:
        # Unknown barcode: fall back to the advanced search dialog,
        # preserving what the user typed as the search string.
        message = (_("The barcode '%s' does not exist. "
                     "Searching for a product instead...")
                   % self.barcode.get_text())
        self._run_advanced_search(message, confirm_quantity)
        return
    if confirm_quantity:
        self._set_selected_sellable(sellable, batch)
        self.quantity.grab_focus()
    else:
        self._add_sellable(sellable, batch=batch)
    self._update_widgets()
def _add_sellable(self, sellable, batch=None):
    """Validate quantity/unit/stock and add *sellable* to the item list.

    Aborts silently on zero quantity, and warns the user on fractional
    quantities for unit-restricted sellables or insufficient stock.
    """
    quantity = self._read_quantity()
    if quantity == 0:
        return
    if not sellable.is_valid_quantity(quantity):
        warning(_(u"You cannot sell fractions of this product. "
                  u"The '%s' unit does not allow that") %
                sellable.unit_description)
        return
    if sellable.product:
        # If the sellable has a weight unit specified and we have a scale
        # configured for this station, go and check what the scale says.
        if (sellable and sellable.unit and
                sellable.unit.unit_index == UnitType.WEIGHT and
                self._scale_settings):
            self._read_scale(sellable)
    # product_storable is None for services and non-storable products,
    # in which case no stock check is performed.
    storable = sellable.product_storable
    if storable is not None:
        if not self._check_available_stock(storable, sellable):
            info(_("You cannot sell more items of product %s. "
                   "The available quantity is not enough.") %
                 sellable.get_description())
            self.barcode.set_text('')
            self.barcode.grab_focus()
            return
    self._update_list(sellable, batch=batch)
def _check_available_stock(self, storable, sellable):
    """Return True if the branch has enough stock for the requested total."""
    branch = api.get_current_branch(self.store)
    available = storable.get_balance_for_branch(branch)
    # Start with the quantity currently being added, then count what is
    # already in the item list. Items that were already decreased should
    # not be considered here.
    # FIXME: We are using .id to workaround a problem when those sellables
    # are not on the same store. See the fixme on self.checkout and fix
    # this together.
    requested = self.sellableitem_proxy.model.quantity
    for sale_item in self.sale_items:
        if sale_item.sellable.id == sellable.id:
            requested += sale_item.quantity - sale_item.quantity_decreased
    return available >= requested
def _clear_order(self):
    """Reset the POS to an empty-order state (items, trade, store, widgets)."""
    log.info("Clearing order")
    self._sale_started = False
    self.sale_items.clear()
    widgets = [self.search_box, self.list_vbox, self.CancelOrder,
               self.PaymentReceive]
    self.set_sensitive(widgets, True)
    self._suggested_client = None
    self._delivery = None
    self._clear_trade()
    self._reset_quantity_proxy()
    self.barcode.set_text('')
    self._update_widgets()
    # store may already been closed on checkout
    if self._current_store and not self._current_store.obsolete:
        self._current_store.rollback(close=True)
    self._current_store = None
def _clear_trade(self, remove=False):
    """Forget the trade in progress, optionally removing it from the db."""
    if remove and self._trade:
        self._trade.remove()
    self._trade = None
    self._remove_trade_infobar()
def _edit_sale_item(self, sale_item):
    """Open the appropriate editor for a service sale item.

    Product items are not editable here; activating a product row is a
    no-op by design.
    """
    if sale_item.service:
        # The delivery service gets its dedicated editor.
        if sysparam.compare_object('DELIVERY_SERVICE', sale_item.service):
            self._edit_delivery()
            return
        with api.new_store() as store:
            model = self.run_dialog(ServiceItemEditor, store, sale_item)
        if model:
            self.sale_items.update(sale_item)
    else:
        # Do not raise any exception here, since this method can be called
        # when the user activate a row with product in the sellables list.
        return
def _cancel_order(self, show_confirmation=True):
    """
    Cancels the currently opened order.
    @returns: True if the order was canceled, otherwise false
    """
    if len(self.sale_items) and show_confirmation:
        # yesno returns truthy when the user keeps the order
        # ("Don't cancel" is the default button).
        if yesno(_("This will cancel the current order. Are you sure?"),
                 gtk.RESPONSE_NO, _("Don't cancel"), _(u"Cancel order")):
            return False
    log.info("Cancelling coupon")
    if not sysparam.get_bool('CONFIRM_SALES_ON_TILL'):
        if self._coupon:
            self._coupon.cancel()
    self._coupon = None
    self._clear_order()
    return True
def _create_delivery(self):
    """Create (or edit) the delivery for the current order."""
    delivery_param = sysparam.get_object(self.store, 'DELIVERY_SERVICE')
    # NOTE(review): assigning the delivery *sellable* to self._delivery
    # here, while the editor result is assigned below, looks inconsistent —
    # confirm that _edit_delivery tolerates both value kinds.
    if delivery_param.sellable in self.sale_items:
        self._delivery = delivery_param.sellable
    delivery = self._edit_delivery()
    if delivery:
        self._add_delivery_item(delivery, delivery_param.sellable)
        self._delivery = delivery
def _edit_delivery(self):
    """Edits a delivery, but do not allow the price to be changed.
    If there's no delivery, create one.
    @returns: The delivery
    """
    # FIXME: Canceling the editor still saves the changes.
    return self.run_dialog(CreateDeliveryEditor, self.store,
                           self._delivery,
                           sale_items=self._get_deliverable_items())
def _add_delivery_item(self, delivery, delivery_sellable):
    """Sync the delivery service item in the list with *delivery*'s data.

    Updates the existing delivery item in place when present; otherwise a
    new TemporarySaleItem is created (the for/else runs when no break
    happened, i.e. no existing delivery item was found).
    """
    for sale_item in self.sale_items:
        if sale_item.sellable == delivery_sellable:
            sale_item.price = delivery.price
            sale_item.notes = delivery.notes
            sale_item.estimated_fix_date = delivery.estimated_fix_date
            self._delivery_item = sale_item
            new_item = False
            break
    else:
        self._delivery_item = TemporarySaleItem(sellable=delivery_sellable,
                                                quantity=1,
                                                notes=delivery.notes,
                                                price=delivery.price)
        self._delivery_item.estimated_fix_date = delivery.estimated_fix_date
        new_item = True
    self._update_added_item(self._delivery_item,
                            new_item=new_item)
def _create_sale(self, store):
    """Build a persistent Sale (plus optional Delivery) from the item list.

    :param store: the database store the sale will be created in; temporary
        items from other stores are re-fetched into it.
    :returns: the new Sale
    """
    user = api.get_current_user(store)
    branch = api.get_current_branch(store)
    salesperson = user.person.sales_person
    cfop_id = api.sysparam.get_object_id('DEFAULT_SALES_CFOP')
    nature = api.sysparam.get_string('DEFAULT_OPERATION_NATURE')
    group = PaymentGroup(store=store)
    sale = Sale(store=store,
                branch=branch,
                salesperson=salesperson,
                group=group,
                cfop_id=cfop_id,
                coupon_id=None,
                operation_nature=nature)
    if self._delivery:
        sale.client = store.fetch(self._delivery.client)
        # Fix: was "sale.storeporter", which is not a Sale attribute and
        # silently dropped the transporter on the persisted sale.
        sale.transporter = store.fetch(self._delivery.transporter)
        delivery = Delivery(
            store=store,
            address=store.fetch(self._delivery.address),
            transporter=store.fetch(self._delivery.transporter),
        )
    else:
        delivery = None
        sale.client = self._suggested_client
    for fake_sale_item in self.sale_items:
        sale_item = sale.add_sellable(
            store.fetch(fake_sale_item.sellable),
            price=fake_sale_item.price, quantity=fake_sale_item.quantity,
            quantity_decreased=fake_sale_item.quantity_decreased,
            batch=store.fetch(fake_sale_item.batch))
        sale_item.notes = fake_sale_item.notes
        sale_item.estimated_fix_date = fake_sale_item.estimated_fix_date
        # Items flagged for delivery are attached to the Delivery; the
        # delivery service item itself becomes its service_item.
        if delivery and fake_sale_item.deliver:
            delivery.add_item(sale_item)
        elif delivery and fake_sale_item == self._delivery_item:
            delivery.service_item = sale_item
    return sale
def _set_selected_sellable(self, sellable, batch=None):
    """Saves the selected sellable for adding later.
    The user has selected a sellable, but he is still going to confirm
    the quantity. We should have what he has selected to add later.
    """
    self._sellable = sellable
    self._batch = batch
    self.sellable_description.set_text(sellable.description)
    # Clear the barcode so _has_sellable now relies on self._sellable.
    self.barcode.set_text('')
    self._update_buttons()
@public(since="1.5.0")
def checkout(self, cancel_clear=False):
    """Initiates the sale wizard to confirm sale.
    :param cancel_clear: If cancel_clear is true, the sale will be cancelled
        if the checkout is cancelled.
    """
    assert len(self.sale_items) >= 1
    # FIXME: We should create self._current_store when adding the first
    # item, so we can simplify a lot of code on this module by using it
    # directly. The way it is now, most of the items will come from
    # self.store (so we need to fetch them to the store we define bellow)
    # and some from self._current_store (closed loan items, closed work
    # order items, etc)
    if self._current_store:
        store = self._current_store
        # A savepoint lets us roll back just the checkout while keeping
        # earlier work (closed loans/work orders) in the same store.
        savepoint = 'before_run_fiscalprinter_confirm'
        store.savepoint(savepoint)
    else:
        store = api.new_store()
        savepoint = None
    if self._trade:
        subtotal = self._get_subtotal()
        returned_total = self._trade.returned_total
        # A trade cannot exceed (or, with USE_TRADE_AS_DISCOUNT, equal)
        # the new sale's value; bail out and let the user add items.
        if subtotal < returned_total:
            info(_("Traded value is greater than the new sale's value. "
                   "Please add more items or return it in Sales app, "
                   "then make a new sale"))
            return
        if (sysparam.get_bool('USE_TRADE_AS_DISCOUNT') and
                subtotal == returned_total):
            info(_("Traded value is equal to the new sale's value. "
                   "Please add more items or return it in Sales app, "
                   "then make a new sale"))
            return
        sale = self._create_sale(store)
        self._trade.new_sale = sale
        self._trade.trade()
    else:
        sale = self._create_sale(store)
    if sysparam.get_bool('CONFIRM_SALES_ON_TILL'):
        sale.order()
        store.commit()
    else:
        assert self._coupon
        ordered = self._coupon.confirm(sale, store, savepoint,
                                       subtotal=self._get_subtotal())
        # Dont call store.confirm() here, since coupon.confirm()
        # above already did it
        if not ordered:
            # FIXME: Move to TEF plugin
            manager = get_plugin_manager()
            if manager.is_active('tef') or cancel_clear:
                self._cancel_order(show_confirmation=False)
            elif not self._current_store:
                # Just do that if a store was created above and
                # if _cancel_order wasn't called (it closes the connection)
                store.rollback(close=True)
            return
    log.info("Checking out")
    self._coupon = None
    POSConfirmSaleEvent.emit(sale, self.sale_items[:])
    # We must close the connection only after the event is emmited, since it
    # may use value from the sale that will become invalid after it is
    # closed
    store.close()
    self._clear_order()
def _remove_selected_item(self):
    """Remove the selected item from the list, coupon and delivery refs."""
    sale_item = self.sale_items.get_selected()
    # Callers only enable removal for removable items.
    assert sale_item.can_remove
    self._coupon_remove_item(sale_item)
    self.sale_items.remove(sale_item)
    self._check_delivery_removed(sale_item)
    self._select_first_item()
    self._update_widgets()
    self.barcode.grab_focus()
def _checkout_or_add_item(self):
    # This is called when the user activates the barcode field.
    search_str = self.barcode.get_text()
    if search_str == '':
        # The user pressed enter with an empty string. Maybe start checkout
        checkout = True
        need_confirmation = sysparam.get_bool('CONFIRM_SALES_ON_TILL')
        if (need_confirmation and not
                yesno(_('Close the order?'), gtk.RESPONSE_NO, _('Confirm'),
                      _("Don't confirm"))):
            checkout = False
        if len(self.sale_items) >= 1 and checkout:
            self.checkout()
    else:
        # The user typed something. Try to add the sellable.
        # In case there was an already selected sellable, we should reset
        # it, since the user may have changed what he is searching for.
        self._sellable = None
        self._batch = None
        self._add_sale_item(confirm_quantity=self._confirm_quantity)
def _remove_trade_infobar(self):
    """Destroy the trade infobar, if one is currently shown."""
    infobar = self._trade_infobar
    if not infobar:
        return
    infobar.destroy()
    self._trade_infobar = None
def _show_trade_infobar(self, trade):
    """Show (or hide, when *trade* is falsy) the trade-in-progress infobar."""
    self._remove_trade_infobar()
    if not trade:
        return
    button = gtk.Button(_("Cancel trade"))
    button.connect('clicked', self._on_remove_trade_button__clicked)
    # NOTE(review): reads self._trade.returned_total rather than the
    # trade argument — callers always pass self._trade, so they match.
    value = converter.as_string(currency, self._trade.returned_total)
    msg = _("There is a trade with value %s in progress...\n"
            "When checking out, it will be used as part of "
            "the payment.") % (value, )
    self._trade_infobar = self.window.add_info_bar(
        gtk.MESSAGE_INFO, msg, action_widget=button)
#
# Coupon related
#
def _open_coupon(self):
    """Create and open a fiscal coupon on the printer.

    Retries while the user asks to try again; returns None if the user
    gives up, otherwise the open coupon.
    """
    coupon = self._printer.create_coupon()
    if coupon:
        while not coupon.open():
            if not yesno(
                _("It is not possible to start a new sale if the "
                  "fiscal coupon cannot be opened."),
                    gtk.RESPONSE_YES, _("Try again"), _("Cancel sale")):
                return None
        # A sale is now in progress; payment receiving is blocked.
        self.set_sensitive([self.PaymentReceive], False)
    return coupon
def _coupon_add_item(self, sale_item):
    """Adds an item to the coupon.
    Should return -1 if the coupon was not added, but will return None if
    CONFIRM_SALES_ON_TILL is true
    See :class:`stoqlib.gui.fiscalprinter.FiscalCoupon` for more information
    """
    self._sale_started = True
    if sysparam.get_bool('CONFIRM_SALES_ON_TILL'):
        return
    # Lazily open the fiscal coupon on the first item.
    if self._coupon is None:
        coupon = self._open_coupon()
        if not coupon:
            return -1
        self._coupon = coupon
    return self._coupon.add_item(sale_item)
def _coupon_remove_item(self, sale_item):
    # No coupon is used when sales are confirmed on the till.
    if sysparam.get_bool('CONFIRM_SALES_ON_TILL'):
        return
    assert self._coupon
    self._coupon.remove_item(sale_item)
def _close_till(self):
    """Close the till, cancelling any sale in progress first (with consent)."""
    if self._sale_started:
        if not yesno(_('You must finish or cancel the current sale before '
                       'you can close the till.'),
                     gtk.RESPONSE_NO, _("Cancel sale"), _("Finish sale")):
            return
        self._cancel_order(show_confirmation=False)
    self._printer.close_till()
#
# Actions
#
def on_CancelOrder__activate(self, action):
    # Menu action: cancel the current order (asks for confirmation).
    self._cancel_order()
def on_Clients__activate(self, action):
    # Menu action: open the client search dialog.
    self.run_dialog(ClientSearch, self.store, hide_footer=True)
def on_Sales__activate(self, action):
    # Menu action: open the sale search in a fresh store so edits made
    # there are committed/rolled back independently of the POS store.
    with api.new_store() as store:
        self.run_dialog(SaleWithToolbarSearch, store)
def on_SoldItemsByBranchSearch__activate(self, action):
    # Menu action: open the sold-items-by-branch report search.
    self.run_dialog(SoldItemsByBranchSearch, self.store)
def on_ProductSearch__activate(self, action):
    # Menu action: read-only product search (costs hidden at the POS).
    self.run_dialog(ProductSearch, self.store, hide_footer=True,
                    hide_toolbar=True, hide_cost_column=True)
def on_ServiceSearch__activate(self, action):
    # Menu action: read-only service search (costs hidden at the POS).
    self.run_dialog(ServiceSearch, self.store, hide_toolbar=True,
                    hide_cost_column=True)
def on_DeliverySearch__activate(self, action):
    # Menu action: open the delivery search dialog.
    self.run_dialog(DeliverySearch, self.store)
def on_ConfirmOrder__activate(self, action):
    # Menu action: start checkout of the current order.
    self.checkout()
def on_NewDelivery__activate(self, action):
    # Menu action: create/edit the delivery for the current order.
    self._create_delivery()
def on_PaymentReceive__activate(self, action):
    # Menu action: open the payment receiving search dialog.
    self.run_dialog(PaymentReceivingSearch, self.store)
def on_TillClose__activate(self, action):
    # Menu action: close the till (may cancel a sale in progress).
    self._close_till()
def on_TillOpen__activate(self, action):
    # Menu action: open the till via the fiscal printer helper.
    self._printer.open_till()
def on_TillVerify__activate(self, action):
    # Menu action: verify the till via the fiscal printer helper.
    self._printer.verify_till()
def on_DetailsViewer__activate(self, button):
    # Toggle action: show/hide the item details panel.
    self.details_box.set_visible(button.get_active())
def on_LoanClose__activate(self, action):
    """Run the close-loan wizard, keeping its store for later checkout."""
    if self.check_open_inventory():
        return
    # Reuse the pending store (with a savepoint for rollback) or open a
    # fresh one for the wizard.
    if self._current_store:
        store = self._current_store
        store.savepoint('before_run_wizard_closeloan')
    else:
        store = api.new_store()
    rv = self.run_dialog(CloseLoanWizard, store, create_sale=False,
                         require_sale_items=True)
    if rv:
        if self._suggested_client is None:
            # The loan close wizard lets to close more than one loan at
            # the same time (but only from the same client and branch)
            self._suggested_client = rv[0].client
        self._current_store = store
    elif self._current_store:
        store.rollback_to_savepoint('before_run_wizard_closeloan')
    else:
        store.rollback(close=True)
def on_WorkOrderClose__activate(self, action):
    """Close a finished work order and add its items to the current sale."""
    if self.check_open_inventory():
        return
    # Reuse the pending store (with a savepoint for rollback) or open a
    # fresh one for the search dialog.
    if self._current_store:
        store = self._current_store
        store.savepoint('before_run_search_workorder')
    else:
        store = api.new_store()
    rv = self.run_dialog(WorkOrderFinishedSearch, store,
                         double_click_confirm=True)
    if rv:
        work_order = rv.work_order
        for item in work_order.order_items:
            # Stock was already decreased by the work order, so mirror
            # quantity_decreased; the item cannot be removed at the POS.
            self.add_sale_item(
                TemporarySaleItem(sellable=item.sellable,
                                  quantity=item.quantity,
                                  quantity_decreased=item.quantity_decreased,
                                  price=item.price,
                                  can_remove=False))
        work_order.close()
        if self._suggested_client is None:
            self._suggested_client = work_order.client
        self._current_store = store
    elif self._current_store:
        store.rollback_to_savepoint('before_run_search_workorder')
    else:
        store.rollback(close=True)
def on_NewTrade__activate(self, action):
    """Start the trade wizard, replacing an ongoing trade only on consent."""
    if self._trade:
        if yesno(_("There is already a trade in progress... Do you "
                   "want to cancel it and start a new one?"),
                 gtk.RESPONSE_NO, _("Cancel trade"), _("Finish trade")):
            self._clear_trade(remove=True)
        else:
            return
    # Reuse the pending store (with a savepoint for rollback) or open a
    # fresh one for the wizard.
    if self._current_store:
        store = self._current_store
        store.savepoint('before_run_wizard_saletrade')
    else:
        store = api.new_store()
    trade = self.run_dialog(SaleTradeWizard, store)
    if trade:
        self._trade = trade
        self._current_store = store
    elif self._current_store:
        store.rollback_to_savepoint('before_run_wizard_saletrade')
    else:
        store.rollback(close=True)
    self._show_trade_infobar(trade)
#
# Other callbacks
#
def _on_context_add__activate(self, menu_item):
    # Context menu: add an item through the advanced search.
    self._run_advanced_search(confirm_quantity=False)
def _on_context_remove__activate(self, menu_item):
    # Context menu: remove the selected sale item.
    self._remove_selected_item()
def _on_context_remove__can_disable(self, menu_item):
    """Disable the 'remove' entry unless the selected item is removable."""
    selected = self.sale_items.get_selected()
    return not (selected and selected.can_remove)
def _on_remove_trade_button__clicked(self, button):
    # Infobar button: cancel the trade in progress after confirmation.
    if yesno(_("Do you really want to cancel the trade in progress?"),
             gtk.RESPONSE_NO, _("Cancel trade"), _("Don't cancel")):
        self._clear_trade(remove=True)
def on_till_status_label__activate_link(self, button, link):
    # Hyperlink in the till status label: "open-till" opens the till.
    if link == 'open-till':
        self._printer.open_till()
    # Returning True stops further handling of the link activation.
    return True
def on_advanced_search__clicked(self, button):
    # Toolbar button: open the advanced sellable search.
    self._run_advanced_search(confirm_quantity=self._confirm_quantity)
def on_add_button__clicked(self, button):
    # Add button: add the sellable immediately (no quantity confirmation).
    self._add_sale_item(confirm_quantity=False)
def on_barcode__activate(self, entry):
    # Enter pressed in the barcode entry: add an item or start checkout.
    marker("enter pressed")
    self._checkout_or_add_item()
def after_barcode__changed(self, editable):
    # Keep button sensitivity in sync while the user types.
    self._update_buttons()
def on_quantity__activate(self, entry):
    # Before activate, check if 'quantity' widget is valid and if we have a
    # sellable selected
    has_sellable = self._has_sellable()
    if self.quantity.validate() is not ValueUnset and has_sellable:
        self._add_sale_item(confirm_quantity=False)
def on_quantity__validate(self, entry, value):
    # Validation hook: refresh buttons and reject non-positive quantities.
    self._update_buttons()
    if value <= 0:
        return ValidationError(_("Quantity must be a positive number"))
def on_sale_items__selection_changed(self, sale_items, sale_item):
    # Selection changed in the item list: refresh dependent widgets.
    self._update_widgets()
def on_remove_item_button__clicked(self, button):
    # Remove button: remove the currently selected sale item.
    self._remove_selected_item()
def on_delivery_button__clicked(self, button):
    # Delivery button: create/edit the delivery for this order.
    self._create_delivery()
def on_checkout_button__clicked(self, button):
    # Checkout button: start the sale confirmation wizard.
    self.checkout()
def on_edit_item_button__clicked(self, button):
    # Edit button: only enabled with a selection, hence the hard error.
    item = self.sale_items.get_selected()
    if item is None:
        raise StoqlibError("You should have a item selected "
                           "at this point")
    self._edit_sale_item(item)
def on_sale_items__row_activated(self, sale_items, sale_item):
    # Double-click/enter on a row: edit the item (no-op for products).
    self._edit_sale_item(sale_item)
def _on_PrinterHelper__till_status_changed(self, printer, closed, blocked):
    # Printer helper signal: forward the new till status to the UI.
    self._till_status_changed(closed, blocked)
def _on_PrinterHelper__ecf_changed(self, printer, has_ecf):
    # If we have an ecf, let the other events decide what to disable.
    if has_ecf:
        return
    # We dont have an ecf. Disable till related operations
    self._disable_printer_ui()
def _on_CloseLoanWizardFinishEvent(self, loans, sale, wizard):
    # Event hook: add each item sold through the loan wizard to the order.
    for item in wizard.get_sold_items():
        sellable, quantity, price = item
        self.add_sale_item(
            TemporarySaleItem(sellable=sellable, quantity=quantity,
                              # Quantity was already decreased on loan
                              quantity_decreased=quantity,
                              price=price, can_remove=False))
| andrebellafronte/stoq | stoq/gui/pos.py | Python | gpl-2.0 | 54,235 | [
"VisIt"
] | 9201a1280a9c804dbfb7ca153f1a3147a676dad93976bc00c1dcb8c5bf84fc4a |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base for state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import numpy
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import kalman_filter
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class StateSpaceModelConfiguration(
    collections.namedtuple(
        typename="StateSpaceModelConfiguration",
        field_names=[
            "num_features", "use_observation_noise", "dtype",
            "covariance_prior_fn", "bayesian_prior_weighting",
            "filtering_postprocessor", "trainable_start_state",
            "exogenous_noise_increases", "exogenous_noise_decreases",
            "exogenous_feature_columns", "exogenous_update_condition",
            "filtering_maximum_posterior_variance_ratio",
            "filtering_minimum_posterior_variance",
            "transition_covariance_initial_log_scale_bias",
            "static_unrolling_window_size_threshold"])):
  """Configuration options for StateSpaceModels."""

  # Overriding __new__ (rather than __init__) is the standard way to give
  # namedtuple fields default values; instances remain immutable.
  def __new__(
      cls,
      num_features=1,
      use_observation_noise=True,
      dtype=dtypes.float32,
      covariance_prior_fn=math_utils.log_noninformative_covariance_prior,
      bayesian_prior_weighting=True,
      filtering_postprocessor=None,
      trainable_start_state=False,
      exogenous_noise_increases=True,
      exogenous_noise_decreases=False,
      exogenous_feature_columns=None,
      exogenous_update_condition=None,
      filtering_maximum_posterior_variance_ratio=1e6,
      filtering_minimum_posterior_variance=0.,
      transition_covariance_initial_log_scale_bias=-5.,
      static_unrolling_window_size_threshold=None):
    """Configuration options for StateSpaceModels.

    Args:
      num_features: Output dimension for model
      use_observation_noise: If true, observations are modeled as noisy
        functions of the current state. If false, observations are a
        deterministic function of the current state. Only applicable to the
        top-level model in an ensemble. Consider also changing the
        transition_covariance_initial_log_scale_bias when disabling observation
        noise, as its default setting assumes that observation noise is part of
        the model.
      dtype: The float dtype to use when defining the model.
      covariance_prior_fn: A function mapping from a covariance matrix to a
        scalar value (e.g. log likelihood) which can be summed across
        matrices. Defaults to an independent Jeffreys prior on the diagonal
        elements (regularizing as log(1. / variance)). To use a flat prior
        (i.e. no regularization), set to `lambda _: 0.`. Defaults to
        relatively uninformative priors on state transition and observation
        noise, which have the effect of encouraging low-noise solutions which
        provide confident predictions when possible. Without regularization,
        transition noise tends to remain high, and multi-step predictions are
        under-confident.
      bayesian_prior_weighting: If True, weights the prior (covariance_prior_fn)
        based on an estimate of the full dataset size. If False, weights it
        based on the mini-batch window size, which (while statistically
        improper) can lead to more desirable low-noise solutions in cases
        where the full dataset is large enough to overwhelm the prior.
      filtering_postprocessor: A FilteringStepPostprocessor object to use,
        useful for ignoring anomalies in training data.
      trainable_start_state: If True, start state may depend on trainable
        Variables. If False, it will not.
      exogenous_noise_increases: If True, exogenous regressors can add to model
        state, increasing uncertainty. If both this parameter and
        exogenous_noise_decreases are False, exogenous regressors are ignored.
      exogenous_noise_decreases: If True, exogenous regressors can "set" model
        state, decreasing uncertainty. If both this parameter and
        exogenous_noise_increases are False, exogenous regressors are ignored.
      exogenous_feature_columns: A list of tf.contrib.layers.FeatureColumn
        objects (for example tf.contrib.layers.embedding_column) corresponding
        to exogenous features which provide extra information to the model but
        are not part of the series to be predicted. Passed to
        tf.contrib.layers.input_from_feature_columns.
      exogenous_update_condition: A function taking two Tensor arguments `times`
        (shape [batch size]) and `features` (a dictionary mapping exogenous
        feature keys to Tensors with shapes [batch size, ...]) and returning a
        boolean Tensor with shape [batch size] indicating whether state should
        be updated using exogenous features for each part of the batch. Where
        it is False, no exogenous update is performed. If None (default),
        exogenous updates are always performed. Useful for avoiding "leaky"
        frequent exogenous updates when sparse updates are desired. Called
        only during graph construction.
      filtering_maximum_posterior_variance_ratio: The maximum allowed ratio of
        two diagonal entries in a state covariance matrix just prior to
        filtering. Lower values mean that filtering will be more numerically
        stable, at the cost of artificially increasing estimated uncertainty
        in some cases. This parameter can be important when learning a
        transition matrix.
      filtering_minimum_posterior_variance: The minimum diagonal value in a
        state covariance matrix just prior to filtering, preventing numerical
        instability due to deterministic beliefs (sometimes an issue when
        learning transition matrices). This value should be set several orders
        of magnitude below any expected minimum state uncertainty.
      transition_covariance_initial_log_scale_bias: Controls the initial
        tradeoff between the transition noise covariance matrix and the
        observation noise covariance matrix, on a log scale (the elements of
        the transition noise covariance matrix are proportional to `e^{X +
        transition_covariance_initial_log_scale_bias}` where `X` is learned
        and may depend on input statistics, observation noise covariance is
        proportional to `e^{Y -
        transition_covariance_initial_log_scale_bias}`). For models *with*
        observation noise, -5 is a reasonable value. Models which do not use
        observation noise, and are not part of an ensemble which does use
        observation noise, should have this set to 0 or more to avoid
        numerical issues due to filtering with too little noise.
      static_unrolling_window_size_threshold: Only relevant for the top-level
        StateSpaceModel in an ensemble; enables switching between static and
        dynamic looping (if not None, default, meaning that no static
        unrolling is performed) based on the window size (windows with this
        size and smaller will have their graphs unrolled statically). See the
        SequentialTimeSeriesModel constructor for details.

    Returns:
      A StateSpaceModelConfiguration object.
    """
    # A mutable default argument would be shared across calls, so the empty
    # list is created here instead of in the signature.
    if exogenous_feature_columns is None:
      exogenous_feature_columns = []
    return super(StateSpaceModelConfiguration, cls).__new__(
        cls, num_features, use_observation_noise, dtype,
        covariance_prior_fn, bayesian_prior_weighting,
        filtering_postprocessor, trainable_start_state,
        exogenous_noise_increases, exogenous_noise_decreases,
        exogenous_feature_columns, exogenous_update_condition,
        filtering_maximum_posterior_variance_ratio,
        filtering_minimum_posterior_variance,
        transition_covariance_initial_log_scale_bias,
        static_unrolling_window_size_threshold)
class StateSpaceModel(model.SequentialTimeSeriesModel):
"""Base class for linear state space models.
Sub-classes can specify the model to be learned by overriding
get_state_transition, get_noise_transform, and get_observation_model.
See kalman_filter.py for a detailed description of the class of models covered
by StateSpaceModel.
Briefly, state space models are defined by a state transition equation:
state[t] = StateTransition * state[t-1] + NoiseTransform * StateNoise[t]
+ ExogenousNoiseIncreasing[t]
StateNoise[t] ~ Gaussian(0, StateNoiseCovariance)
ExogenousNoiseIncreasing[t] ~ Gaussian(ExogenousNoiseIncreasingMean[t],
ExogenousNoiseIncreasingCovariance[t])
And an observation model:
observation[t] = ObservationModel * state[t] + ObservationNoise[t]
ObservationNoise[t] ~ Gaussian(0, ObservationNoiseCovariance)
Additionally, exogenous regressors can act as observations, decreasing
uncertainty:
ExogenousNoiseDecreasingObservation[t] ~ Gaussian(
ExogenousNoiseDecreasingMean[t], ExogenousNoiseDecreasingCovariance[t])
Attributes:
kalman_filter: If initialize_graph has been called, the initialized
KalmanFilter to use for inference. None otherwise.
prior_state_mean: If initialize_graph has been called, a
Variable-parameterized Tensor with shape [state dimension];
the initial prior mean for one or more time series. None otherwise.
prior_state_var: If initialize_graph has been called, a
Variable-parameterized Tensor with shape [state dimension x state
dimension]; the initial prior covariance. None otherwise.
state_transition_noise_covariance: If initialize_graph has been called, a
Variable-parameterized Tensor with shape [state noise dimension x state
noise dimension] indicating the amount of noise added at each
transition.
"""
def __init__(self, configuration):
  """Initialize a state space model.

  Args:
    configuration: A StateSpaceModelConfiguration object.
  """
  self._configuration = configuration
  # Postprocessor output names are appended to the training outputs so
  # they are surfaced alongside mean/covariance/log_likelihood.
  if configuration.filtering_postprocessor is not None:
    filtering_postprocessor_names = (
        configuration.filtering_postprocessor.output_names)
  else:
    filtering_postprocessor_names = []
  super(StateSpaceModel, self).__init__(
      train_output_names=(["mean", "covariance", "log_likelihood"]
                          + filtering_postprocessor_names),
      predict_output_names=["mean", "covariance"],
      num_features=configuration.num_features,
      dtype=configuration.dtype,
      exogenous_feature_columns=configuration.exogenous_feature_columns,
      exogenous_update_condition=configuration.exogenous_update_condition,
      static_unrolling_window_size_threshold=
      configuration.static_unrolling_window_size_threshold)
  # These are populated lazily when the graph is initialized.
  self._kalman_filter = None
  self.prior_state_mean = None
  self.prior_state_var = None
  self.state_transition_noise_covariance = None
  self._total_observation_count = None
  self._observation_noise_covariance = None
  # Capture the current variable scope and use it to define all model
  # variables. Especially useful for ensembles, where variables may be defined
  # for every component model in one function call, which would otherwise
  # prevent the user from separating variables from different models into
  # different scopes.
  self._variable_scope = variable_scope.get_variable_scope()
def transition_power_noise_accumulator(self, num_steps):
  r"""Sum a transitioned covariance matrix over a number of steps.

  Computes

    \sum_{i=0}^{num_steps - 1} (
      state_transition^i
      * state_transition_noise_covariance
      * (state_transition^i)^T)

  If special cases are available, overriding this function can lead to more
  efficient inferences.

  Args:
    num_steps: A [...] shape integer Tensor with numbers of steps to compute
      power sums for.
  Returns:
    The computed power sum, with shape [..., state dimension, state
    dimension].
  """
  # TODO(allenl): This general case should use cumsum if transition_to_powers
  # can be computed in constant time (important for correlated ensembles,
  # where transition_power_noise_accumulator special cases cannot be
  # aggregated from member models).
  noise_transform = ops.convert_to_tensor(self.get_noise_transform(),
                                          self.dtype)
  # Project the state-noise covariance through the noise transform:
  # R C R^T.
  noise_transformed = math_ops.matmul(
      math_ops.matmul(noise_transform,
                      self.state_transition_noise_covariance),
      noise_transform,
      transpose_b=True)
  # Precompute power sums up to the largest requested step count, then
  # gather the per-element results.
  noise_additions = math_utils.power_sums_tensor(
      math_ops.reduce_max(num_steps) + 1,
      ops.convert_to_tensor(self.get_state_transition(), dtype=self.dtype),
      noise_transformed)
  return array_ops.gather(noise_additions, indices=num_steps)
def transition_to_powers(self, powers):
"""Raise the transition matrix to a batch of powers.
Computes state_transition^powers. If special cases are available, overriding
this function can lead to more efficient inferences.
Args:
powers: A [...] shape integer Tensor with powers to raise the transition
matrix to.
Returns:
The computed matrix powers, with shape [..., state dimension, state
dimension].
"""
return math_utils.matrix_to_powers(
ops.convert_to_tensor(self.get_state_transition(), dtype=self.dtype),
powers)
  def _window_initializer(self, times, state):
    """Prepare to impute across the gaps in a window."""
    _, _, priors_from_time = state
    times = ops.convert_to_tensor(times)
    priors_from_time = ops.convert_to_tensor(priors_from_time)
    # The priors must not come from after the window starts; assert before
    # using `times`.
    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.reduce_all(priors_from_time <= times[:, 0]),
            [priors_from_time, times[:, 0]],
            summarize=100)
    ]):
      times = array_ops.identity(times)
    # Gaps between consecutive steps within each batch element.
    intra_batch_gaps = array_ops.reshape(times[:, 1:] - times[:, :-1], [-1])
    # Gaps between each window's first step and its prior's time.
    starting_gaps = times[:, 0] - priors_from_time
    # Pre-define transition matrices raised to powers (and their sums) for every
    # gap in this window. This avoids duplicate computation (for example many
    # steps will use the transition matrix raised to the first power) and
    # batches the computation rather than doing it inside the per-step loop.
    unique_gaps, _ = array_ops.unique(
        array_ops.concat([intra_batch_gaps, starting_gaps], axis=0))
    self._window_power_sums = self.transition_power_noise_accumulator(
        unique_gaps)
    self._window_transition_powers = self.transition_to_powers(unique_gaps)
    self._window_gap_sizes = unique_gaps
  def _lookup_window_caches(self, caches, indices):
    """Gather cached per-gap-size values for a batch of gap sizes.

    Args:
      caches: A list of Tensors whose leading dimension corresponds to
        self._window_gap_sizes (as built in _window_initializer).
      indices: An integer Tensor of gap sizes to look up in each cache.
    Returns:
      A list with one gathered Tensor per entry in `caches`.
    """
    # `self._window_gap_sizes` is already unique (it came from array_ops.unique
    # in _window_initializer), so concatenating it before `indices` and running
    # `unique` again makes the trailing ids the position of each requested gap
    # size within the caches.
    _, window_power_ids = array_ops.unique(
        array_ops.concat(
            [
                self._window_gap_sizes, math_ops.cast(
                    indices, self._window_gap_sizes.dtype)
            ],
            axis=0))
    all_gathered_indices = []
    for cache in caches:
      # Only the ids for `indices` (the tail of window_power_ids) are used.
      gathered_indices = array_ops.gather(
          cache, window_power_ids[-array_ops.shape(indices)[0]:])
      # Restore static shape information lost by the dynamic gather.
      gathered_indices.set_shape(indices.get_shape().concatenate(
          gathered_indices.get_shape()[-2:]))
      all_gathered_indices.append(gathered_indices)
    return all_gathered_indices
def _cached_transition_powers_and_sums(self, num_steps):
return self._lookup_window_caches(
caches=[self._window_transition_powers, self._window_power_sums],
indices=num_steps)
  def _imputation_step(self, current_times, state):
    """Add state transition noise to catch `state` up to `current_times`.

    State space models are inherently sequential, so we need to "predict
    through" any missing time steps to catch up each element of the batch to its
    next observation/prediction time.

    Args:
      current_times: A [batch size] Tensor of times to impute up to, not
          inclusive.
      state: A tuple of (mean, covariance, previous_times) having shapes
          mean; [batch size x state dimension]
          covariance; [batch size x state dimension x state dimension]
          previous_times; [batch size]
    Returns:
      Imputed model state corresponding to the `state` argument.
    """
    estimated_state, estimated_state_var, previous_times = state
    # Number of timesteps each batch element must be predicted forward.
    catchup_times = current_times - previous_times
    non_negative_assertion = control_flow_ops.Assert(
        math_ops.reduce_all(catchup_times >= 0), [
            "Negative imputation interval", catchup_times, current_times,
            previous_times
        ],
        summarize=100)
    with ops.control_dependencies([non_negative_assertion]):
      # Use the per-window caches of transition powers and noise power sums
      # rather than recomputing them per step.
      transition_matrices, transition_noise_sums = (  # pylint: disable=unbalanced-tuple-unpacking
          self._cached_transition_powers_and_sums(catchup_times))
      estimated_state = self._kalman_filter.predict_state_mean(
          estimated_state, transition_matrices)
      estimated_state_var = self._kalman_filter.predict_state_var(
          estimated_state_var, transition_matrices, transition_noise_sums)
    # previous_times + catchup_times == current_times by construction.
    return (estimated_state, estimated_state_var,
            previous_times + catchup_times)
  def _filtering_step(self, current_times, current_values, state, predictions):
    """Compute posteriors and accumulate one-step-ahead predictions.

    Args:
      current_times: A [batch size] Tensor for times for each observation.
      current_values: A [batch size] Tensor of values for each observation.
      state: A tuple of (mean, covariance, previous_times) having shapes
          mean; [batch size x state dimension]
          covariance; [batch size x state dimension x state dimension]
          previous_times; [batch size]
      predictions: A dictionary containing mean and covariance Tensors, the
          output of _prediction_step.
    Returns:
      A tuple of (posteriors, outputs):
        posteriors: Model state updated to take `current_values` into account.
        outputs: The `predictions` dictionary updated to include "loss" and
            "log_likelihood" entries (loss simply being negative log
            likelihood).
    """
    estimated_state, estimated_state_covariance, previous_times = state
    observation_model = self.get_broadcasted_observation_model(current_times)
    # Filtering requires imputation/prediction to have already advanced the
    # state to `current_times`.
    imputed_to_current_step_assert = control_flow_ops.Assert(
        math_ops.reduce_all(math_ops.equal(current_times, previous_times)),
        ["Attempted to perform filtering without imputation/prediction"])
    with ops.control_dependencies([imputed_to_current_step_assert]):
      # Clip the covariance for numerical stability before conditioning on the
      # observation.
      estimated_state_covariance = math_utils.clip_covariance(
          estimated_state_covariance,
          self._configuration.filtering_maximum_posterior_variance_ratio,
          self._configuration.filtering_minimum_posterior_variance)
      (filtered_state, filtered_state_covariance,
       log_prob) = self._kalman_filter.do_filter(
           estimated_state=estimated_state,
           estimated_state_covariance=estimated_state_covariance,
           predicted_observation=predictions["mean"],
           predicted_observation_covariance=predictions["covariance"],
           observation=current_values,
           observation_model=observation_model,
           observation_noise=self._observation_noise_covariance)
    filtered_state = (filtered_state, filtered_state_covariance, current_times)
    log_prob.set_shape(current_times.get_shape())
    # Loss is the negative log likelihood of observations under the
    # one-step-ahead predictive distribution.
    predictions["loss"] = -log_prob
    predictions["log_likelihood"] = log_prob
    # Give a configured postprocessor (e.g. anomaly handling) a chance to
    # modify the posterior state and outputs.
    if self._configuration.filtering_postprocessor is not None:
      return self._configuration.filtering_postprocessor.process_filtering_step(
          current_times=current_times,
          current_values=current_values,
          predicted_state=state,
          filtered_state=filtered_state,
          outputs=predictions)
    return (filtered_state, predictions)
def _prediction_step(self, current_times, state):
"""Make a prediction based on `state`.
Computes predictions based on the current `state`, checking that it has
already been updated (in `_imputation_step`) to `current_times`.
Args:
current_times: A [batch size] Tensor for times to make predictions for.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
A tuple of (updated state, predictions):
updated state: Model state with added transition noise.
predictions: A dictionary with "mean" and "covariance", having shapes
"mean": [batch size x num features]
"covariance: [batch size x num features x num features]
"""
estimated_state, estimated_state_var, previous_times = state
advanced_to_current_assert = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(current_times, previous_times)),
["Attempted to predict without imputation"])
with ops.control_dependencies([advanced_to_current_assert]):
observation_model = self.get_broadcasted_observation_model(current_times)
predicted_obs, predicted_obs_var = (
self._kalman_filter.observed_from_state(
state_mean=estimated_state,
state_var=estimated_state_var,
observation_model=observation_model,
observation_noise=self._observation_noise_covariance))
predicted_obs_var.set_shape(
ops.convert_to_tensor(current_times).get_shape()
.concatenate([self.num_features, self.num_features]))
predicted_obs.set_shape(current_times.get_shape().concatenate(
(self.num_features,)))
predicted_obs_var.set_shape(current_times.get_shape().concatenate(
(self.num_features, self.num_features)))
predictions = {
"mean": predicted_obs,
"covariance": predicted_obs_var}
state = (estimated_state, estimated_state_var, current_times)
return (state, predictions)
  def _exogenous_noise_decreasing(self, current_times, exogenous_values, state):
    """Update state with exogenous regressors, decreasing uncertainty.

    Constructs a mean and covariance based on transformations of
    `exogenous_values`, then performs Bayesian inference on the constructed
    observation. This has the effect of lowering uncertainty.

    This update refines or overrides previous inferences, useful for modeling
    exogenous inputs which "set" state, e.g. we dumped boiling water on the
    thermometer so we're pretty sure it's 100 degrees C.

    Args:
      current_times: A [batch size] Tensor of times for the exogenous values
          being input. NOTE(review): currently unused in this method.
      exogenous_values: A [batch size x exogenous input dimension] Tensor of
          exogenous values for each part of the batch.
      state: A tuple of (mean, covariance, previous_times) having shapes
          mean; [batch size x state dimension]
          covariance; [batch size x state dimension x state dimension]
          previous_times; [batch size]
    Returns:
      Updated state taking the exogenous regressors into account (with lower
      uncertainty than the input state).
    """
    estimated_state, estimated_state_covariance, previous_times = state
    state_transition = ops.convert_to_tensor(
        self.get_state_transition(), dtype=self.dtype)
    state_dimension = state_transition.get_shape()[0].value
    # Learning the observation model would be redundant since we transform
    # `exogenous_values` to the state space via a linear transformation anyway.
    observation_model = linalg_ops.eye(
        state_dimension,
        batch_shape=array_ops.shape(exogenous_values)[:-1],
        dtype=self.dtype)
    with variable_scope.variable_scope("exogenous_noise_decreasing_covariance"):
      # Learned, input-dependent noise for the pseudo-observation.
      observation_noise = math_utils.transform_to_covariance_matrices(
          exogenous_values, state_dimension)
    with variable_scope.variable_scope(
        "exogenous_noise_decreasing_observation"):
      # Learned linear map from exogenous inputs to a state-space observation.
      observation = layers.fully_connected(
          exogenous_values, state_dimension, activation_fn=None)
    # Pretend that we are making an observation with an observation model equal
    # to the identity matrix (i.e. a direct observation of the latent state),
    # with learned observation noise.
    posterior_state, posterior_state_var = (
        self._kalman_filter.posterior_from_prior_state(
            prior_state=estimated_state,
            prior_state_var=estimated_state_covariance,
            observation=observation,
            observation_model=observation_model,
            predicted_observations=(
                estimated_state,
                # The predicted noise covariance is noise due to current state
                # uncertainty plus noise learned based on the exogenous
                # observation (a somewhat trivial call to
                # self._kalman_filter.observed_from_state has been omitted).
                observation_noise + estimated_state_covariance),
            observation_noise=observation_noise))
    return (posterior_state, posterior_state_var, previous_times)
def _exogenous_noise_increasing(self, current_times, exogenous_values, state):
"""Update state with exogenous regressors, increasing uncertainty.
Adds to the state mean a linear transformation of `exogenous_values`, and
increases uncertainty by constructing a covariance matrix based on
`exogenous_values` and adding it to the state covariance.
This update is useful for modeling changes relative to current state,
e.g. the furnace turned on so the temperature will be increasing at an
additional 1 degree per minute with some uncertainty, this uncertainty being
added to our current uncertainty in the per-minute change in temperature.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
exogenous_values: A [batch size x exogenous input dimension] Tensor of
exogenous values for each part of the batch.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
Updated state taking the exogenous regressors into account (with higher
uncertainty than the input state).
"""
start_mean, start_covariance, previous_times = state
with variable_scope.variable_scope("exogenous_noise_increasing_mean"):
mean_addition = layers.fully_connected(
exogenous_values, start_mean.get_shape()[1].value, activation_fn=None)
state_dimension = start_covariance.get_shape()[1].value
with variable_scope.variable_scope("exogenous_noise_increasing_covariance"):
covariance_addition = (
math_utils.transform_to_covariance_matrices(
exogenous_values, state_dimension))
return (start_mean + mean_addition,
start_covariance + covariance_addition,
previous_times)
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update state with exogenous regressors.
Allows both increases and decreases in uncertainty.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
current_exogenous_regressors: A [batch size x exogenous input dimension]
Tensor of exogenous values for each part of the batch.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
Updated state taking the exogenous regressors into account.
"""
if self._configuration.exogenous_noise_decreases:
state = self._exogenous_noise_decreasing(
current_times, current_exogenous_regressors, state)
if self._configuration.exogenous_noise_increases:
state = self._exogenous_noise_increasing(
current_times, current_exogenous_regressors, state)
return state
def _loss_additions(self, times, values, mode):
"""Add regularization during training."""
if mode == estimator_lib.ModeKeys.TRAIN:
if (self._input_statistics is not None
and self._configuration.bayesian_prior_weighting):
normalization = 1. / math_ops.cast(
self._input_statistics.total_observation_count, self.dtype)
else:
# If there is no total observation count recorded, or if we are not
# doing a Bayesian prior weighting, assumes/pretends that the full
# dataset size is the window size.
normalization = 1. / math_ops.cast(
array_ops.shape(times)[1], self.dtype)
transition_contribution = ops.convert_to_tensor(
self._configuration.covariance_prior_fn(
self.state_transition_noise_covariance),
dtype=self.dtype)
if (self._configuration.use_observation_noise
and self._observation_noise_covariance is not None):
observation_contribution = ops.convert_to_tensor(
self._configuration.covariance_prior_fn(
self._observation_noise_covariance),
dtype=self.dtype)
regularization_sum = transition_contribution + observation_contribution
else:
regularization_sum = transition_contribution
return -normalization * regularization_sum
else:
return array_ops.zeros([], dtype=self.dtype)
def _variable_observation_transition_tradeoff_log(self):
"""Define a variable to trade off observation and transition noise."""
return variable_scope.get_variable(
name="observation_transition_tradeoff_log_scale",
initializer=constant_op.constant(
-self._configuration.transition_covariance_initial_log_scale_bias,
dtype=self.dtype),
dtype=self.dtype)
  def _define_parameters(self, observation_transition_tradeoff_log=None):
    """Define extra model-specific parameters.

    Models should wrap any variables defined here in the model's variable scope.

    Args:
      observation_transition_tradeoff_log: An ensemble-global parameter
        controlling the tradeoff between observation noise and transition
        noise. If its value is not None, component transition noise should scale
        with e^-observation_transition_tradeoff_log.
    """
    with variable_scope.variable_scope(self._variable_scope):
      # A scalar which allows the optimizer to quickly shift from observation
      # noise to transition noise (this value is subtracted from log transition
      # noise and added to log observation noise).
      if observation_transition_tradeoff_log is None:
        # Standalone model (or ensemble root): create our own tradeoff
        # variable.
        self._observation_transition_tradeoff_log_scale = (
            self._variable_observation_transition_tradeoff_log())
      else:
        # Ensemble member: share the ensemble-wide tradeoff parameter.
        self._observation_transition_tradeoff_log_scale = (
            observation_transition_tradeoff_log)
      # Must come after the tradeoff scale is set, since the transition noise
      # initialization depends on it.
      self.state_transition_noise_covariance = (
          self.get_state_transition_noise_covariance())
  def _set_input_statistics(self, input_statistics=None):
    """Record input statistics via the parent class's graph initialization."""
    super(StateSpaceModel, self).initialize_graph(
        input_statistics=input_statistics)
  def initialize_graph(self, input_statistics=None):
    """Define variables and ops relevant to the top-level model in an ensemble.

    For generic model parameters, _define_parameters() is called recursively on
    all members of an ensemble.

    Args:
      input_statistics: A math_utils.InputStatistics object containing input
          statistics. If None, data-independent defaults are used, which may
          result in longer or unstable training.
    """
    # Order matters: statistics first, then parameters (the tradeoff scale and
    # transition noise), then observation noise which depends on both.
    self._set_input_statistics(input_statistics=input_statistics)
    self._define_parameters()
    with variable_scope.variable_scope(self._variable_scope):
      self._observation_noise_covariance = ops.convert_to_tensor(
          self.get_observation_noise_covariance(), dtype=self.dtype)
      self._kalman_filter = kalman_filter.KalmanFilter(dtype=self.dtype)
      (self.prior_state_mean,
       self.prior_state_var) = self._make_priors()
def _make_priors(self):
"""Creates and returns model priors."""
prior_state_covariance = self.get_prior_covariance()
prior_state_mean = self.get_prior_mean()
return (prior_state_mean, prior_state_covariance)
  def get_prior_covariance(self):
    """Constructs a variable prior covariance with data-based initialization.

    Models should wrap any variables defined here in the model's variable scope.

    Returns:
      A two-dimensional [state dimension, state dimension] floating point Tensor
      with a (positive definite) prior state covariance matrix.
    """
    with variable_scope.variable_scope(self._variable_scope):
      state_dimension = ops.convert_to_tensor(
          self.get_state_transition()).get_shape()[0].value
      if self._configuration.trainable_start_state:
        base_covariance = math_utils.variable_covariance_matrix(
            state_dimension, "prior_state_var",
            dtype=self.dtype)
      else:
        # Non-trainable start state: a fixed identity prior covariance.
        return linalg_ops.eye(state_dimension, dtype=self.dtype)
      if self._input_statistics is not None:
        # Make sure initial latent value uncertainty is at least on the same
        # scale as noise in the data.
        covariance_multiplier = math_ops.reduce_max(
            self._input_statistics.series_start_moments.variance)
        return base_covariance * gen_math_ops.maximum(
            covariance_multiplier, 1.0)
      else:
        return base_covariance
def get_prior_mean(self):
"""Constructs a Variable-parameterized prior mean.
Models should wrap any variables defined here in the model's variable scope.
Returns:
A one-dimensional floating point Tensor with shape [state dimension]
indicating the prior mean.
"""
with variable_scope.variable_scope(self._variable_scope):
state_transition = ops.convert_to_tensor(
self.get_state_transition(), dtype=self.dtype)
state_dimension = state_transition.get_shape()[0].value
return variable_scope.get_variable(
name="prior_state_mean",
shape=[state_dimension],
dtype=self.dtype,
trainable=self._configuration.trainable_start_state)
# TODO(allenl): It would be nice if the generation were done with TensorFlow
# ops, and if the model parameters were somehow set instead of being passed
# around in a dictionary. Maybe unconditional generation should be through a
# special set of initializers?
def random_model_parameters(self, seed=None):
if self.num_features != 1:
raise NotImplementedError("Generation for multivariate state space models"
" is not currently implemented.")
if seed:
numpy.random.seed(seed)
state_dimension, noise_dimension = ops.convert_to_tensor(
self.get_noise_transform()).get_shape().as_list()
transition_var = 1.0 / numpy.random.gamma(shape=10., scale=10.,
size=[noise_dimension])
initial_state = numpy.random.normal(size=[state_dimension])
params_dict = {}
if self.prior_state_mean is not None:
params_dict[self.prior_state_mean] = initial_state
if self.state_transition_noise_covariance is not None:
params_dict[self.state_transition_noise_covariance] = numpy.diag(
transition_var)
if self.prior_state_var is not None:
params_dict[self.prior_state_var] = numpy.zeros(
[state_dimension, state_dimension])
if self._configuration.use_observation_noise:
observation_var = 1.0 / numpy.random.gamma(shape=4, scale=4)
params_dict[self._observation_noise_covariance] = [[observation_var]]
return params_dict
  def generate(self, number_of_series, series_length,
               model_parameters=None, seed=None, add_observation_noise=None):
    """Generate artificial data from this model.

    Evaluates model Tensors (requires a default session), then simulates the
    state space process in numpy.

    Args:
      number_of_series: Number of independent series to generate.
      series_length: Number of timesteps in each series.
      model_parameters: Optional dictionary of parameter Tensor feed values;
        unspecified parameters use their trained/current values.
      seed: Optional integer seed for numpy's random number generator.
      add_observation_noise: Whether to add observation noise; defaults to the
        model configuration's use_observation_noise.
    Returns:
      A dictionary with TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES
      numpy arrays.
    Raises:
      NotImplementedError: If the model is multivariate (num_features != 1).
    """
    if seed is not None:
      numpy.random.seed(seed)
    if self.num_features != 1:
      raise NotImplementedError("Generation for multivariate state space models"
                                " is not currently implemented.")
    if add_observation_noise is None:
      add_observation_noise = self._configuration.use_observation_noise
    if model_parameters is None:
      model_parameters = {}
    # Evaluate model-defining Tensors, feeding any overridden parameters.
    transitions = ops.convert_to_tensor(
        self.get_state_transition(), dtype=self.dtype).eval(
            feed_dict=model_parameters)
    noise_transform = ops.convert_to_tensor(self.get_noise_transform()).eval(
        feed_dict=model_parameters)
    noise_dimension = noise_transform.shape[1]
    get_passed_or_trained_value = model_utils.parameter_switch(model_parameters)
    transition_var = numpy.diag(get_passed_or_trained_value(
        self.state_transition_noise_covariance))
    transition_std = numpy.sqrt(transition_var)
    if add_observation_noise:
      observation_var = get_passed_or_trained_value(
          self._observation_noise_covariance)[0][0]
      observation_std = numpy.sqrt(observation_var)
    initial_state = get_passed_or_trained_value(self.prior_state_mean)
    current_state = numpy.tile(numpy.expand_dims(initial_state, 0),
                               [number_of_series, 1])
    observations = numpy.zeros([number_of_series, series_length])
    observation_models = self.get_broadcasted_observation_model(
        times=math_ops.range(series_length)).eval(feed_dict=model_parameters)
    for timestep, observation_model in enumerate(observation_models):
      # Advance the state: apply the transition, then add transformed noise.
      current_state = numpy.dot(current_state, transitions.T)
      current_state += numpy.dot(
          numpy.random.normal(
              loc=numpy.zeros([number_of_series, noise_dimension]),
              scale=numpy.tile(numpy.expand_dims(transition_std, 0),
                               [number_of_series, 1])),
          noise_transform.T)
      observation_mean = numpy.dot(current_state, observation_model[0].T)
      if add_observation_noise:
        observations[:, timestep] = numpy.random.normal(loc=observation_mean,
                                                        scale=observation_std)
      else:
        observations[:, timestep] = observation_mean
    observations = numpy.expand_dims(observations, -1)
    times = numpy.tile(
        numpy.expand_dims(numpy.arange(observations.shape[1]), 0),
        [observations.shape[0], 1])
    return {TrainEvalFeatures.TIMES: times,
            TrainEvalFeatures.VALUES: observations}
  @abc.abstractmethod
  def get_state_transition(self):
    """Specifies the state transition model to use.

    Abstract; subclasses must override.

    Returns:
      A [state dimension x state dimension] Tensor specifying how states
      transition from one timestep to the next.
    """
    pass
  @abc.abstractmethod
  def get_noise_transform(self):
    """Specifies the noise transition model to use.

    Abstract; subclasses must override.

    Returns:
      A [state dimension x state noise dimension] Tensor specifying how noise
      (generated with shape [state noise dimension]) affects the model's state.
    """
    pass
  @abc.abstractmethod
  def get_observation_model(self, times):
    """Specifies the observation model to use.

    Abstract; subclasses must override.

    Args:
      times: A [batch dimension] int32 Tensor with times for each part of the
          batch, on which the observation model can depend.
    Returns:
      This function, when overridden, has three possible return values:
        - A [state dimension] Tensor with a static, univariate observation
          model.
        - A [self.num_features x state dimension] static, multivariate model.
        - A [batch dimension x self.num_features x state dimension] observation
          model, which may depend on `times`.
      See get_broadcasted_observation_model for details of the broadcasting.
    """
    pass
  def get_broadcasted_observation_model(self, times):
    """Broadcast this model's observation model if necessary.

    The model can define a univariate observation model which will be broadcast
    over both self.num_features and the batch dimension of `times`.

    The model can define a multi-variate observation model which does not depend
    on `times`, and it will be broadcast over the batch dimension of `times`.

    Finally, the model can define a multi-variate observation model with a batch
    dimension, which will not be broadcast.

    Args:
      times: A [batch dimension] int32 Tensor with times for each part of the
          batch, on which the observation model can depend.
    Returns:
      A [batch dimension x self.num_features x state dimension] Tensor
      specifying the observation model to use for each time in `times` and each
      feature.
    """
    unbroadcasted_model = ops.convert_to_tensor(
        self.get_observation_model(times), dtype=self.dtype)
    # Constrain the static rank to [1, 3]; broadcasting is chosen by rank.
    unbroadcasted_shape = (unbroadcasted_model.get_shape()
                           .with_rank_at_least(1).with_rank_at_most(3))
    if unbroadcasted_shape.ndims is None:
      # Pass through fully undefined shapes, but make sure they're rank 3 at
      # graph eval time
      assert_op = control_flow_ops.Assert(
          math_ops.equal(array_ops.rank(unbroadcasted_model), 3),
          [array_ops.shape(unbroadcasted_model)])
      with ops.control_dependencies([assert_op]):
        return array_ops.identity(unbroadcasted_model)
    if unbroadcasted_shape.ndims == 1:
      # Unbroadcasted shape [state dimension]: tile over batch and features.
      broadcasted_model = array_ops.tile(
          array_ops.reshape(tensor=unbroadcasted_model, shape=[1, 1, -1]),
          [array_ops.shape(times)[0], self.num_features, 1])
    elif unbroadcasted_shape.ndims == 2:
      # Unbroadcasted shape [num features x state dimension]: tile over batch.
      broadcasted_model = array_ops.tile(
          array_ops.expand_dims(unbroadcasted_model, dim=0),
          [array_ops.shape(times)[0], 1, 1])
    elif unbroadcasted_shape.ndims == 3:
      # Already has a batch dimension; use as-is.
      broadcasted_model = unbroadcasted_model
    broadcasted_model.get_shape().assert_has_rank(3)
    return broadcasted_model
  def get_state_transition_noise_covariance(
      self, minimum_initial_variance=1e-5):
    """Constructs a trainable state transition noise covariance.

    Args:
      minimum_initial_variance: A lower bound on the data-derived initial
        variance, keeping the log of the initial scale well-defined.
    Returns:
      A [state noise dimension x state noise dimension] covariance matrix
      Tensor parameterized by trainable variables.
    """
    state_noise_transform = ops.convert_to_tensor(
        self.get_noise_transform(), dtype=self.dtype)
    state_noise_dimension = state_noise_transform.get_shape()[1].value
    if self._input_statistics is not None:
      # Initialize the noise scale from the per-observation share of the data's
      # starting variance.
      feature_variance = self._input_statistics.series_start_moments.variance
      initial_transition_noise_scale = math_ops.log(
          gen_math_ops.maximum(
              math_ops.reduce_mean(feature_variance) / math_ops.cast(
                  self._input_statistics.total_observation_count, self.dtype),
              minimum_initial_variance))
    else:
      initial_transition_noise_scale = 0.
    # Generally high transition noise is undesirable; we want to set it quite
    # low to start so that we don't need too much training to get to good
    # solutions (i.e. with confident predictions into the future if possible),
    # but not so low that training can't yield a high transition noise if the
    # data demands it.
    initial_transition_noise_scale -= (
        self._observation_transition_tradeoff_log_scale)
    return math_utils.variable_covariance_matrix(
        state_noise_dimension, "state_transition_noise",
        dtype=self.dtype,
        initial_overall_scale_log=initial_transition_noise_scale)
def get_observation_noise_covariance(self, minimum_initial_variance=1e-5):
if self._configuration.use_observation_noise:
if self._input_statistics is not None:
# Get variance across the first few values in each batch for each
# feature, for an initial observation noise (over-)estimate.
feature_variance = self._input_statistics.series_start_moments.variance
else:
feature_variance = None
if feature_variance is not None:
feature_variance = gen_math_ops.maximum(feature_variance,
minimum_initial_variance)
return math_utils.variable_covariance_matrix(
size=self.num_features,
dtype=self.dtype,
name="observation_noise_covariance",
initial_diagonal_values=feature_variance,
initial_overall_scale_log=(
self._observation_transition_tradeoff_log_scale))
else:
return array_ops.zeros(
shape=[self.num_features, self.num_features],
name="observation_noise_covariance",
dtype=self.dtype)
def get_start_state(self):
"""Defines and returns a non-batched prior state and covariance."""
# TODO(allenl,vitalyk): Add an option for non-Gaussian priors once extended
# Kalman filtering is implemented (ideally any Distribution object).
if self._input_statistics is not None:
start_time = self._input_statistics.start_time
else:
start_time = array_ops.zeros([], dtype=dtypes.int64)
return (self.prior_state_mean,
self.prior_state_var,
start_time - 1)
def get_features_for_timesteps(self, timesteps):
"""Get features for a batch of timesteps. Default to no features."""
return array_ops.zeros([array_ops.shape(timesteps)[0], 0], dtype=self.dtype)
class StateSpaceEnsemble(StateSpaceModel):
  """Base class for combinations of state space models."""

  def __init__(self, ensemble_members, configuration):
    """Initialize the ensemble by specifying its members.

    Args:
      ensemble_members: A list of StateSpaceModel objects which will be included
          in this ensemble.
      configuration: A StateSpaceModelConfiguration object.
    """
    self._ensemble_members = ensemble_members
    super(StateSpaceEnsemble, self).__init__(configuration=configuration)

  def _set_input_statistics(self, input_statistics):
    """Share input statistics with every member model."""
    super(StateSpaceEnsemble, self)._set_input_statistics(input_statistics)
    for member in self._ensemble_members:
      member._set_input_statistics(input_statistics)  # pylint: disable=protected-access

  def _loss_additions(self, times, values, mode):
    """Combine the ensemble's own regularization with each member's."""
    # Allow sub-models to regularize
    return (super(StateSpaceEnsemble, self)._loss_additions(
        times, values, mode) + math_ops.add_n([
            member._loss_additions(times, values, mode)  # pylint: disable=protected-access
            for member in self._ensemble_members
        ]))

  def _compute_blocked(self, member_fn, name):
    """Stack one matrix per member into a block-diagonal matrix."""
    with variable_scope.variable_scope(self._variable_scope):
      return math_utils.block_diagonal(
          [member_fn(member)
           for member in self._ensemble_members],
          dtype=self.dtype,
          name=name)

  def transition_to_powers(self, powers):
    """Block-diagonal combination of member transition matrix powers."""
    return self._compute_blocked(
        member_fn=lambda member: member.transition_to_powers(powers),
        name="ensemble_transition_to_powers")

  def _define_parameters(self, observation_transition_tradeoff_log=None):
    """Define parameters for all members and the ensemble itself.

    Args:
      observation_transition_tradeoff_log: Optional ensemble-global tradeoff
        between observation and transition noise; created here (once for the
        whole ensemble) if None, then shared with every member.
    """
    with variable_scope.variable_scope(self._variable_scope):
      if observation_transition_tradeoff_log is None:
        # Define the tradeoff parameter between observation and transition
        # noise once for the whole ensemble, and pass it down to members.
        observation_transition_tradeoff_log = (
            self._variable_observation_transition_tradeoff_log())
      for member in self._ensemble_members:
        member._define_parameters(observation_transition_tradeoff_log=(  # pylint: disable=protected-access
            observation_transition_tradeoff_log))
      super(StateSpaceEnsemble, self)._define_parameters(
          observation_transition_tradeoff_log=(
              observation_transition_tradeoff_log))

  def random_model_parameters(self, seed=None):
    """Sample random parameters for every member and the ensemble itself.

    Args:
      seed: Optional integer seed; member i receives `seed + i` so members
        draw distinct parameters.
    Returns:
      A dictionary mapping parameter Tensors to sampled feed values.
    """
    param_union = {}
    for i, member in enumerate(self._ensemble_members):
      # `is not None` so that seed=0 is honored rather than silently treated
      # as "no seed".
      member_params = member.random_model_parameters(
          seed=seed + i if seed is not None else None)
      param_union.update(member_params)
    param_union.update(
        super(StateSpaceEnsemble, self).random_model_parameters(seed=seed))
    return param_union

  def get_prior_mean(self):
    """Concatenate member prior means in ensemble state order."""
    return array_ops.concat(
        values=[member.get_prior_mean() for member in self._ensemble_members],
        axis=0,
        name="ensemble_prior_state_mean")

  def get_state_transition(self):
    """Block-diagonal combination of member state transition matrices."""
    return self._compute_blocked(
        member_fn=lambda member: member.get_state_transition(),
        name="ensemble_state_transition")

  def get_noise_transform(self):
    """Block-diagonal combination of member noise transforms."""
    return self._compute_blocked(
        member_fn=lambda member: member.get_noise_transform(),
        name="ensemble_noise_transform")

  def get_observation_model(self, times):
    """Not defined for ensembles; use get_broadcasted_observation_model."""
    raise NotImplementedError("No un-broadcasted observation model defined for"
                              " ensembles.")

  def get_broadcasted_observation_model(self, times):
    """Computes a combined observation model based on member models.

    The effect is that predicted observations from each model are summed.

    Args:
      times: A [batch dimension] int32 Tensor with times for each part of the
          batch, on which member observation models can depend.
    Returns:
      A [batch dimension x num features x combined state dimension] Tensor with
      the combined observation model.
    """
    member_observation_models = [
        ops.convert_to_tensor(
            member.get_broadcasted_observation_model(times), dtype=self.dtype)
        for member in self._ensemble_members
    ]
    return array_ops.concat(values=member_observation_models, axis=2)
class StateSpaceIndependentEnsemble(StateSpaceEnsemble):
  """Ensemble of independently specified state space models fit jointly.

  The ensemble is itself a state space model: member observation models are
  concatenated, and transition matrices and noise transforms are stacked into
  block-diagonal matrices. The ensemble state dimension is therefore the sum
  of the members' state dimensions, which can make training and inference slow
  and memory-intensive as the posterior (shape [state dimension x state
  dimension]) grows.

  Each member model j's state at time t is defined by:
    state[t, j] = StateTransition[j] * state[t-1, j]
        + NoiseTransform[j] * StateNoise[t, j]
    StateNoise[t, j] ~ Gaussian(0, StateNoiseCovariance[j])
  and the ensemble observation model is:
    observation[t] = Sum { ObservationModel[j] * state[t, j] }
        + ObservationNoise[t]
    ObservationNoise[t] ~ Gaussian(0, ObservationNoiseCovariance)
  """
  def transition_power_noise_accumulator(self, num_steps):
    """Block-diagonal aggregation of the members' noise accumulators."""
    def _member_accumulator(member):
      return member.transition_power_noise_accumulator(num_steps)
    return self._compute_blocked(member_fn=_member_accumulator,
                                 name="ensemble_power_noise_accumulator")
  def get_prior_covariance(self):
    """Construct the ensemble prior covariance based on component models."""
    def _member_prior_covariance(member):
      return member.get_prior_covariance()
    return self._compute_blocked(member_fn=_member_prior_covariance,
                                 name="ensemble_prior_state_covariance")
  def get_state_transition_noise_covariance(self):
    """Construct the ensemble transition noise covariance from components."""
    def _member_noise_covariance(member):
      return member.state_transition_noise_covariance
    return self._compute_blocked(member_fn=_member_noise_covariance,
                                 name="ensemble_state_transition_noise")
# TODO(allenl): It would be nice to have replicated feature models which are
# identical batched together to reduce the graph size.
# TODO(allenl): Support for sharing M independent models across N features, with
# N > M.
# TODO(allenl): Stack component prior covariances while allowing cross-model
# correlations to be learned (currently a full covariance prior is learned, but
# custom component model covariances are not used).
class StateSpaceCorrelatedFeaturesEnsemble(StateSpaceEnsemble):
  """A correlated ensemble where each model represents a feature.
  Unlike `StateSpaceIndependentEnsemble`, a full state transition noise
  covariance matrix is learned for this ensemble; the models are not assumed to
  be independent. Rather than concatenating observation models (i.e. summing the
  contributions of each model to each feature),
  StateSpaceCorrelatedFeaturesEnsemble stacks observation models diagonally,
  meaning that each model corresponds to one feature of the series.
  Behaves like (and is) a single state space model where:
    StateTransition = Diag(StateTransition[j] for models j)
    ObservationModel = Diag(ObservationModel[j] for models j)
  Note that each ObservationModel[j] is a [1 x S_j] matrix (S_j being the state
  dimension of model j), i.e. a univariate model. The combined model is
  multivariate, the number of features of the series being equal to the number
  of component models in the ensemble.
  """
  def __init__(self, ensemble_members, configuration):
    """Specify the ensemble's configuration and component models.
    Args:
      ensemble_members: A list of `StateSpaceModel` objects, with length equal
        to `configuration.num_features`. Each of these models, which must be
        univariate, corresponds to a single feature of the time series.
      configuration: A StateSpaceModelConfiguration object.
    Raises:
      ValueError: If the length of `ensemble_members` does not equal the number
        of features in the series, or any component is not univariate.
    """
    # One member per feature: member j models exactly feature j of the series.
    if len(ensemble_members) != configuration.num_features:
      raise ValueError(
          "The number of members in a StateSpaceCorrelatedFeaturesEnsemble "
          "must equal the number of features in the time series.")
    for member in ensemble_members:
      if member.num_features != 1:
        raise ValueError(
            "StateSpaceCorrelatedFeaturesEnsemble components must be "
            "univariate.")
    super(StateSpaceCorrelatedFeaturesEnsemble, self).__init__(
        ensemble_members=ensemble_members, configuration=configuration)
  def transition_power_noise_accumulator(self, num_steps):
    """Use a noise accumulator special case when possible."""
    if len(self._ensemble_members) == 1:
      # If this is a univariate series, we should use the special casing built
      # into the single component model.
      return self._ensemble_members[0].transition_power_noise_accumulator(
          num_steps=num_steps)
    # If we have multiple features, and therefore multiple models, we have
    # introduced correlations which make noise accumulation more
    # complicated. Here we fall back to the general case, since we can't just
    # aggregate member special cases.
    return super(StateSpaceCorrelatedFeaturesEnsemble,
                 self).transition_power_noise_accumulator(num_steps=num_steps)
  def get_broadcasted_observation_model(self, times):
    """Stack observation models diagonally."""
    def _member_observation_model(member):
      # Coerce to a Tensor of the ensemble dtype before block-stacking.
      return ops.convert_to_tensor(
          member.get_broadcasted_observation_model(times), dtype=self.dtype)
    return self._compute_blocked(member_fn=_member_observation_model,
                                 name="feature_ensemble_observation_model")
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py | Python | apache-2.0 | 56,990 | [
"Gaussian"
] | 59c6ee729f438b8528788edb722f6da821ce8be7aef7747b3235ddc990c3268d |
from nose.tools import assert_equal #@UnresolvedImport
from whoosh import analysis, fields, qparser
from whoosh.compat import u, unichr
from whoosh.filedb.filestore import RamStorage
def test_regextokenizer():
    """RegexTokenizer emits pattern matches; gaps=True emits the text between matches instead."""
    value = u("AAAaaaBBBbbbCCCcccDDDddd")
    rex = analysis.RegexTokenizer("[A-Z]+")
    assert_equal([t.text for t in rex(value)], ["AAA", "BBB", "CCC", "DDD"])
    rex = analysis.RegexTokenizer("[A-Z]+", gaps=True)
    assert_equal([t.text for t in rex(value)], ["aaa", "bbb", "ccc", "ddd"])
def test_path_tokenizer():
    """PathTokenizer yields each successively longer path prefix as a token."""
    value = u("/alfa/bravo/charlie/delta/")
    pt = analysis.PathTokenizer()
    assert_equal([t.text for t in pt(value)], ["/alfa", "/alfa/bravo",
                                               "/alfa/bravo/charlie",
                                               "/alfa/bravo/charlie/delta"])
def test_composition1():
    """Piping a tokenizer into a filter builds a CompositeAnalyzer with indexable stages."""
    ca = analysis.RegexTokenizer() | analysis.LowercaseFilter()
    assert_equal(ca.__class__.__name__, "CompositeAnalyzer")
    assert_equal(ca[0].__class__.__name__, "RegexTokenizer")
    assert_equal(ca[1].__class__.__name__, "LowercaseFilter")
    assert_equal([t.text for t in ca(u("ABC 123"))], ["abc", "123"])
def test_composition2():
    """Composing onto an existing CompositeAnalyzer flattens it to three stages; StopFilter drops 'The'."""
    ca = analysis.RegexTokenizer() | analysis.LowercaseFilter()
    sa = ca | analysis.StopFilter()
    assert_equal(len(sa), 3)
    assert_equal(sa.__class__.__name__, "CompositeAnalyzer")
    assert_equal(sa[0].__class__.__name__, "RegexTokenizer")
    assert_equal(sa[1].__class__.__name__, "LowercaseFilter")
    assert_equal(sa[2].__class__.__name__, "StopFilter")
    assert_equal([t.text for t in sa(u("The ABC 123"))], ["abc", "123"])
def test_composition3():
    """Tokenizer | filter yields a CompositeAnalyzer even without an intermediate analyzer."""
    sa = analysis.RegexTokenizer() | analysis.StopFilter()
    assert_equal(sa.__class__.__name__, "CompositeAnalyzer")
def test_composing_functions():
    """A plain generator function can be composed into an analyzer pipeline as a filter stage."""
    def filter(tokens):
        for t in tokens:
            t.text = t.text.upper()
            yield t
    analyzer = analysis.RegexTokenizer() | filter
    assert_equal([t.text for t in analyzer(u("abc def"))], ["ABC", "DEF"])
def test_shared_composition():
    """One shared tokenizer+filter prefix can feed two different analyzer tails independently."""
    shared = analysis.RegexTokenizer(r"\S+") | analysis.LowercaseFilter()
    ana1 = shared | analysis.NgramFilter(3)
    ana2 = shared | analysis.DoubleMetaphoneFilter()
    assert_equal([t.text for t in ana1(u("hello"))], ["hel", "ell", "llo"])
    assert_equal([t.text for t in ana2(u("hello"))], ["HL"])
def test_multifilter():
    """MultiFilter selects which sub-filter runs based on the analyzer's mode argument."""
    f1 = analysis.LowercaseFilter()
    f2 = analysis.PassFilter()
    mf = analysis.MultiFilter(a=f1, b=f2)
    ana = analysis.RegexTokenizer(r"\S+") | mf
    text = u("ALFA BRAVO CHARLIE")
    assert_equal([t.text for t in ana(text, mode="a")], ["alfa", "bravo", "charlie"])
    assert_equal([t.text for t in ana(text, mode="b")], ["ALFA", "BRAVO", "CHARLIE"])
def test_tee_filter():
    """TeeFilter runs each token through every branch, interleaving the branch outputs."""
    target = u("Alfa Bravo Charlie")
    f1 = analysis.LowercaseFilter()
    f2 = analysis.ReverseTextFilter()
    ana = analysis.RegexTokenizer(r"\S+") | analysis.TeeFilter(f1, f2)
    result = " ".join([t.text for t in ana(target)])
    assert_equal(result, "alfa aflA bravo ovarB charlie eilrahC")
    class ucfilter(analysis.Filter):
        def __call__(self, tokens):
            for t in tokens:
                t.text = t.text.upper()
                yield t
    # A branch may itself be a composed pipeline.
    f2 = analysis.ReverseTextFilter() | ucfilter()
    ana = analysis.RegexTokenizer(r"\S+") | analysis.TeeFilter(f1, f2)
    result = " ".join([t.text for t in ana(target)])
    assert_equal(result, "alfa AFLA bravo OVARB charlie EILRAHC")
    f1 = analysis.PassFilter()
    f2 = analysis.BiWordFilter()
    ana = analysis.RegexTokenizer(r"\S+") | analysis.TeeFilter(f1, f2) | analysis.LowercaseFilter()
    result = " ".join([t.text for t in ana(target)])
    assert_equal(result, "alfa alfa-bravo bravo bravo-charlie charlie")
def test_intraword():
    """IntraWordFilter splits on case/char/number boundaries and emits merged terms at the last sub-token's position."""
    iwf = analysis.IntraWordFilter(mergewords=True, mergenums=True)
    ana = analysis.RegexTokenizer(r"\S+") | iwf
    def check(text, ls):
        assert_equal([(t.pos, t.text) for t in ana(text)], ls)
    check(u("PowerShot)"), [(0, "Power"), (1, "Shot"), (1, "PowerShot")])
    check(u("A's+B's&C's"), [(0, "A"), (1, "B"), (2, "C"), (2, "ABC")])
    check(u("Super-Duper-XL500-42-AutoCoder!"),
          [(0, "Super"), (1, "Duper"), (2, "XL"), (2, "SuperDuperXL"),
           (3, "500"), (4, "42"), (4, "50042"), (5, "Auto"), (6, "Coder"),
           (6, "AutoCoder")])
def test_intraword_chars():
    """IntraWordFilter preserves correct start/end character offsets for split and merged tokens."""
    iwf = analysis.IntraWordFilter(mergewords=True, mergenums=True)
    ana = analysis.RegexTokenizer(r"\S+") | iwf | analysis.LowercaseFilter()
    target = u("WiKiWo-rd")
    tokens = [(t.text, t.startchar, t.endchar) for t in ana(target, chars=True)]
    assert_equal(tokens, [("wi", 0, 2), ("ki", 2, 4), ("wo", 4, 6),
                          ("rd", 7, 9), ("wikiword", 0, 9)])
    # Offsets must also be right when the word does not start at position 0.
    target = u("Zo WiKiWo-rd")
    tokens = [(t.text, t.startchar, t.endchar) for t in ana(target, chars=True)]
    assert_equal(tokens, [("zo", 0, 2), ("wi", 3, 5), ("ki", 5, 7),
                          ("wo", 7, 9), ("rd", 10, 12), ("wikiword", 3, 12)])
def test_intraword_possessive():
    """IntraWordFilter strips possessive 's segments while keeping offsets of the remaining parts."""
    iwf = analysis.IntraWordFilter(mergewords=True, mergenums=True)
    ana = analysis.RegexTokenizer(r"\S+") | iwf | analysis.LowercaseFilter()
    target = u("O'Malley's-Bar")
    tokens = [(t.text, t.startchar, t.endchar) for t in ana(target, chars=True)]
    assert_equal(tokens, [("o", 0, 1), ("malley", 2, 8), ("bar", 11, 14),
                          ("omalleybar", 0, 14)])
def test_word_segments():
    """CompoundWordFilter splits tokens into known sub-words; keep_compound controls whether the original token is kept."""
    wordset = set(u("alfa bravo charlie delta").split())
    cwf = analysis.CompoundWordFilter(wordset, keep_compound=True)
    ana = analysis.RegexTokenizer(r"\S+") | cwf
    target = u("alfacharlie bravodelta delto bravo subalfa")
    tokens = [t.text for t in ana(target)]
    assert_equal(tokens, ["alfacharlie", "alfa", "charlie", "bravodelta",
                          "bravo", "delta", "delto", "bravo", "subalfa"])
    cwf = analysis.CompoundWordFilter(wordset, keep_compound=False)
    ana = analysis.RegexTokenizer(r"\S+") | cwf
    target = u("alfacharlie bravodelta delto bravo subalfa")
    tokens = [t.text for t in ana(target)]
    assert_equal(tokens, ["alfa", "charlie", "bravo", "delta", "delto", "bravo", "subalfa"])
    #target = u("alfacharlie bravodelta")
    #tokens = [(t.text, t.startchar, t.endchar) for t in ana(target, chars=True)]
    #assert_equal(tokens, [("alfa", 0, 4), ("charlie", 4, 11), ("bravo", 12, 17), ("delta", 17, 22)])
def test_biword():
    """BiWordFilter joins adjacent words with '-', spanning both words' offsets; a single word passes through."""
    ana = analysis.RegexTokenizer(r"\w+") | analysis.BiWordFilter()
    result = [t.copy() for t
              in ana(u("the sign of four"), chars=True, positions=True)]
    assert_equal(["the-sign", "sign-of", "of-four"], [t.text for t in result])
    assert_equal([(0, 8), (4, 11), (9, 16)], [(t.startchar, t.endchar) for t in result])
    assert_equal([0, 1, 2], [t.pos for t in result])
    result = [t.copy() for t in ana(u("single"))]
    assert_equal(len(result), 1)
    assert_equal(result[0].text, "single")
def test_shingles():
    """ShingleFilter emits overlapping 3-word windows with sequential positions and spanning char offsets."""
    ana = analysis.RegexTokenizer(r"\w+") | analysis.ShingleFilter(3, " ")
    source = u("better a witty fool than a foolish wit")
    results = [t.copy() for t in ana(source, positions=True, chars=True)]
    assert_equal([t.text for t in results],
                 [u('better a witty'), u('a witty fool'), u('witty fool than'),
                  u('fool than a'), u('than a foolish'), u('a foolish wit')])
    assert_equal([t.pos for t in results], list(range(len(results))))
    # Each shingle's offsets must slice back to its exact source text.
    for t in results:
        assert_equal(t.text, source[t.startchar:t.endchar])
def test_unicode_blocks():
    """blockname/blocknum map characters to Unicode block names/numbers, returning None for unassigned code points."""
    from whoosh.support.unicode import blocks, blockname, blocknum
    assert_equal(blockname(u('a')), 'Basic Latin')
    assert_equal(blockname(unichr(0x0b80)), 'Tamil')
    assert_equal(blockname(unichr(2048)), None)
    assert_equal(blocknum(u('a')), 0)
    assert_equal(blocknum(unichr(0x0b80)), 22)
    assert_equal(blocknum(unichr(2048)), None)
    assert_equal(blocknum(u('a')), blocks.Basic_Latin)  #@UndefinedVariable
    assert_equal(blocknum(unichr(0x0b80)), blocks.Tamil)  #@UndefinedVariable
def test_double_metaphone():
    """DoubleMetaphoneFilter emits primary/secondary codes (secondary at 0.5 boost); combine=True keeps originals too."""
    mf = analysis.RegexTokenizer() | analysis.LowercaseFilter() | analysis.DoubleMetaphoneFilter()
    results = [(t.text, t.boost) for t in mf(u("Spruce View"))]
    assert_equal(results, [('SPRS', 1.0), ('F', 1.0), ('FF', 0.5)])
    mf = analysis.RegexTokenizer() | analysis.LowercaseFilter() | analysis.DoubleMetaphoneFilter(combine=True)
    results = [(t.text, t.boost) for t in mf(u("Spruce View"))]
    assert_equal(results, [('spruce', 1.0), ('SPRS', 1.0), ('view', 1.0),
                           ('F', 1.0), ('FF', 0.5)])
    namefield = fields.TEXT(analyzer=mf)
    texts = list(namefield.process_text(u("Spruce View"), mode="query"))
    assert_equal(texts, [u('spruce'), 'SPRS', u('view'), 'F', 'FF'])
def test_substitution():
    """SubstitutionFilter applies a regex replacement to every token's text, including group references."""
    mf = analysis.RegexTokenizer(r"\S+") | analysis.SubstitutionFilter("-", "")
    assert_equal([t.text for t in mf(u("one-two th-re-ee four"))],
                 ["onetwo", "threee", "four"])
    mf = analysis.RegexTokenizer(r"\S+") | analysis.SubstitutionFilter("([^=]*)=(.*)", r"\2=\1")
    assert_equal([t.text for t in mf(u("a=b c=d ef"))], ["b=a", "d=c", "ef"])
def test_delimited_attribute():
    """DelimitedAttributeFilter parses a trailing ^number into the token's boost attribute."""
    ana = analysis.RegexTokenizer(r"\S+") | analysis.DelimitedAttributeFilter()
    results = [(t.text, t.boost) for t in ana(u("image render^2 file^0.5"))]
    assert_equal(results, [("image", 1.0), ("render", 2.0), ("file", 0.5)])
def test_porter2():
    """Porter2 stemmer produces the expected stems, including apostrophe-s handling."""
    from whoosh.lang.porter2 import stem
    plurals = ['caresses', 'flies', 'dies', 'mules', 'denied',
               'died', 'agreed', 'owned', 'humbled', 'sized',
               'meeting', 'stating', 'siezing', 'itemization',
               'sensational', 'traditional', 'reference', 'colonizer',
               'plotted']
    singles = [stem(w) for w in plurals]
    assert_equal(singles, ['caress', 'fli', 'die', 'mule', 'deni', 'die', 'agre',
                           'own', 'humbl', 'size', 'meet', 'state', 'siez', 'item',
                           'sensat', 'tradit', 'refer', 'colon', 'plot'])
    assert_equal(stem("bill's"), "bill")
    assert_equal(stem("y's"), "y")
def test_url():
    """url_pattern keeps URLs and URNs as single tokens while dropping trailing sentence punctuation."""
    sample = u("Visit http://bitbucket.org/mchaput/whoosh or urn:isbn:5930502 or http://www.apple.com/.")
    for ana in (analysis.SimpleAnalyzer(analysis.url_pattern),
                analysis.StandardAnalyzer(analysis.url_pattern, stoplist=None)):
        ts = [t.text for t in ana(sample)]
        assert_equal(ts, [u('visit'), u('http://bitbucket.org/mchaput/whoosh'),
                          u('or'), u('urn:isbn:5930502'), u('or'), u('http://www.apple.com/')])
def test_name_field():
    """A metaphone-combined TEXT field with OR multitoken queries matches phonetically similar names."""
    ana = (analysis.RegexTokenizer(r"\S+")
           | analysis.LowercaseFilter()
           | analysis.DoubleMetaphoneFilter(combine=True))
    namefield = fields.TEXT(analyzer=ana, multitoken_query="or")
    schema = fields.Schema(id=fields.STORED, name=namefield)
    ix = RamStorage().create_index(schema)
    w = ix.writer()
    w.add_document(id=u("one"), name=u("Leif Ericson"))
    w.commit()
    s = ix.searcher()
    qp = qparser.QueryParser("name", schema)
    # "leaf eriksen" should match "Leif Ericson" via shared metaphone codes.
    q = qp.parse(u("leaf eriksen"), normalize=False)
    r = s.search(q)
    assert_equal(len(r), 1)
def test_start_pos():
    """formats.tokens honors the start_pos keyword, offsetting emitted token positions."""
    from whoosh import formats
    ana = analysis.RegexTokenizer(r"\S+") | analysis.LowercaseFilter()
    kw = {"positions": True}
    assert_equal([t.pos for t in formats.tokens(u("alfa bravo charlie delta"), ana, kw)], [0, 1, 2, 3])
    kw["start_pos"] = 3
    ts = [t.copy() for t in formats.tokens(u("A B C D").split(), ana, kw)]
    assert_equal(" ".join([t.text for t in ts]), "A B C D")
    assert_equal([t.pos for t in ts], [3, 4, 5, 6])
def test_frowny_face():
    """IntraWordFilter must not crash or emit tokens for all-delimiter or delimiter-trailing input."""
    # See https://bitbucket.org/mchaput/whoosh/issue/166/
    ana = analysis.RegexTokenizer(r"\S+") | analysis.IntraWordFilter()
    # text is all delimiters
    tokens = [t.text for t in ana(u(":-("))]
    assert_equal(tokens, [])
    # text has consecutive delimiters
    tokens = [t.text for t in ana(u("LOL:)"))]
    assert_equal(tokens, ["LOL"])
| waseem18/oh-mainline | vendor/packages/whoosh/tests/test_analysis.py | Python | agpl-3.0 | 12,325 | [
"VisIt"
] | d25a53f3b2687086de3aada4fa89db1b31f0f4d28b8cc3b3c6e1d361ad86cbf5 |
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
functions.py - Miscellaneous homeless functions
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
Most Interesting Contents:
siFormat / siEval - functions for dealing with numbers in SI notation
downsample - multidimensional downsampling by mean
rmsMatch / fastRmsMatch - recursive template matching
makeDispMap / matchDistortImg - for measuring and correcting motion/distortion between two images
"""
import six
from six.moves import range
import sys
import os, re, math, time, threading, decimal
from acq4.util.metaarray import *
#from scipy import *
#from scipy.optimize import leastsq
#from scipy.ndimage import gaussian_filter, generic_filter, median_filter
from scipy import stats
import scipy.signal, scipy.ndimage, scipy.optimize
import numpy.ma
from acq4.util.debug import *
import numpy as np
try:
import scipy.weave as weave
from scipy.weave import converters
except:
pass
def dirDialog(startDir='', title="Select Directory"):
    """Show a modal directory-chooser dialog and return the selected path as a str.
    NOTE(review): relies on `Qt` being provided by one of the star imports above -- confirm.
    """
    return str(Qt.QFileDialog.getExistingDirectory(None, title, startDir))
def fileDialog():
    """Show a modal open-file dialog and return the selected path as a str.
    NOTE(review): relies on `Qt` being provided by one of the star imports above -- confirm.
    """
    return str(Qt.QFileDialog.getOpenFileName())
## the built in logspace function is pretty much useless.
def logSpace(start, stop, num):
    """Return `num` logarithmically spaced values from start to stop (inclusive).

    Unlike numpy.logspace, this takes the actual endpoint values rather than
    their exponents. num < 2 previously raised ZeroDivisionError; it now
    returns [start] for num == 1 and an empty array for num == 0.
    """
    num = int(num)
    if num < 2:
        # The ratio exponent below divides by (num - 1); handle degenerate
        # sizes explicitly instead of crashing.
        return start * np.ones(max(num, 0))
    ratio = (stop / start) ** (1. / (num - 1))
    return start * (ratio ** np.arange(0, num))
def linSpace(start, stop, num):
    """Return `num` evenly spaced values from start to stop (inclusive); thin wrapper around numpy.linspace."""
    return np.linspace(start, stop, num)
def sigmoid(v, x):
    """Evaluate a logistic sigmoid at x.

    v = [slope, x-offset, amplitude, y-offset].
    """
    slope, xoff, amp, yoff = v
    return amp / (1.0 + np.exp(-slope * (x - xoff))) + yoff
def gaussian(v, x):
    """Evaluate a Gaussian at x.

    v = [amplitude, x-offset, sigma, y-offset].
    """
    amp, xoff, sigma, yoff = v
    exponent = ((x - xoff) ** 2) / (2 * sigma ** 2)
    return amp * np.exp(-exponent) + yoff
def expDecay(v, x):
    """Evaluate an exponential decay at x.

    v = [amplitude, tau].
    """
    amp, tau = v
    return amp * np.exp(-x / tau)
def expDecayWithOffset(v, x):
    """Rising exponential with a y-offset, valued at x.

    Suitable for measuring a bridge-balance offset in a voltage response to a
    current pulse. Assumes a fixed t0 at x=0. v = [amp, tau, y-offset].
    """
    amp, tau, yoff = v
    rise = 1 - np.exp(-x / tau)
    return amp * rise + yoff
def expPulse(v, x):
    """Exponential pulse: rising exponential, variable-length plateau, then
    falling exponential.

    v = [t0, y-offset, tau1, tau2, amp, width].
    """
    t0, yOffset, tau1, tau2, amp, width = v
    y = np.full(x.shape, yOffset, dtype=float)
    rising = (x >= t0) & (x < t0 + width)
    falling = x >= (t0 + width)
    y[rising] = yOffset + amp * (1 - np.exp(-(x[rising] - t0) / tau1))
    # Amplitude actually reached at the end of the rising phase.
    plateauAmp = amp * (1 - np.exp(-width / tau1))
    y[falling] = yOffset + plateauAmp * np.exp(-(x[falling] - (t0 + width)) / tau2)
    return y
def fit(function, xVals, yVals, guess, errFn=None, measureError=False, generateResult=False, resultXVals=None, **kargs):
    """fit xVals, yVals to the specified function.
    If generateResult is True, then the fit is used to generate an array of points from function
    with the xVals supplied (useful for plotting the fit results with the original data).
    The result x values can be explicitly set with resultXVals.
    Returns the tuple from scipy.optimize.leastsq (fitted params first) with
    (result, error) appended, where result is the evaluated fit curve (or None)
    and error is the mean absolute residual (or None).
    """
    if errFn is None:
        # Default residual: difference between model and data.
        errFn = lambda v, x, y: function(v, x)-y
    if len(xVals) < len(guess):
        raise Exception("Too few data points to fit this function. (%d variables, %d points)" % (len(guess), len(xVals)))
    fitResult = scipy.optimize.leastsq(errFn, guess, args=(xVals, yVals), **kargs)
    error = None
    #if measureError:
        #error = errFn(fit[0], xVals, yVals)
    result = None
    if generateResult or measureError:
        if resultXVals is not None:
            xVals = resultXVals
        # Evaluate the fitted function so the caller can plot/compare it.
        result = function(fitResult[0], xVals)
        #fn = lambda i: function(fit[0], xVals[i.astype(int)])
        #result = fromfunction(fn, xVals.shape)
        if measureError:
            error = abs(yVals - result).mean()
    return fitResult + (result, error)
def fitSigmoid(xVals, yVals, guess=(1.0, 0.0, 1.0, 0.0), **kargs):
    """Returns least-squares fit for sigmoid; see sigmoid() for the parameter layout."""
    # Tuple default avoids the shared-mutable-default-argument pitfall.
    return fit(sigmoid, xVals, yVals, list(guess), **kargs)
def fitGaussian(xVals, yVals, guess=(1.0, 0.0, 1.0, 0.0), **kargs):
    """Returns least-squares fit parameters for function v[0] * exp(((x-v[1])**2) / (2 * v[2]**2)) + v[3]"""
    # Tuple default avoids the shared-mutable-default-argument pitfall.
    return fit(gaussian, xVals, yVals, list(guess), **kargs)
def fitExpDecay(xVals, yVals, guess=(1.0, 1.0, 0.0), **kargs):
    """Returns least-squares fit for an exponential decay; see expDecay()."""
    # Tuple default avoids the shared-mutable-default-argument pitfall.
    return fit(expDecay, xVals, yVals, list(guess), **kargs)
#def pspInnerFunc(v, x):
#return v[0] * (1.0 - np.exp(-x / v[2])) * np.exp(-x / v[3])
#def pspFunc(v, x, risePower=1.0):
#"""Function approximating a PSP shape.
#v = [amplitude, x offset, rise tau, fall tau]
#Uses absolute value of both taus, so fits may indicate negative tau.
#"""
### determine scaling factor needed to achieve correct amplitude
#v = [v[0], v[1], abs(v[2]), abs(v[3])]
#maxX = v[2] * np.log(1 + (v[3]/v[2]))
#maxVal = pspInnerFunc([1.0, 0, v[2], v[3]], maxX)
#out = np.empty(x.shape, x.dtype)
#mask = x > v[1]
#out[~mask] = 0
#xvals = x[mask]-v[1]
#try:
#out[mask] = 1.0 / maxVal * pspInnerFunc(v, xvals)
#except:
#print v[2], v[3], maxVal, xvals.shape, xvals.dtype
#raise
#return out
#def fitPsp(xVals, yVals, guess=[1e-3, 0, 10e-3, 10e-3], bounds=None, **kargs):
#vals, junk, comp, err = fit(pspFunc, xVals, yVals, guess, **kargs)
#amp, xoff, rise, fall = vals
### fit may return negative tau values (since pspFunc uses abs(tau)); return the absolute value.
#return (amp, xoff, abs(rise), abs(fall))#, junk, comp, err
def pspInnerFunc(x, rise, decay, power):
    """Unnormalized PSP kernel: 0 for x < 0, else (1-exp(-x/rise))**power * exp(-x/decay)."""
    result = np.zeros(x.shape, x.dtype)
    valid = x >= 0
    xv = x[valid]
    riseTerm = (1.0 - np.exp(-xv / rise)) ** power
    result[valid] = riseTerm * np.exp(-xv / decay)
    return result
def pspMaxTime(rise, decay, risePower=2.0):
    """Return the time from start to peak for a psp with given parameters."""
    ratio = decay * risePower / rise
    return rise * np.log(1 + ratio)
def pspFunc(v, x, risePower=2.0):
    """Function approximating a PSP shape.
    v = [amplitude, x offset, rise tau, decay tau]
    Uses absolute value of both taus, so fits may indicate negative tau.
    """
    if len(v) > 4:
        # NOTE(review): processExtraVars is not defined in this module section;
        # presumably it maps an extended parameter vector down to 4 values -- confirm.
        v = processExtraVars(v)
    ## determine scaling factor needed to achieve correct amplitude
    v[2] = abs(v[2])
    v[3] = abs(v[3])
    maxX = pspMaxTime(v[2], v[3], risePower)
    # Peak value of the unnormalized kernel; dividing by it makes v[0] the
    # actual peak amplitude of the returned curve.
    maxVal = (1.0 - np.exp(-maxX / v[2]))**risePower * np.exp(-maxX / v[3])
    #maxVal = pspInnerFunc(np.array([maxX]), v[2], v[3], risePower)[0]
    try:
        out = v[0] / maxVal * pspInnerFunc(x-v[1], v[2], v[3], risePower)
    except:
        print(v[2], v[3], maxVal, x.shape, x.dtype)
        raise
    return out
def fitPsp(x, y, guess, bounds=None, risePower=2.0, multiFit=False):
    """Least-squares fit of a PSP shape to (x, y).

    guess: [amp, xoffset, rise, fall] (None picks defaults from the data)
    bounds: [[ampMin, ampMax], ...] per-parameter clamp ranges
    NOTE: This fit is more likely to converge correctly if the guess amplitude
    is larger (about 2x) than the actual amplitude.
    if multiFit is True, then attempt to improve the fit by brute-force searching
    and re-fitting. (this is very slow)
    Returns [amp, xoffset, rise, fall] with taus made positive and amp scaled
    to the true peak value.
    """
    if guess is None:
        guess = [
            (y.max()-y.min()) * 2,
            0,
            x[-1]*0.25,
            x[-1]
        ]
    ## pick some reasonable default bounds
    if bounds is None:
        # Independent sublists: [[None, None]] * 4 would alias ONE list four
        # times, so setting bounds[1][0] would clamp every parameter.
        bounds = [[None, None] for _ in range(4)]
        bounds[1][0] = -2e-3
    minTau = (x[1]-x[0]) * 0.5
    #bounds[2] = [minTau, None]
    #bounds[3] = [minTau, None]
    # Cache residuals per parameter vector; leastsq re-evaluates repeated points.
    errCache = {}
    def errFn(v, x, y):
        key = tuple(v)
        if key not in errCache:
            # Clamp parameters into bounds in-place before evaluating.
            for i in range(len(v)):
                if bounds[i][0] is not None:
                    v[i] = max(v[i], bounds[i][0])
                if bounds[i][1] is not None:
                    v[i] = min(v[i], bounds[i][1])
            err = y - v[0] * pspInnerFunc(x-v[1], abs(v[2]), abs(v[3]), risePower)
            errCache[key] = (err, v.copy())
            return err
        err, v2 = errCache[key]
        v[:] = v2
        return err
    ## initial fit
    fit = scipy.optimize.leastsq(errFn, guess, args=(x, y), ftol=1e-2, factor=0.1)[0]
    ## try on a few more fits
    if multiFit:
        err = (errFn(fit, x, y)**2).sum()
        bestFit = fit
        # Brute-force perturbations of amplitude, taus and offset around the
        # initial fit; keep whichever refit has the lowest squared error.
        # NOTE(review): [0.002, .0, 0.002] repeats +0.002; was -0.002 intended?
        for da in [0.5, 1.0, 2.0]:
            for dt in [0.5, 1.0, 2.0]:
                for dr in [0.5, 1.0, 2.0]:
                    for do in [0.002, .0, 0.002]:
                        if da == 1.0 and dt == 1.0 and dr == 1.0 and do == 0.0:
                            continue
                        guess = fit.copy()
                        guess[0] *= da
                        guess[1] += do
                        guess[3] *= dt
                        guess[2] *= dr
                        fit2 = scipy.optimize.leastsq(errFn, guess, args=(x, y), ftol=1e-1, factor=0.1)[0]
                        err2 = (errFn(fit2, x, y)**2).sum()
                        if err2 < err:
                            bestFit = fit2
                            err = err2
        fit = bestFit
    # Report positive taus and rescale amp from kernel units to the true peak.
    fit[2:] = abs(fit[2:])
    maxX = fit[2] * np.log(1 + (fit[3]*risePower / fit[2]))
    maxVal = (1.0 - np.exp(-maxX / fit[2]))**risePower * np.exp(-maxX / fit[3])
    fit[0] *= maxVal
    return fit
def doublePspFunc(v, x, risePower=2.0):
    """Function approximating a PSP shape with double exponential decay.
    v = [amp1, amp2, x offset, rise tau, decay tau 1, decay tau 2]
    Uses absolute value of both taus, so fits may indicate negative tau.
    Note: unlike pspFunc, the output is NOT normalized so that amp1/amp2 equal
    the peak value; they scale the two decay components directly.
    """
    amp1, amp2, xoff, rise, decay1, decay2 = v
    x = x-xoff
    ### determine scaling factor needed to achieve correct amplitude
    #v[2] = abs(v[2])
    #v[3] = abs(v[3])
    #maxX = pspMaxTime(v[2], v[3], risePower)
    #maxVal = (1.0 - np.exp(-maxX / v[2]))**risePower * np.exp(-maxX / v[3])
    ##maxVal = pspInnerFunc(np.array([maxX]), v[2], v[3], risePower)[0]
    try:
        out = np.zeros(x.shape, x.dtype)
        mask = x >= 0
        x = x[mask]
        # Shared rise envelope multiplied by the sum of two decay exponentials.
        riseExp = (1.0 - np.exp(-x / rise))**risePower
        decayExp1 = amp1 * np.exp(-x / decay1)
        decayExp2 = amp2 * np.exp(-x / decay2)
        out[mask] = riseExp * (decayExp1 + decayExp2)
    except:
        print(v, x.shape, x.dtype)
        raise
    return out
def doublePspMax(v, risePower=2.0):
    """
    Return the time and value of the peak of a PSP with double-exponential decay.
    """
    ## create same params with negative amplitudes
    # Negating positive amplitudes lets fmin (a minimizer) locate the peak.
    v2 = list(v)[:]
    if v2[0] > 0:
        v2[0] *= -1
    if v2[1] > 0:
        v2[1] *= -1
    # Start the search at the x-offset (v[2]); assumes a single extremum.
    xMax = scipy.optimize.fmin(lambda x: doublePspFunc(v2, x), [v[2]], disp=False)
    yMax = doublePspFunc(v, xMax)
    return xMax[0], yMax[0]
def fitDoublePsp(x, y, guess, bounds=None, risePower=2.0):
    """
    Fit a PSP shape with double exponential decay.
    guess: [amp1, amp2, xoffset, rise, fall1, fall2]
    bounds: [[amp1Min, amp1Max], ...]
    NOTE: This fit is more likely to converge correctly if the guess amplitude
    is larger (about 2x) than the actual amplitude.
    Returns (amp1, amp2, xoffset, rise, fallShort, fallLong) with the two decay
    taus ordered smallest-first.
    """
    ## normalize scale to assist fit
    # Amplitudes (and their bounds) are rescaled to ~unit range, then scaled
    # back before returning.
    yScale = y.max() - y.min()
    y = y / yScale
    for i in [0, 1]:
        guess[i] /= yScale
        if bounds[i][0] is not None:
            bounds[i][0] /= yScale
        if bounds[i][1] is not None:
            bounds[i][1] /= yScale
    #if guess is None:
        #guess = [
            #(y.max()-y.min()) * 2,
            #0,
            #x[-1]*0.25,
            #x[-1]
        #]
    ### pick some reasonable default bounds
    #if bounds is None:
        #bounds = [[None,None]] * 4
    #minTau = (x[1]-x[0]) * 0.5
    ##bounds[2] = [minTau, None]
    ##bounds[3] = [minTau, None]
    #trials = []
    # Cache residuals per parameter vector, as in fitPsp.
    errs = {}
    def errFn(v, x, y):
        key = tuple(v)
        if key not in errs:
            ## enforce max rise/fall ratio
            #v[2] = min(v[2], v[3] / 2.)
            f = doublePspFunc(v,x,risePower)
            err = y - f
            #trials.append(f)
            # Clamp parameters into bounds in-place.
            for i in range(len(v)):
                if bounds[i][0] is not None and v[i] < bounds[i][0]:
                    v[i] = bounds[i][0]
                if bounds[i][1] is not None and v[i] > bounds[i][1]:
                    v[i] = bounds[i][1]
            ## both amps must be either positive or negative
            if (v[0] > 0 and v[1] < 0) or (v[0] < 0 and v[1] > 0):
                if abs(v[0]) > abs(v[1]):
                    v[1] = 0
                else:
                    v[0] = 0
            errs[key] = (err, v.copy())
            return err
        err, v2 = errs[key]
        v[:] = v2
        return err
    #fit = scipy.optimize.leastsq(errFn, guess, args=(x, y), ftol=1e-3, factor=0.1, full_output=1)
    fit = scipy.optimize.leastsq(errFn, guess, args=(x, y), ftol=1e-2)
    #print fit[2:]
    fit = fit[0]
    err = (errFn(fit, x, y)**2).sum()
    #print "initial fit:", fit, err
    guess = fit.copy()
    bestFit = fit
    # Retry from perturbed starting points; keep the lowest-error result.
    for ampx in (0.5, 2.0):
        for taux in (0.2, 0.5, 2.0):  ## The combination ampx=2, taux=0.2 seems to be particularly important.
            guess[:2] = fit[:2] * ampx
            guess[4:6] = fit[4:6] * taux
            fit2 = scipy.optimize.leastsq(errFn, guess, args=(x, y), ftol=1e-2, factor=0.1)[0]
            err2 = (errFn(fit2, x, y)**2).sum()
            if err2 < err:
                #print "Improved fit:", ampx, taux, err2
                bestFit = fit2
                err = err2
    fit = bestFit
    #print "final fit:", fit, err
    # Undo the amplitude normalization and order the two decay taus.
    fit[0] *= yScale
    fit[1] *= yScale
    return tuple(fit[:4]) + (min(*fit[4:]), max(*fit[4:]))
STRNCMP_REGEX = re.compile(r'(-?\d+(\.\d*)?((e|E)-?\d+)?)')
def strncmp(a, b):
    """Compare strings based on the numerical values they represent (for sorting).

    Each string may contain multiple numbers; they are compared pairwise in
    order. Strings that are numerically equal (or contain no numbers) fall
    back to plain string comparison. Returns -1, 0 or 1.
    """
    def _cmp(x, y):
        # Python 3 removed the builtin cmp(); this is the standard equivalent.
        return (x > y) - (x < y)
    am = STRNCMP_REGEX.findall(a)
    bm = STRNCMP_REGEX.findall(b)
    # zip() walks only positions both strings have; the original indexed bm by
    # len(am) and could raise IndexError when b held fewer numbers.
    for ma, mb in zip(am, bm):
        c = _cmp(float(ma[0]), float(mb[0]))
        if c != 0:
            return c
    return _cmp(a, b)
def downsample(data, n, axis=0, xvals='subsample'):
    """Downsample by averaging points together across axis.
    If multiple axes are specified, runs once per axis.
    If a metaArray is given, then the axis values can be either subsampled
    or downsampled to match.
    """
    ma = None
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        # Remember the MetaArray so axis metadata can be rebuilt below;
        # work on the raw ndarray view in the meantime.
        ma = data
        data = data.view(ndarray)
    if hasattr(axis, '__len__'):
        # Multiple axes: recurse once per axis, broadcasting a scalar n.
        if not hasattr(n, '__len__'):
            n = [n]*len(axis)
        for i in range(len(axis)):
            data = downsample(data, n[i], axis[i])
        return data
    # Trailing samples that do not fill a complete group of n are dropped.
    nPts = int(data.shape[axis] / n)
    s = list(data.shape)
    s[axis] = nPts
    s.insert(axis+1, n)
    sl = [slice(None)] * data.ndim
    sl[axis] = slice(0, nPts*n)
    d1 = data[tuple(sl)]
    #print d1.shape, s
    # Reshape so each group of n occupies its own axis, then mean it away.
    # (In-place .shape assignment requires the slice to be contiguous here.)
    d1.shape = tuple(s)
    d2 = d1.mean(axis+1)
    if ma is None:
        return d2
    else:
        # Rebuild the MetaArray axis values to match the reduced length.
        info = ma.infoCopy()
        if 'values' in info[axis]:
            if xvals == 'subsample':
                info[axis]['values'] = info[axis]['values'][::n][:nPts]
            elif xvals == 'downsample':
                info[axis]['values'] = downsample(info[axis]['values'], n)
        return MetaArray(d2, info=info)
def downsamplend(data, div):
    """Downsample multiple axes at once by averaging non-overlapping blocks.

    div: per-axis block sizes. Trailing samples that do not fill a complete
    block are dropped. Probably slower than calling downsample() per axis.
    """
    # Integer division: the original built a float shape, which np.empty
    # rejects on modern numpy.
    shape = [data.shape[i] // div[i] for i in range(data.ndim)]
    res = np.empty(tuple(shape), dtype=float)
    for ind in np.ndindex(res.shape):
        sl = tuple(slice(ind[j] * div[j], (ind[j] + 1) * div[j])
                   for j in range(data.ndim))
        res[ind] = data[sl].mean()
    return res
def recursiveRegisterImages(i1, i2, hint=(0,0), maxDist=None, objSize=None):
    """Given images i1 and i2, recursively find the offset for i2 that best matches with i1.

    Works coarse-to-fine: builds a pyramid of downscaled copies, registers at
    the coarsest scale, then uses each result to restrict the search range at
    the next finer scale.

    Args:
        i1, i2: 2D image arrays.
        hint: starting guess for the offset.
        maxDist: if given, limits the initial search window around hint.
        objSize: approximate feature size; determines pyramid depth (default 5 levels).
    """
    # time.clock() was removed in Python 3.8; use perf_counter when available
    # (these timestamps only feed the timing printout below).
    _clock = getattr(time, "perf_counter", None) or time.time
    time1 = _clock()
    ## float images
    im1 = i1.astype(float)
    im2 = i2.astype(float)
    ## Decide how many iterations to perform, scale images
    if objSize is not None:
        nit = int(np.floor(np.log(objSize)/np.log(2)) + 1)
    else:
        nit = 5
    print("Doing %d iterations" % nit)
    spow = 2.0
    scales = [1.0 / spow**x for x in range(nit-1,-1,-1)]
    # Placeholder list; every entry is replaced with a fresh pair below.
    imScale = [[None, None]] * nit
    imScale[-1] = [im1, im2]
    time2 = _clock()
    for i in range(nit-2,-1,-1):
        imScale[i] = [scipy.ndimage.zoom(imScale[i+1][0], 1.0/spow, order=1), scipy.ndimage.zoom(imScale[i+1][1], 1.0/spow, order=1)]
    print(scales)
    time3 = _clock()
    lastSf = None
    # `is not None`: identity test is the correct None check.
    if maxDist is not None:
        start = (np.array(hint) - np.ceil(maxDist / 2.)) * scales[0]
        end = (np.array(hint) + np.ceil(maxDist / 2.)) * scales[0]
    else:
        start = np.array([0,0])
        end = None
    print("Checking range %s - %s" % (str(start), str(end)))
    for i in range(0, nit):
        sf = scales[i]
        im1s = imScale[i][0]
        im2s = imScale[i][1]
        if lastSf is not None:
            # Rescale the previous (coarser) result into this scale's window.
            start = np.floor(np.floor(center-0.5) * sf / lastSf)
            end = np.ceil(np.ceil(center+0.5) * sf / lastSf)
        ## get prediction
        if end is None or any(start != end):
            print("register:", start, end)
            center = registerImages(im1s, im2s, (start, end))
        lastSf = sf
    time4 = _clock()
    print("Scale time: %f Corr time: %f Total: %f" % (time3-time2, time4-time3, time4-time1))
    return center
def xcMax(xc):
    """Return [row, col] of the first occurrence of the array's maximum value."""
    rows, cols = np.where(xc == xc.max())
    return np.array([rows[0], cols[0]])
def registerImages(im1, im2, searchRange):
    """
    Find the offset of im2 relative to im1 by brute-force error minimization.
    searchRange is [[xmin, ymin], [xmax, ymax]]; an end of None searches the
    full images.
    NOTE(review): the unconditional `raise Exception()` just before the return
    looks like leftover debugging -- it makes the return unreachable and breaks
    recursiveRegisterImages, which expects a result. Confirm and remove.
    """
    #print "Registering images %s and %s, %s-%s" % (str(im1.shape), str(im2.shape), str(start), str(end))
    #(sx, sy) = searchRange
    #start=[sx[0], sy[0]]
    #end = [sx[1], sy[1]]
    start, end = searchRange
    print("start:",start,"end:",end)
    if end is None:
        mode = 'full'
        im1c = im1
        im2c = im2
        #print "Searching full images."
    else:
        # Crop both images so every candidate offset in [start, end] keeps
        # im2c fully inside im1c.
        mode = 'valid'
        s1x = max(0, start[0])
        s1y = max(0, start[1])
        print(im1.shape)
        print(im2.shape)
        e1x = min(im1.shape[0], im2.shape[0]+end[0])
        e1y = min(im1.shape[1], im2.shape[1]+end[1])
        print("%d,%d - %d,%d" % (s1x, s1y, e1x, e1y))
        s2x = max(0, -start[0])
        s2y = max(0, -start[1])
        e2x = min(im2.shape[0], im1.shape[0]-end[0])
        e2y = min(im2.shape[1], im1.shape[1]-end[1])
        print("%d,%d - %d,%d" % (s2x, s2y, e2x, e2y))
        ## Crop images
        im1c = im1[s1x:e1x, s1y:e1y]
        im2c = im2[s2x:e2x, s2y:e2y]
        #print "Images cropped to %d,%d-%d,%d %d,%d-%d,%d" % (s1x, s1y, e1x, e1y, s2x, s2y, e2x, e2y)
    #showImage(im1c)
    #showImage(im2c)
    ## get full scale correlation
    #turns out cross-correlation is a really lousy way to register images.
    #xc = scipy.signal.signaltools.correlate2d(im1c, im2c, boundary='fill', fillvalue=im1c.mean(), mode=mode)
    def err(img):
        # Sum of absolute differences between im2c and one candidate window.
        try:
            img.shape = im2c.shape
        except:
            print(img.shape, im2c.shape)
            raise
        return abs(im2c - img).sum()
    print(im1c.shape, im2c.shape)
    # Evaluate the SAD error at every candidate placement of im2c over im1c.
    xc = scipy.ndimage.generic_filter(im1c, err, footprint=im2c)
    # print xc.min(), xc.max()
    #xcb = ndimage.filters.gaussian_filter(xc, 20)
    #xc -= xcb
    xcm = np.argmin(xc)
    # argmin returns min index of flattened array
    xcm = np.unravel_index(xcm, xc.shape)
    #xcm = xcMax(xc)
    #xcc = concatenate((xc[...,newaxis], xc[...,newaxis], xc[...,newaxis]), axis=2)
    #xcc[xcm[0], xcm[1], 0:2] = xc.min()
    #showImage(xcc)
    #showImage(xcb)
    print("Best match at " + str(xcm))
    # Convert the filter-output index into an offset of im2 within im1.
    if mode == 'full':
        xcm -= np.array(im1c.shape)-1
    else:
        xcm += start
    print(" ..corrected to " + str(xcm))
    #showImage(regPair(im1, im2, xcm))
    raise Exception()
    return xcm
def regPair(im1, im2, reg):
    """Compose a registration preview image: im1 in the red channel and
    im2 in the green channel, offset from each other by *reg*, each with
    a bright border drawn around it. Returns a (w, h, 3) array.

    Fixes: output dimensions are computed as ints (Python 3 true division
    produced float shapes, which np.empty rejects) and the channel stack
    uses np.concatenate (scipy no longer re-exports concatenate).
    """
    if len(im1.shape) > 2:
        # use only the first channel of multi-channel input
        im1 = im1[...,0]
        im2 = im2[...,0]
    ## prepare blank images
    mn = min(im1.min(), im2.min())
    mx = max(im1.max(), im2.max())
    w = (im1.shape[0]+im2.shape[0])//2 + int(abs(reg[0])) + 2
    h = (im1.shape[1]+im2.shape[1])//2 + int(abs(reg[1])) + 2
    r = np.empty((w, h))
    g = np.empty((w, h))
    b = np.empty((w, h))
    r[...] = mn
    g[...] = mn
    b[...] = mn
    ## draw borders
    im1 = im1.copy()
    im2 = im2.copy()
    for im in (im1, im2):
        im[0,:] = mx
        im[-1,:] = mx
        im[:,0] = mx
        im[:,-1] = mx
    ## copy each image in at its offset
    i1sx = max(0, -int(reg[0]))
    i1sy = max(0, -int(reg[1]))
    i2sx = max(0, int(reg[0]))
    i2sy = max(0, int(reg[1]))
    r[i1sx:i1sx+im1.shape[0], i1sy:i1sy+im1.shape[1]] = im1
    g[i2sx:i2sx+im2.shape[0], i2sy:i2sy+im2.shape[1]] = im2
    return np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2)
def vibratome(data, start, stop, axes=(0,1)):
    """Take a diagonal slice through an array. If the input is N-dimensional, the result is N-1 dimensional.
    start and stop are (x,y) tuples that indicate the beginning and end of the slice region.
    The spacing of points along the slice is equivalent to the original pixel spacing.
    (The data set returned is not guaranteed to hit the stopping point exactly)

    Fix: neighbor lookups now index with tuples -- fancy indexing with a
    list of arrays and slices is an error in modern NumPy.
    """
    ## transpose data so x and y are the first 2 axes
    trAx = list(range(data.ndim))
    trAx.remove(axes[0])
    trAx.remove(axes[1])
    tr1 = tuple(axes) + tuple(trAx)
    data = data.transpose(tr1)
    ## determine proper length of output array and pointwise vectors
    length = np.sqrt((stop[0]-start[0])**2 + (stop[1]-start[1])**2)
    dx = (stop[0]-start[0]) / length
    dy = (stop[1]-start[1]) / length
    length = np.ceil(length)  ## Extend length to be integer (can't have fractional array dimensions)
    nPts = int(length)+1
    ## Actual position of each point along the slice
    x = np.linspace(start[0], start[0]+(length*dx), nPts)
    y = np.linspace(start[1], start[1]+(length*dy), nPts)
    ## Location of original values that will contribute to each point
    # NOTE(review): the < 0 checks below can never fire after this unsigned
    # cast (negative coordinates wrap instead) -- confirm inputs are
    # expected to be non-negative.
    xi0 = np.floor(x).astype(np.uint)
    yi0 = np.floor(y).astype(np.uint)
    xi1 = xi0 + 1
    yi1 = yi0 + 1
    ## Find out-of-bound values
    ox0 = (xi0 < 0) + (xi0 >= data.shape[0])
    oy0 = (yi0 < 0) + (yi0 >= data.shape[1])
    ox1 = (xi1 < 0) + (xi1 >= data.shape[0])
    oy1 = (yi1 < 0) + (yi1 >= data.shape[1])
    ## Make sure these locations are in-bounds (just read from 0,0 and then overwrite the values later)
    xi0[ox0] = 0
    xi1[ox1] = 0
    yi0[oy0] = 0
    yi1[oy1] = 0
    ## index tuples for the four neighbors of each sample point
    s00 = (xi0, yi0) + (slice(None),) * (data.ndim-2)
    s10 = (xi1, yi0) + (slice(None),) * (data.ndim-2)
    s01 = (xi0, yi1) + (slice(None),) * (data.ndim-2)
    s11 = (xi1, yi1) + (slice(None),) * (data.ndim-2)
    ## Actual values from data set
    v00 = data[s00]
    v10 = data[s10]
    v01 = data[s01]
    v11 = data[s11]
    ## Set 0 for all out-of-bound values
    v00[ox0+oy0] = 0
    v10[ox1+oy0] = 0
    v01[ox0+oy1] = 0
    v11[ox1+oy1] = 0
    ## Bilinear interpolation coefficients
    dx0 = x - xi0
    dy0 = y - yi0
    dx1 = 1 - dx0
    dy1 = 1 - dy0
    c00 = dx1 * dy1
    c10 = dx0 * dy1
    c01 = dx1 * dy0
    c11 = dx0 * dy0
    ## Add un-indexed dimensions into coefficient arrays
    c00.shape = c00.shape + (1,)*(data.ndim-2)
    c10.shape = c10.shape + (1,)*(data.ndim-2)
    c01.shape = c01.shape + (1,)*(data.ndim-2)
    c11.shape = c11.shape + (1,)*(data.ndim-2)
    ## Interpolate!
    interpolated = v00*c00 + v10*c10 + v01*c01 + v11*c11
    ## figure out the reverse transpose order
    tr1 = list(tr1)
    tr1.pop(1)
    tr2 = [None] * len(tr1)
    for i in range(len(tr1)):
        if tr1[i] > 1:
            tr1[i] -= 1
        tr2[tr1[i]] = i
    tr2 = tuple(tr2)
    ## Untranspose array before returning
    return interpolated.transpose(tr2)
def affineSlice(data, shape, origin, vectors, axes, **kargs):
    """Take an arbitrary slice through an array.
    Parameters:
        data: the original dataset
        shape: the shape of the slice to take (Note the return value may have more dimensions than len(shape))
        origin: the location in the original dataset that will become the origin in the sliced data.
        vectors: list of unit vectors which point in the direction of the slice axes
                 each vector must be the same length as axes
                 If the vectors are not unit length, the result will be scaled.
                 If the vectors are not orthogonal, the result will be sheared.
        axes: the axes in the original dataset which correspond to the slice vectors
        Extra keyword arguments are passed to scipy.ndimage.map_coordinates
        (e.g. order=1 selects linear interpolation instead of the default spline).

    Example: start with a 4D data set, take a diagonal-planar slice out of the last 3 axes
        - data = array with dims (time, x, y, z) = (100, 40, 40, 40)
        - The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1)
        - The origin of the slice will be at (x,y,z) = (40, 0, 0)
        - We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)
        affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))

    Note the following must be true:
        len(shape) == len(vectors)
        len(origin) == len(axes) == len(vectors[0])

    Fixes: shape values are converted to int (np.ceil returns floats,
    which np.empty rejects under modern NumPy), and shapes of any length
    are supported instead of exactly two.
    """
    # sanity check
    if len(shape) != len(vectors):
        raise Exception("shape and vectors must have same length.")
    if len(origin) != len(axes):
        raise Exception("origin and axes must have same length.")
    for v in vectors:
        if len(v) != len(axes):
            raise Exception("each vector must be same length as axes.")
    # integer output shape (was hard-coded to 2 elements and left as float)
    shape = tuple(int(np.ceil(s)) for s in shape)
    ## transpose data so slice axes come first
    trAx = list(range(data.ndim))
    for x in axes:
        trAx.remove(x)
    tr1 = tuple(axes) + tuple(trAx)
    data = data.transpose(tr1)
    ## dims are now [(slice axes), (other axes)]
    ## make sure vectors are arrays
    vectors = np.array(vectors)
    origin = np.array(origin)
    origin.shape = (len(axes),) + (1,)*len(shape)
    ## Build array of sample locations.
    grid = np.mgrid[tuple([slice(0,x) for x in shape])]  ## mesh grid of indexes
    # project each output grid point through the slice vectors to get
    # coordinates in the source array
    x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1)
    x += origin
    ## iterate manually over unused axes since map_coordinates won't do it for us
    extraShape = data.shape[len(axes):]
    output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)
    for inds in np.ndindex(*extraShape):
        ind = (Ellipsis,) + inds
        output[ind] = scipy.ndimage.map_coordinates(data[ind], x, **kargs)
    ## figure out the reverse transpose order
    tr = list(range(output.ndim))
    trb = []
    for i in range(min(axes)):
        ind = tr1.index(i) + (len(shape)-len(axes))
        tr.remove(ind)
        trb.append(ind)
    tr2 = tuple(trb+tr)
    ## Untranspose array before returning
    return output.transpose(tr2)
def volumeSum(data, alpha, axis=0, dtype=None):
    """Volumetric summing over one axis.

    Composites slices back-to-front along *axis*, treating *alpha* as a
    per-voxel opacity: output = a*data + (1-a)*output for each slice.
    Returns an array with *axis* removed.

    Fix: indexes with a tuple (list-based fancy indexing is an error in
    modern NumPy).
    """
    if dtype is None:
        dtype = data.dtype
    sh = list(data.shape)
    sh.pop(axis)
    output = np.zeros(sh, dtype=dtype)
    sl = [slice(None)] * data.ndim
    for i in reversed(range(data.shape[axis])):
        sl[axis] = i
        idx = tuple(sl)
        a = alpha[idx]
        # standard back-to-front alpha compositing step
        output *= (1.0-a)
        output += a * data[idx]
    return output
def slidingOp(template, data, op):
    """Evaluate op(template, window) for every window of *template*'s
    length sliding along the first axis of *data*; returns float results.

    Note: matching the original implementation, the final window (ending
    exactly at the end of the data) is not evaluated.
    """
    data = data.view(ndarray)
    template = template.view(ndarray)
    winLen = template.shape[0]
    nOut = data.shape[0] - winLen
    result = np.empty((nOut), dtype=float)
    for k in range(0, nOut):
        result[k] = op(template, data[k:k+winLen])
    return result
def ratio(a, b):
    """Elementwise ratio of a and b, flipped so the result is a/b
    wherever a/b > 1 and b/a otherwise.

    Note the selection keys on a/b > 1 (not on magnitudes), so negative
    inputs follow the same rule as the original implementation.
    """
    fwd = a/b
    rev = b/a
    return np.where(fwd > 1.0, fwd, rev)
def rmsMatch(template, data, thresh=0.75, scaleInvariant=False, noise=0.0):
    """Locate offsets where *template* matches *data* (RMS criterion).

    Slides the template over the data and measures the stdev of the
    difference (or of the ratio, when scaleInvariant) at each offset;
    offsets where that deviation falls below thresh * template.std()
    count as matches. Runs of adjacent matches collapse to their first
    index. Returns the matching start indices ([] when none).
    ## better to use scipy.ndimage.generic_filter ?
    """
    if scaleInvariant:
        metric = lambda t, d: (t / d).std()
    else:
        metric = lambda t, d: (t - d).std()
    devs = slidingOp(template, data, metric)
    matches = np.argwhere(devs < thresh * template.std())[:, 0]
    if len(matches) == 0:
        return []
    # keep only the first index of each contiguous run of matches
    keep = list(np.argwhere(matches[1:] - matches[:-1] > 1)[:, 0] + 1)
    keep.insert(0, 0)
    return matches[keep]
def fastRmsMatch(template, data, thresholds=[0.85, 0.75], scales=[0.2, 1.0], minTempLen=4):
    """Do multiple rounds of rmsMatch on scaled-down versions of the data set

    Coarse-to-fine: the first (smallest) scale searches the entire data
    set; each later scale only re-searches narrow windows around the hits
    from the previous scale. Returns full-scale match indices as ints.
    NOTE: the mutable default args are never modified, so sharing them
    across calls is safe here.
    """
    data = data.view(ndarray)
    template = template.view(ndarray)
    tlen = template.shape[0]
    inds = None
    inds2 = None
    lastScale = None
    for i in range(0, len(scales)):
        ## Decide on scale to use for this iteration
        # never shrink the template below minTempLen samples
        t1len = max(minTempLen, int(scales[i]*tlen))
        scale = float(t1len)/float(tlen)
        ## scale down data sets
        if scale == 1.0:
            t1 = template
            data1 = data
        else:
            # NOTE(review): 'signal' is presumably scipy.signal imported at
            # module level -- confirm; signaltools is a private submodule.
            t1 = signal.signaltools.resample(template, t1len)
            data1 = signal.signaltools.resample(data, int(data.shape[0] * scale))
        ## find RMS matches
        if inds is None:
            # first (coarsest) pass: search the whole data set
            inds = rmsMatch(t1, data1, thresholds[i])
        else:
            # later passes: re-search only around hits from the previous
            # (coarser) scale, padded by the scale ratio
            ix = np.ceil(scale/lastScale)
            inds = ((inds*scale) - ix).astype(int)
            span = 2*ix + t1len
            inds2 = []
            for ind in inds:
                d = data1[ind:ind+span]
                m = rmsMatch(t1, d, thresholds[i])
                for n in m:
                    inds2.append(ind+n)
            inds = inds2
        lastScale = scale
        inds = (array(inds) / scale).round()  # convert back to full-scale indices
    return inds.astype(int)
def highPass(data, cutoff, order=1, dt=None):
    """Return *data* passed through a high-pass Bessel filter
    (thin wrapper around besselFilter with btype='high')."""
    return besselFilter(data, cutoff, order=order, dt=dt, btype='high')
def applyFilter(data, b, a, padding=100, bidir=True):
    """Run the linear filter (b, a) over *data*.

    padding: number of samples copied from each end of the signal and
        prepended/appended before filtering (then trimmed off again) to
        soften edge transients.
    bidir: when True the filter is applied forward and then backward
        (zero-phase filtering).
    Returns a MetaArray when the input is one, otherwise a plain ndarray.
    """
    sig = data.view(ndarray)
    if padding > 0:
        # pad with copies of the signal's ends
        sig = numpy.hstack([sig[:padding], sig, sig[-padding:]])
    if bidir:
        # forward pass, then a reversed pass to cancel phase delay
        sig = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, sig)[::-1])[::-1]
    else:
        sig = scipy.signal.lfilter(b, a, sig)
    if padding > 0:
        sig = sig[padding:-padding]
    if hasattr(data, 'implements') and data.implements('MetaArray'):
        return MetaArray(sig, info=data.infoCopy())
    return sig
def besselFilter(data, cutoff, order=1, dt=None, btype='low', bidir=True):
    """Filter *data* through a Bessel filter of the given order/cutoff.

    dt: sample period; when omitted it is derived from the data's 'Time'
        axis (MetaArray input) and otherwise an exception is raised.
    btype: 'low' or 'high'. bidir selects zero-phase (two-pass) filtering.
    """
    if dt is None:
        # derive the sample period from a MetaArray 'Time' axis
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
        except:
            raise Exception('Must specify dt for this data.')
    b, a = scipy.signal.bessel(order, cutoff * dt, btype=btype)
    return applyFilter(data, b, a, bidir=bidir)
def butterworthFilter(data, wPass, wStop=None, gPass=2.0, gStop=20.0, order=1, dt=None, btype='low', bidir=True):
    """Return data passed through a Butterworth filter.

    (Docstring fixed: this is a Butterworth filter, not a Bessel one.)
    wPass / wStop: pass- and stop-band edge frequencies (wStop defaults
        to 2 * wPass). gPass / gStop: max pass-band loss and min
        stop-band attenuation, handed to scipy.signal.buttord.
    dt: sample period; derived from the data's 'Time' axis when omitted.
    Note: *order* is accepted for signature compatibility but unused --
    buttord chooses the order.
    """
    if dt is None:
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
        except:
            raise Exception('Must specify dt for this data.')
    if wStop is None:
        wStop = wPass * 2.0
    # renamed local: 'ord' shadowed the builtin
    filtOrder, Wn = scipy.signal.buttord(wPass*dt*2., wStop*dt*2., gPass, gStop)
    b,a = scipy.signal.butter(filtOrder, Wn, btype=btype)
    return applyFilter(data, b, a, bidir=bidir)
def lowPass(data, cutoff, order=4, bidir=True, filter='butterworth', stopCutoff=None, gpass=2., gstop=20., samplerate=None, dt=None):
    """Bi-directional bessel/butterworth lowpass filter.

    cutoff (and stopCutoff) are normalized against the Nyquist frequency
    derived from dt or samplerate; dt takes precedence when both are given.

    Bug fix: previously a supplied *samplerate* was ignored and the
    function still demanded dt (or a MetaArray 'Time' axis); dt is now
    only inferred from the data when neither dt nor samplerate is given.
    """
    if dt is None and samplerate is None:
        # try to infer the sample period from a MetaArray 'Time' axis
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
        except:
            raise Exception('Must specify dt for this data.')
    if dt is not None:
        samplerate = 1.0 / dt
    if samplerate is not None:
        # normalize frequencies to the Nyquist rate
        cutoff /= 0.5*samplerate
        if stopCutoff is not None:
            stopCutoff /= 0.5*samplerate
    if filter == 'bessel':
        b,a = scipy.signal.bessel(order, cutoff, btype='low')
    elif filter == 'butterworth':
        if stopCutoff is None:
            stopCutoff = cutoff * 2.0
        # buttord chooses the filter order; 'order' is unused on this path
        filtOrder, Wn = scipy.signal.buttord(cutoff, stopCutoff, gpass, gstop)
        b,a = scipy.signal.butter(filtOrder, Wn, btype='low')
    else:
        raise Exception('Unknown filter type "%s"' % filter)
    return applyFilter(data, b, a, bidir=bidir)
def bandPass(data, low, high, lowOrder=1, highOrder=1, dt=None):
    """Return data passed through a band-pass filter (high-pass at *low*
    followed by low-pass at *high*).

    Bug fix: the original called lowPass(..., high, highOrder, dt),
    passing dt into lowPass's *bidir* positional slot; order and dt are
    now passed by keyword.
    """
    if dt is None:
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
        except:
            raise Exception('Must specify dt for this data.')
    return lowPass(highPass(data, low, lowOrder, dt), high, order=highOrder, dt=dt)
def gaussDivide(data, sigma):
    """Divide *data* by a Gaussian-blurred copy of itself (a simple
    flat-field / background normalization)."""
    background = scipy.ndimage.gaussian_filter(data, sigma=sigma)
    return data.astype(np.float32) / background
def meanDivide(data, axis, inplace=False):
    """Divide every slice taken along *axis* by that slice's mean.

    With inplace=True the input array is modified and nothing is
    returned; otherwise a new float32 array is returned.
    """
    if not inplace:
        out = np.empty(data.shape, dtype=np.float32)
    idx = [slice(None)] * data.ndim
    for k in range(data.shape[axis]):
        idx[axis] = k
        sel = tuple(idx)
        if inplace:
            data[sel] /= data[sel].mean()
        else:
            out[sel] = data[sel].astype(np.float32) / data[sel].mean()
    if not inplace:
        return out
def medianDivide(data, axis, inplace=False):
    """Divide every slice taken along *axis* by that slice's median.

    With inplace=True the input array is modified and nothing is
    returned; otherwise a new float32 array is returned.

    Fixes two defects: the in-place branch called ndarray.median(),
    which does not exist (always raised AttributeError), and the copy
    branch divided by the mean (copy-paste from meanDivide) instead of
    the median the function name promises. Both now use np.median.
    """
    if not inplace:
        d = np.empty(data.shape, dtype=np.float32)
    ind = [slice(None)] * data.ndim
    for i in range(0, data.shape[axis]):
        ind[axis] = i
        sel = tuple(ind)
        if inplace:
            data[sel] /= np.median(data[sel])
        else:
            d[sel] = data[sel].astype(np.float32) / np.median(data[sel])
    if not inplace:
        return d
def blur(data, sigma):
    """Convenience wrapper: Gaussian-blur *data* with the given sigma."""
    smoothed = scipy.ndimage.gaussian_filter(data, sigma=sigma)
    return smoothed
def findTriggers(data, spacing=None, highpass=True, devs=1.5):
    """Find rising/falling crossings of a +/- (devs * stdev) threshold.

    spacing: median-filter width used for baseline removal; required
        when highpass=True (size=None is invalid for median_filter).
    Returns (indices of positive-going triggers, indices of
    negative-going triggers); each index is the sample just before the
    crossing.

    Bug fix: the original multiplied by *devs* twice on one side of the
    comparison (stdev already included devs, then `> stdev*devs` applied
    it again, giving an unintended devs**2 threshold); the threshold is
    now applied exactly once on both sides.
    """
    if highpass:
        # remove slow baseline drift before thresholding
        d1 = data - scipy.ndimage.median_filter(data, size=spacing)
    else:
        d1 = data
    thresh = d1.std() * devs
    posTrigs = (d1[1:] > thresh) * (d1[:-1] <= thresh)
    negTrigs = (d1[1:] < -thresh) * (d1[:-1] >= -thresh)
    return (np.argwhere(posTrigs)[:, 0], np.argwhere(negTrigs)[:, 0])
def triggerStack(data, triggers, axis=0, window=None):
    """Stacks windows from a waveform from trigger locations.
    Useful for making spike-triggered measurements

    triggers: sample indices (along *axis*) to cut windows around.
    window: [start, stop] offsets relative to each trigger; defaults to
        +/- half of the mean trigger spacing.
    Returns an array with a new leading axis of length len(triggers).
    Windows that extend past the ends of the data are zero-padded;
    windows entirely outside are skipped. NOTE(review): skipped triggers
    do not advance the output row counter, so later windows shift into
    earlier rows and trailing rows stay zero -- confirm this is intended.
    """
    if window is None:
        # default window: half the average trigger spacing on either side
        dt = (triggers[1:] - triggers[:-1]).mean()
        window = [int(-0.5 * dt), int(0.5 * dt)]
    shape = list(data.shape)
    shape[axis] = window[1] - window[0]
    total = np.zeros((len(triggers),) + tuple(shape), dtype=data.dtype)
    readIndex = [slice(None)] * data.ndim            # slice into the source data
    writeIndex = [0] + ([slice(None)] * data.ndim)   # slice into the output; element 0 = event row
    for i in triggers:
        rstart = i+window[0]
        rend = i+window[1]
        wstart = 0
        wend = shape[axis]
        if rend < 0 or rstart > data.shape[axis]:
            # window lies entirely outside the data; skip it
            continue
        if rstart < 0:
            # clip the left edge; write further into the output window instead
            wstart = -rstart
            rstart = 0
        if rend > data.shape[axis]:
            # clip the right edge
            wend = data.shape[axis] - rstart
            rend = data.shape[axis]
        readIndex[axis] = slice(rstart, rend)
        writeIndex[axis+1] = slice(wstart, wend)
        total[tuple(writeIndex)] += data[tuple(readIndex)]
        writeIndex[0] += 1
    return total
def generateSphere(radius):
    """Render a hemisphere height map into a (2*radius+1)-square float32 array.

    Each pixel within *radius* of the center holds the height
    sqrt(radius**2 - r**2) of a sphere of that radius; pixels outside
    the radius are 0.

    Rewritten vectorized (the original filled the array with a per-pixel
    double loop); the resulting values are identical.
    """
    radius2 = radius**2
    w = int(radius*2 + 1)
    coords = np.arange(w)
    # squared distance of every pixel from the center
    r2 = (coords[:, np.newaxis] - radius)**2 + (coords[np.newaxis, :] - radius)**2
    # clamp before sqrt so out-of-radius pixels don't produce NaN warnings
    heights = np.sqrt(np.maximum(radius2 - r2, 0))
    return np.where(r2 > radius2, 0.0, heights).astype(np.float32)
def make3Color(r=None, g=None, b=None):
    """Stack up to three equal-shaped arrays into a 3-channel image.

    Note the channel order: r is stored at index 2, g at index 1 and b
    at index 0 (BGR layout), matching the original implementation.
    The output shape and dtype come from the first non-None input.
    """
    base = next((ch for ch in (r, g, b) if ch is not None), None)
    img = np.zeros(base.shape + (3,), dtype=base.dtype)
    for chanIdx, chan in ((2, r), (1, g), (0, b)):
        if chan is not None:
            img[..., chanIdx] = chan
    return img
def imgDeconvolve(data, div):
    """Deconvolve *div* out of an image stack along its first (time) axis.

    Runs deconvolve() once per (x, y) pixel column and returns a tuple
    (quotient, remainder) of arrays sized from a probe deconvolution.
    NOTE(review): 'deconvolve' is presumably scipy.signal.deconvolve
    imported at module level -- confirm.
    """
    ## pad data past the end with the minimum value for each pixel
    data1 = np.empty((data.shape[0]+len(div),) + data.shape[1:])
    data1[:data.shape[0]] = data
    dmin = data.min(axis=0)
    dmin.shape = (1,) + dmin.shape
    data1[data.shape[0]:] = dmin
    ## determine shape of deconvolved image
    # probe a single pixel column to size the output arrays
    dec = deconvolve(data1[:, 0, 0], div)
    shape1 = (dec[0].shape[0], data.shape[1], data.shape[2])
    shape2 = (dec[1].shape[0], data.shape[1], data.shape[2])
    dec1 = np.empty(shape1)
    dec2 = np.empty(shape2)
    ## deconvolve every pixel column
    for i in range(0, shape1[1]):
        for j in range(0, shape1[2]):
            dec = deconvolve(data1[:,i,j], div)
            dec1[:,i,j] = dec[0]
            dec2[:,i,j] = dec[1]
    return (dec1, dec2)
def xColumn(data, col):
    """Take a column out of a 2-D MetaArray and turn it into the axis values for axis 1. (Used for correcting older rtxi files)"""
    # copy every row except the one that becomes the axis
    yCols = list(range(0, data.shape[0]))
    yCols.remove(col)
    b = data[yCols].copy()
    # NOTE(review): reaches into MetaArray's private _info and relies on
    # its internal per-axis layout -- verify against the MetaArray version
    # in use.
    b._info[1] = data.infoCopy()[0]['cols'][col]
    b._info[1]['values'] = data[col].view(ndarray)
    return b
def stdFilter(data, kernShape):
    """Sliding-window standard-deviation filter.

    For every element, computes the (population) stdev of the
    surrounding window of size kernShape, clipped at the array edges.
    Note the window is asymmetric: the upper bound excludes
    ind + kern//2, matching the original formula.

    Fixes: the half-kernel used '/', which yields float slice bounds
    under Python 3 (TypeError); and the bare name std() is replaced by
    the equivalent np.std().
    """
    shape = data.shape
    if len(kernShape) != data.ndim:
        raise Exception("Kernel shape must have length = data.ndim")
    res = np.empty(tuple(shape), dtype=float)
    for ind, i in np.ndenumerate(res):
        sl = [slice(max(0, ind[j]-kernShape[j]//2), min(shape[j], ind[j]+(kernShape[j]//2))) for j in range(0, data.ndim)]
        res[tuple(ind)] = np.std(data[tuple(sl)])
    return res
def makeDispMap(im1, im2, maxDist=10, searchRange=None, normBlur=5.0, matchSize=10., printProgress=False, showProgress=False, method="diffNoise"):
    """Generate a displacement map that can be used to distort one image to match another.
    Return a tuple of two images (displacement, goodness).

    maxDist is the maximum distance to search in any direction for matches.
    Alternatively, searchRange can be specified [[minX, maxX], [minY, maxY]] to set the exact locations to be searched.
    normBlur is the amount of blur to apply when normalizing the image to identify well-matched regions. May need to be tweaked to improve performance.
    matchSize is the amount of blur to apply when smoothing out the displacement map--it should be roughly equivalent to the size of the well-matched region at any displacement. May need to be tweaked to improve performance.

    Recommended approach for matching two images:
      dm = makeDispMap(im1, im2)
      dmb = scipy.ndimage.gaussian_filter(dm, (20, 20))
      im1dist = scipy.ndimage.geometric_transform(im1, lambda x: (x[0]-dmb[x[0]], x[1]-dmb[x[1]]))

    (See also: matchDistortImg)
    """
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)
    if searchRange is None:
        searchRange = [[-maxDist, maxDist+1], [-maxDist, maxDist+1]]
    # lowest error value seen so far at each pixel of im2
    bestMatch = np.empty(im2.shape, dtype=float)
    bmSet = False
    # displacement (i, j) that produced bestMatch at each pixel
    matchOffset = np.zeros(im2.shape + (2,), dtype=int)
    if showProgress:
        # NOTE(review): showImg and qapp are defined elsewhere in this module
        imw1 = showImg(np.zeros(im2.shape), title="errMap")
        imw2 = showImg(np.zeros(im2.shape), title="matchOffset")
        imw3 = showImg(np.zeros(im2.shape), title="goodness")
    # exhaustively try every displacement in the search range
    for i in range(searchRange[0][0], searchRange[0][1]):
        for j in range(searchRange[1][0], searchRange[1][1]):
            # determine im1 and im2 slices
            # (im1 slides over im2)
            s1 = [max(0, -i), min(im1.shape[0], im2.shape[0]-i), max(0, -j), min(im1.shape[1], im2.shape[1]-j)]
            s2 = [max(0, i), min(im2.shape[0], im1.shape[0]+i), max(0, j), min(im2.shape[1], im1.shape[1]+j)]
            rgn1 = im1[s1[0]:s1[1], s1[2]:s1[3]]
            rgn2 = im2[s2[0]:s2[1], s2[2]:s2[3]]
            if method == 'diffNoise':
                # compute the difference between im1 region and im2 region
                diff = (rgn1 - rgn2)
                # measure how well the images match
                errMap = blur(abs(diff - blur(diff, (normBlur,normBlur))), (matchSize/2, matchSize/2))
            elif method == 'diff':
                errMap = abs(rgn1-rgn2)
            if not bmSet:
                # initialize bestMatch high enough that any real error beats it
                bestMatch[...] = errMap.max()*5.
                bmSet = True
            # get bestMatch slice
            bmRgn = bestMatch[s2[0]:s2[1], s2[2]:s2[3]]
            # compare std map to bestMatch
            stdCmp = errMap < bmRgn
            # Set new values in bestMatch
            bestMatch[s2[0]:s2[1], s2[2]:s2[3]] = np.where(stdCmp, errMap, bmRgn)
            # set matchOffset to i,j wherever std is lower than previously seen
            stdCmpInds = np.argwhere(stdCmp) + np.array([[s2[0],s2[2]]])
            matchOffset[stdCmpInds[:,0], stdCmpInds[:,1]] = np.array([i,j])
            if printProgress:
                print("Displacement %d, %d: %d matches" % (i,j, len(stdCmpInds)))
            if showProgress:
                imw1.updateImage(errMap, autoRange=True)
                imw3.updateImage(bestMatch, autoRange=True)
                imw2.updateImage(make3Color(r=matchOffset[...,0], g=matchOffset[...,1]), autoRange=True)
                qapp.processEvents()
    if showProgress:
        imw1.hide()
        imw2.hide()
        imw3.hide()
    return (matchOffset, bestMatch)
def matchDistortImg(im1, im2, scale=4, maxDist=40, mapBlur=30, showProgress=False):
    """Distort im2 to fit optimally over im1. Searches scaled-down images first to determine range.

    Note: im1 and im2 are normalized in place (zero mean, unit stdev).

    Bug fix: the second downsampled image was assigned to the typo name
    'imss', so the subsequent use of im2s raised NameError; renamed to
    im2s.
    """
    ## Normalize both images; iterate since mean and std interact
    for i in range(3):
        im1 -= im1.mean()
        im2 -= im2.mean()
        im1 /= im1.std()
        im2 /= im2.std()
    imws = []
    if showProgress:
        imws.append(showImg(im1, title="Original image 1"))
        imws.append(showImg(im2, title="Original image 2"))
    ## Scale down image to quickly find a rough displacement map
    print("Scaling images down for fast displacement search")
    im1s = downsample(downsample(im1, scale), scale)
    im2s = downsample(downsample(im2, scale), scale)  # was 'imss' (typo), breaking the next call
    (dispMap, goodMap) = makeDispMap(im1s, im2s, maxDist=maxDist/scale, normBlur=5.0, matchSize=10., showProgress=showProgress)
    border = 20
    ## Determine range of displacements to search, exclude border pixels
    ## TODO: this should exclude regions of the image which obviously do not match, rather than just chopping out the borders.
    dmCrop = dispMap[border:-border, border:-border]
    search = [
        [scale*(dmCrop[...,0].min()-1), scale*(dmCrop[...,0].max()+1)],
        [scale*(dmCrop[...,1].min()-1), scale*(dmCrop[...,1].max()+1)]
    ]
    print("Finished initial search; displacement range is", search)
    ## Generate full-size displacement map
    (dispMap2, goodMap2) = makeDispMap(im1, im2, searchRange=search, normBlur=2*scale, matchSize=5.*scale, showProgress=showProgress)
    if showProgress:
        imws.append(showImg(make3Color(r=dispMap2[..., 0], g=dispMap2[..., 1], b=goodMap2), title="Full displacement map"))
    ## blur the map to make continuous
    dm2Blur = blur(dispMap2.astype(np.float32), (mapBlur, mapBlur, 0))
    if showProgress:
        imws.append(showImg(dm2Blur, title="blurred full disp map"))
    ## Generate matched images
    print("Distorting image to match..")
    im2d = geometric_transform(im2, lambda x: (x[0]+(dm2Blur[x[0], x[1], 0]), x[1]+(dm2Blur[x[0], x[1], 1])))
    if showProgress:
        for w in imws:
            w.hide()
    return im2d
def threshold(data, threshold, direction=1):
    """Return all indices where data crosses threshold.

    direction=1 finds upward crossings, direction=-1 downward ones; the
    returned index is the sample immediately before each crossing.
    """
    above = (data >= threshold).astype(np.byte)
    crossings = above[1:] - above[:-1]  # +1 where rising, -1 where falling
    return np.argwhere(crossings == direction)[:, 0]
def measureBaseline(data, threshold=2.0, iterations=2):
    """Iteratively estimate the baseline value of a signal.

    Each pass takes the median, masks out samples further than
    threshold * stdev from it, and recurses on the surviving samples;
    the final pass simply returns the median.
    """
    data = data.view(ndarray)
    center = np.median(data)
    if iterations <= 1:
        return center
    spread = data.std() * threshold
    clipped = numpy.ma.masked_outside(data, center - spread, center + spread)
    if len(clipped) == 0:
        raise Exception("Masked out all data. min: %f, max: %f, std: %f" % (center - spread, center + spread, data.std()))
    return measureBaseline(clipped[~clipped.mask], threshold, iterations-1)
def measureNoise(data, threshold=2.0, iterations=2):
    """Estimate the noise level (stdev) of a signal.

    Each pass masks samples further than threshold * stdev from the
    median and recurses on the rest; the final pass returns the stdev
    of the surviving samples.
    """
    data = data.view(ndarray)
    if iterations <= 1:
        return data.std()
    center = median(data)
    spread = data.std() * threshold
    kept = numpy.ma.masked_outside(data, center - spread, center + spread)
    return measureNoise(kept[~kept.mask], threshold, iterations-1)
def stdevThresholdEvents(data, threshold=3.0):
    """Finds regions in data greater than threshold*stdev.
    Returns a record array with columns: start, len, sum, peak.
    This function is only useful for data with its baseline removed.

    Robustness fix: start/end crossing lists are now trimmed to matched
    pairs. Previously an event still open at the end of the trace (or
    opening before the first sample) produced mismatched start/end
    arrays and crashed with a shape error.
    """
    stdev = data.std()
    mask = (abs(data) > stdev * threshold).astype(np.byte)
    starts = np.argwhere((mask[1:] - mask[:-1]) == 1)[:,0]
    ends = np.argwhere((mask[1:] - mask[:-1]) == -1)[:,0]
    if len(ends) > 0 and len(starts) > 0:
        if ends[0] < starts[0]:
            ends = ends[1:]           # drop an end with no matching start
        if len(ends) == 0 or starts[-1] > ends[-1]:
            starts = starts[:-1]      # drop a start with no matching end
    # keep only fully paired events
    nEv = min(len(starts), len(ends))
    starts = starts[:nEv]
    ends = ends[:nEv]
    lengths = ends - starts
    events = np.empty(nEv, dtype=[('start',int), ('len',int), ('sum',float), ('peak',float)])
    events['start'] = starts
    events['len'] = lengths
    for i in range(nEv):
        d = data[starts[i]:ends[i]]
        events['sum'][i] = d.sum()
        # the peak carries the sign of the event
        if events['sum'][i] > 0:
            events['peak'][i] = d.max()
        else:
            events['peak'][i] = d.min()
    return events
def findEvents(*args, **kargs):
    """Alias for zeroCrossingEvents(); kept for backward compatibility."""
    return zeroCrossingEvents(*args, **kargs)
def zeroCrossingEvents(data, minLength=3, minPeak=0.0, minSum=0.0, noiseThreshold=None):
    """Locate events of any shape in a signal. Works by finding regions of the signal
    that deviate from noise, using the area beneath the deviation as the detection criteria.

    Makes the following assumptions about the signal:
      - noise is gaussian
      - baseline is centered at 0 (high-pass filtering may be required to achieve this).
      - no 0 crossings within an event due to noise (low-pass filtering may be required to achieve this)
      - Events last more than minLength samples
    Returns a record array of events with fields index, [time,] len, sum, peak.

    Fixes: boolean crossing detection uses != instead of subtraction
    (NumPy forbids boolean subtract), and the noiseThreshold filter is
    only applied when a value was actually supplied (the default None
    previously hit a `None > 0` comparison -- a TypeError on Python 3).
    """
    ## just make sure this is an ndarray and not a MetaArray before operating..
    data1 = data.view(ndarray)
    xvals = None
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        try:
            xvals = data.xvals(0)
        except:
            pass
    ## find all 0 crossings
    mask = data1 > 0
    diff = mask[1:] != mask[:-1]  ## True every time the trace crosses 0 between i and i+1
    times1 = np.argwhere(diff)[:, 0]  ## index of each point immediately before crossing.
    times = np.empty(len(times1)+2, dtype=times1.dtype)  ## add first/last indexes to list of crossing times
    times[0] = 0              ## this is a bit suspicious, but we'd rather know
    times[-1] = len(data1)    ## about large events at the beginning/end
    times[1:-1] = times1      ## rather than ignore them.
    ## select only events longer than minLength.
    ## We do this check early for performance--it eliminates the vast majority of events
    longEvents = np.argwhere(times[1:] - times[:-1] > minLength)
    if len(longEvents) < 1:
        nEvents = 0
    else:
        longEvents = longEvents[:, 0]
        nEvents = len(longEvents)
    ## Measure sum of values within each region between crossings, combine into single array
    if xvals is None:
        events = np.empty(nEvents, dtype=[('index',int),('len', int),('sum', float),('peak', float)])
    else:
        events = np.empty(nEvents, dtype=[('index',int),('time',float),('len', int),('sum', float),('peak', float)])
    for i in range(nEvents):
        t1 = times[longEvents[i]]+1
        t2 = times[longEvents[i]+1]+1
        events[i]['index'] = t1
        events[i]['len'] = t2-t1
        evData = data1[t1:t2]
        events[i]['sum'] = evData.sum()
        # peak carries the sign of the event
        if events[i]['sum'] > 0:
            peak = evData.max()
        else:
            peak = evData.min()
        events[i]['peak'] = peak
    if xvals is not None:
        events['time'] = xvals[events['index']]
    if noiseThreshold is not None and noiseThreshold > 0:
        ## Fit gaussian to peak in size histogram, use fit sigma as criteria for noise rejection
        stdev = measureNoise(data1)
        hist = np.histogram(events['sum'], bins=100)
        histx = 0.5*(hist[1][1:] + hist[1][:-1])  ## get x values from middle of histogram bins
        fit = fitGaussian(histx, hist[0], [hist[0].max(), 0, stdev*3, 0])
        sigma = fit[0][2]
        minSize = sigma * noiseThreshold
        ## Generate new set of events, ignoring those with sum < minSize
        events = events[abs(events['sum']) >= minSize]
    if minPeak > 0:
        events = events[abs(events['peak']) > minPeak]
    if minSum > 0:
        events = events[abs(events['sum']) > minSum]
    return events
def thresholdEvents(data, threshold, adjustTimes=True, baseline=0.0):
"""Finds regions in a trace that cross a threshold value (as measured by distance from baseline). Returns the index, time, length, peak, and sum of each event.
Optionally adjusts times to an extrapolated baseline-crossing."""
threshold = abs(threshold)
data1 = data.view(ndarray)
data1 = data1-baseline
#if (hasattr(data, 'implements') and data.implements('MetaArray')):
try:
xvals = data.xvals(0)
dt = xvals[1]-xvals[0]
except:
dt = 1
xvals = None
## find all threshold crossings
masks = [(data1 > threshold).astype(np.byte), (data1 < -threshold).astype(np.byte)]
hits = []
for mask in masks:
diff = mask[1:] - mask[:-1]
onTimes = np.argwhere(diff==1)[:,0]+1
offTimes = np.argwhere(diff==-1)[:,0]+1
#print mask
#print diff
#print onTimes, offTimes
if len(onTimes) == 0 or len(offTimes) == 0:
continue
if offTimes[0] < onTimes[0]:
offTimes = offTimes[1:]
if len(offTimes) == 0:
continue
if offTimes[-1] < onTimes[-1]:
onTimes = onTimes[:-1]
for i in range(len(onTimes)):
hits.append((onTimes[i], offTimes[i]))
## sort hits ## NOTE: this can be sped up since we already know how to interleave the events..
hits.sort(lambda a,b: cmp(a[0], b[0]))
nEvents = len(hits)
if xvals is None:
events = np.empty(nEvents, dtype=[('index',int),('len', int),('sum', float),('peak', float),('peakIndex', int)]) ### rows are [start, length, sum]
else:
events = np.empty(nEvents, dtype=[('index',int),('time',float),('len', int),('sum', float),('peak', float),('peakIndex', int)]) ### rows are
mask = np.ones(nEvents, dtype=bool)
## Lots of work ahead:
## 1) compute length, peak, sum for each event
## 2) adjust event times if requested, then recompute parameters
for i in range(nEvents):
t1, t2 = hits[i]
ln = t2-t1
evData = data1[t1:t2]
sum = evData.sum()
if sum > 0:
#peak = evData.max()
#ind = argwhere(evData==peak)[0][0]+t1
peakInd = np.argmax(evData)
else:
#peak = evData.min()
#ind = argwhere(evData==peak)[0][0]+t1
peakInd = np.argmin(evData)
peak = evData[peakInd]
peakInd += t1
#print "event %f: %d" % (xvals[t1], t1)
if adjustTimes: ## Move start and end times outward, estimating the zero-crossing point for the event
## adjust t1 first
mind = np.argmax(evData)
pdiff = abs(peak - evData[0])
if pdiff == 0:
adj1 = 0
else:
adj1 = int(threshold * mind / pdiff)
adj1 = min(ln, adj1)
t1 -= adj1
#print " adjust t1", adj1
## check for collisions with previous events
if i > 0:
#lt2 = events[i-1]['index'] + events[i-1]['len']
lt2 = hits[i-1][1]
if t1 < lt2:
diff = lt2-t1 ## if events have collided, force them to compromise
tot = adj1 + lastAdj
if tot != 0:
d1 = diff * float(lastAdj) / tot
d2 = diff * float(adj1) / tot
#events[i-1]['len'] -= (d1+1)
hits[i-1] = (hits[i-1][0], hits[i-1][1]-(d1+1))
t1 += d2
#recompute[i-1] = True
#print " correct t1", d2, " correct prev.", d1+1
#try:
#print " correct t1", d2, " correct prev.", d1+1
#except:
#pass
## adjust t2
mind = ln - mind
pdiff = abs(peak - evData[-1])
if pdiff == 0:
adj2 = 0
else:
adj2 = int(threshold * mind / pdiff)
adj2 = min(ln, adj2)
t2 += adj2
lastAdj = adj2
#print " adjust t2", adj2
#recompute[i] = True
#starts.append(t1)
#stops.append(t2)
hits[i] = (t1, t2)
events[i]['peak'] = peak
#if index == 'peak':
#events[i]['index']=ind
#else:
events[i]['index'] = t1
events[i]['peakIndex'] = peakInd
events[i]['len'] = ln
events[i]['sum'] = sum
if adjustTimes: ## go back and re-compute event parameters.
for i in range(nEvents):
t1, t2 = hits[i]
ln = t2-t1
evData = data1[int(t1):int(t2)]
sum = evData.sum()
if len(evData) == 0:
mask[i] = False
continue
if sum > 0:
#peak = evData.max()
#ind = argwhere(evData==peak)[0][0]+t1
peakInd = np.argmax(evData)
else:
#peak = evData.min()
#ind = argwhere(evData==peak)[0][0]+t1
peakInd = np.argmin(evData)
peak = evData[peakInd]
peakInd += t1
events[i]['peak'] = peak
#if index == 'peak':
#events[i]['index']=ind
#else:
events[i]['index'] = t1
events[i]['peakIndex'] = peakInd
events[i]['len'] = ln
events[i]['sum'] = sum
## remove masked events
events = events[mask]
if xvals is not None:
events['time'] = xvals[events['index']]
#for i in range(len(events)):
#print events[i]['time'], events[i]['peak']
return events
def adaptiveDetrend(data, x=None, threshold=3.0):
"""Return the signal with baseline removed. Discards outliers from baseline measurement."""
if x is None:
x = data.xvals(0)
d = data.view(ndarray)
d2 = scipy.signal.detrend(d)
stdev = d2.std()
mask = abs(d2) < stdev*threshold
#d3 = where(mask, 0, d2)
#d4 = d2 - lowPass(d3, cutoffs[1], dt=dt)
lr = stats.linregress(x[mask], d[mask])
base = lr[1] + lr[0]*x
d4 = d - base
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d4, info=data.infoCopy())
return d4
def mode(data, bins=None):
"""Returns location max value from histogram."""
if bins is None:
bins = int(len(data)/10.)
if bins < 2:
bins = 2
y, x = np.histogram(data, bins=bins)
ind = np.argmax(y)
mode = 0.5 * (x[ind] + x[ind+1])
return mode
def modeFilter(data, window=500, step=None, bins=None):
"""Filter based on histogram-based mode function"""
d1 = data.view(np.ndarray)
vals = []
l2 = int(window/2.)
if step is None:
step = l2
i = 0
while True:
if i > len(data)-step:
break
vals.append(mode(d1[i:i+window], bins))
i += step
chunks = [np.linspace(vals[0], vals[0], l2)]
for i in range(len(vals)-1):
chunks.append(np.linspace(vals[i], vals[i+1], step))
remain = len(data) - step*(len(vals)-1) - l2
chunks.append(np.linspace(vals[-1], vals[-1], remain))
d2 = np.hstack(chunks)
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d2, info=data.infoCopy())
return d2
def histogramDetrend(data, window=500, bins=50, threshold=3.0):
"""Linear detrend. Works by finding the most common value at the beginning and end of a trace, excluding outliers."""
d1 = data.view(np.ndarray)
d2 = [d1[:window], d1[-window:]]
v = [0, 0]
for i in [0, 1]:
d3 = d2[i]
stdev = d3.std()
mask = abs(d3-np.median(d3)) < stdev*threshold
d4 = d3[mask]
y, x = np.histogram(d4, bins=bins)
ind = np.argmax(y)
v[i] = 0.5 * (x[ind] + x[ind+1])
base = np.linspace(v[0], v[1], len(data))
d3 = data.view(np.ndarray) - base
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d3, info=data.infoCopy())
return d3
def subtractMedian(data, time=None, width=100, dt=None):
"""Subtract rolling median from signal.
Arguments:
width: the width of the filter window in samples
time: the width of the filter window in x value
if specified, then width is ignored.
dt: the conversion factor for time -> width
"""
if time is not None:
if dt is None:
x = data.xvals(0)
dt = x[1] - x[0]
width = time / dt
d1 = data.view(ndarray)
width = int(width)
med = scipy.ndimage.median_filter(d1, size=width)
d2 = d1 - med
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d2, info=data.infoCopy())
return d2
#def removeBaseline(data, windows=[500, 100], threshold=4.0):
## very slow method using median_filter:
#d1 = data.view(ndarray)
#d2 = d1 - median_filter(d1, windows[0])
#stdev = d2.std()
#d3 = where(abs(d2) > stdev*threshold, 0, d2)
#d4 = d2 - median_filter(d3, windows[1])
#if (hasattr(data, 'implements') and data.implements('MetaArray')):
#return MetaArray(d4, info=data.infoCopy())
#return d4
def clusterSignals(data, num=5):
pass
def denoise(data, radius=2, threshold=4):
"""Very simple noise removal function. Compares a point to surrounding points,
replaces with nearby values if the difference is too large."""
r2 = radius * 2
d1 = data.view(ndarray)
d2 = data[radius:] - data[:-radius] #a derivative
#d3 = data[r2:] - data[:-r2]
#d4 = d2 - d3
stdev = d2.std()
#print "denoise: stdev of derivative:", stdev
mask1 = d2 > stdev*threshold #where derivative is large and positive
mask2 = d2 < -stdev*threshold #where derivative is large and negative
maskpos = mask1[:-radius] * mask2[radius:] #both need to be true
maskneg = mask1[radius:] * mask2[:-radius]
mask = maskpos + maskneg
d5 = np.where(mask, d1[:-r2], d1[radius:-radius]) #where both are true replace the value with the value from 2 points before
d6 = np.empty(d1.shape, dtype=d1.dtype) #add points back to the ends
d6[radius:-radius] = d5
d6[:radius] = d1[:radius]
d6[-radius:] = d1[-radius:]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d6, info=data.infoCopy())
return d6
def rollingSum(data, n):
d1 = data.copy()
d1[1:] += d1[:-1] # integrate
d2 = np.empty(len(d1) - n + 1, dtype=data.dtype)
d2[0] = d1[n-1] # copy first point
d2[1:] = d1[n:] - d1[:-n] # subtract
return d2
def clementsBekkers(data, template):
"""Implements Clements-bekkers algorithm: slides template across data,
returns array of points indicating goodness of fit.
Biophysical Journal, 73: 220-229, 1997.
"""
## Strip out meta-data for faster computation
D = data.view(ndarray)
T = template.view(ndarray)
## Prepare a bunch of arrays we'll need later
N = len(T)
sumT = T.sum()
sumT2 = (T**2).sum()
sumD = rollingSum(D, N)
sumD2 = rollingSum(D**2, N)
sumTD = correlate(D, T, mode='valid')
## compute scale factor, offset at each location:
scale = (sumTD - sumT * sumD /N) / (sumT2 - sumT**2 /N)
offset = (sumD - scale * sumT) /N
## compute SSE at every location
SSE = sumD2 + scale**2 * sumT2 + N * offset**2 - 2 * (scale*sumTD + offset*sumD - scale*offset*sumT)
## finally, compute error and detection criterion
error = sqrt(SSE / (N-1))
DC = scale / error
return DC, scale, offset
def cbTemplateMatch(data, template, threshold=3.0):
dc, scale, offset = clementsBekkers(data, template)
mask = dc > threshold
diff = mask[1:] - mask[:-1]
times = np.argwhere(diff==1)[:, 0] ## every time we start OR stop a spike
## in the unlikely event that the very first or last point is matched, remove it
if abs(dc[0]) > threshold:
times = times[1:]
if abs(dc[-1]) > threshold:
times = times[:-1]
nEvents = len(times) / 2
result = np.empty(nEvents, dtype=[('peak', int), ('dc', float), ('scale', float), ('offset', float)])
for i in range(nEvents):
i1 = times[i*2]
i2 = times[(i*2)+1]
d = dc[i1:i2]
p = argmax(d)
result[0] = p+i1
result[1] = d[p]
result[2] = scale[p+i1]
result[3] = offset[p+i1]
return result
def expTemplate(dt, rise, decay, delay=None, length=None, risePow=2.0):
"""Create PSP template with sample period dt.
rise and decay are the exponential time constants
delay is the amount of time before the PSP starts (defaults to rise+decay)
length is the amount of time after the PSP starts (defaults to 5 * (rise+decay))
"""
if delay is None:
delay = rise+decay
if length is None:
length = (rise+decay) * 5
nPts = int(length / dt)
start = int(delay / dt)
temp = np.empty(nPts)
times = np.arange(0.0, dt*(nPts-start), dt)
temp[:start] = 0.0
temp[start:] = (1.0 - np.exp(-times/rise))**risePow * np.exp(-times/decay)
temp /= temp.max()
return temp
def tauiness(data, win, step=10):
ivals = list(range(0, len(data)-win-1, int(win/step)))
xvals = data.xvals(0)
result = np.empty((len(ivals), 4), dtype=float)
for i in range(len(ivals)):
j = ivals[i]
v = fitExpDecay(np.arange(win), data.asarray()[j:j+win], measureError=True)
result[i] = np.array(list(v[0]) + [v[3]])
#result[i][0] = xvals[j]
#result[i][1] = j
result = MetaArray(result, info=[
{'name': 'Time', 'values': xvals[ivals]},
{'name': 'Parameter', 'cols': [{'name': 'Amplitude'}, {'name': 'Tau'}, {'name': 'Offset'}, {'name': 'Error'}]}
])
return result
def expDeconvolve(data, tau):
dt = 1
if (hasattr(data, 'implements') and data.implements('MetaArray')):
dt = data.xvals(0)[1] - data.xvals(0)[0]
arr = data.view(np.ndarray)
d = arr[:-1] + (tau / dt) * (arr[1:] - arr[:-1])
if (hasattr(data, 'implements') and data.implements('MetaArray')):
info = data.infoCopy()
if 'values' in info[0]:
info[0]['values'] = info[0]['values'][:-1]
info[-1]['expDeconvolveTau'] = tau
return MetaArray(d, info=info)
else:
return d
def expReconvolve(data, tau=None, dt=None):
if (hasattr(data, 'implements') and data.implements('MetaArray')):
if dt is None:
dt = data.xvals(0)[1] - data.xvals(0)[0]
if tau is None:
tau = data._info[-1].get('expDeconvolveTau', None)
if dt is None:
dt = 1
if tau is None:
raise Exception("Must specify tau.")
# x(k+1) = x(k) + dt * (f(k) - x(k)) / tau
# OR: x[k+1] = (1-dt/tau) * x[k] + dt/tau * x[k]
#print tau, dt
d = np.zeros(data.shape, data.dtype)
dtt = dt / tau
dtti = 1. - dtt
for i in range(1, len(d)):
d[i] = dtti * d[i-1] + dtt * data[i-1]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
info = data.infoCopy()
#if 'values' in info[0]:
#info[0]['values'] = info[0]['values'][:-1]
#info[-1]['expDeconvolveTau'] = tau
return MetaArray(d, info=info)
else:
return d
def concatenateColumns(data):
"""Returns a single record array with columns taken from the elements in data.
data should be a list of elements, which can be either record arrays or tuples (name, type, data)
"""
## first determine dtype
dtype = []
names = set()
maxLen = 0
for element in data:
if isinstance(element, np.ndarray):
## use existing columns
for i in range(len(element.dtype)):
name = element.dtype.names[i]
dtype.append((name, element.dtype[i]))
maxLen = max(maxLen, len(element))
else:
name, type, d = element
if type is None:
type = suggestDType(d)
dtype.append((name, type))
if isinstance(d, list) or isinstance(d, np.ndarray):
maxLen = max(maxLen, len(d))
if name in names:
raise Exception('Name "%s" repeated' % name)
names.add(name)
## create empty array
out = np.empty(maxLen, dtype)
## fill columns
for element in data:
if isinstance(element, np.ndarray):
for i in range(len(element.dtype)):
name = element.dtype.names[i]
try:
out[name] = element[name]
except:
print("Column:", name)
print("Input shape:", element.shape, element.dtype)
print("Output shape:", out.shape, out.dtype)
raise
else:
name, type, d = element
out[name] = d
return out
def suggestDType(x, singleValue=False):
"""Return a suitable dtype for x
If singleValue is True, then a sequence will be interpreted as dtype=object
rather than looking inside the sequence to determine its type.
"""
if not singleValue and isinstance(x, list) or isinstance(x, tuple):
if len(x) == 0:
raise Exception('can not determine dtype for empty list')
x = x[0]
if hasattr(x, 'dtype'):
return x.dtype
elif isinstance(x, float):
return float
elif isinstance(x, int) or isinstance(x, long):
return int
#elif isinstance(x, six.string_types): ## don't try to guess correct string length; use object instead.
#return '<U%d' % len(x)
else:
return object
def suggestRecordDType(x, singleRecord=False):
"""Given a dict of values, suggest a record array dtype to use
If singleRecord is True, then x is interpreted as a single record
rather than a dict-of-lists structure. This can resolve some ambiguities
when a single cell contains a sequence as its value.
"""
dt = []
for k, v in x.items():
dt.append((k, suggestDType(v, singleValue=singleRecord)))
return dt
def isFloat(x):
return isinstance(x, float) or isinstance(x, np.floating)
def isInt(x):
for typ in [int, long, np.integer]:
if isinstance(x, typ):
return True
return False
#return isinstance(x, int) or isinstance(x, np.integer)
def find(data, val, op='==', arrayOp='all', axis=0, useWeave=True):
operands = {'==': 'eq', '!=': 'ne', '<': 'lt', '>': 'gt', '<=': 'le', '>=': 'ge'}
if op not in operands:
raise Exception("Operand '%s' is not supported. Options are: %s" % (str(op), str(list(operands.keys()))))
## fallback for when weave is not available
if not useWeave:
axes = list(range(data.ndim))
axes.remove(axis)
axes = [axis] + axes
d2 = data.transpose(axes)
op = '__'+operands[op]+'__'
for i in range(d2.shape[0]):
d3 = d2[i]
test = getattr(d3, op)
if getattr(test, arrayOp)():
return i
return None
## simple scalar test
if data.ndim == 1:
template = """
if (op == "%s") {
for (int i=0; i<data_array->dimensions[0]; i++) {
if (data[i] %s val) {
return_val = i;
break;
}
}
}
"""
code = "return_val = -1;\n"
for op1 in operands:
code += template % (op1, op1)
#ret = weave.inline(code, ['data', 'val', 'op'], type_converters=converters.blitz, compiler = 'gcc')
ret = weave.inline(code, ['data', 'val', 'op'], compiler = 'gcc')
if ret == -1:
ret = None
return ret
## broadcasting test
else:
template = """
if (op == "%s") {
for (int i=0; i<data_array->dimensions[0]; i++) {
PyArrayObject* d2 = // PyArray_TakeFrom(data_array, PyInt_FromLong(i), 0, NULL, NPY_CLIP);
PyObject *itr;
itr = PyArray_MultiIterNew(2, d2, val);
int fail = 0;
while(PyArray_MultiIter_NOTDONE(itr)) {
if (PyArray_MultiIter_DATA(itr, 0) %s PyArray_MultiIter_DATA(itr, 1)) {
fail = 1;
break;
}
PyArray_MultiIter_NEXT(itr);
}
if (fail == 0) {
return_val = i;
break;
}
}
}
"""
code = "return_val = -1;\n"
for op1 in operands:
code += template % (op1, op1)
ret = weave.inline(code, ['data', 'val', 'op'], compiler = 'gcc')
if ret == -1:
ret = None
return ret
## broadcasting test
#else:
#template = """
#if (op == "%s") {
#for (int i=0; i<data_array->dimensions[0]; i++) {
#PyArrayObject* d2 = data(i);
#PyObject *itr;
#itr = PyArray_MultiIterNew(2, a_array, b_array);
#while(PyArray_MultiIter_NOTDONE(itr)) {
#p1 = (%s *) PyArray_MultiIter_DATA(itr, 0);
#p2 = (%s *) PyArray_MultiIter_DATA(itr, 1);
#*p1 = (*p1) * (*p2);
#PyArray_MultiIter_NEXT(itr);
#}
#}
#}
#"""
#pass
def measureResistance(data, mode):
"""Return a tuple of the (inputResistance, seriesResistance) for the given data.
Arguments:
data A metaarray with a Time axis and 'primary' and 'command' channels, with a square step in the command channel.
mode Either 'IC' for current clamp or 'VC' for voltage clamp. If mode is 'IC' seriesResistance will be None."""
cmd = data['command']
pulseStart = cmd.axisValues('Time')[np.argwhere(cmd != cmd[0])[0][0]]
pulseStop = cmd.axisValues('Time')[np.argwhere(cmd != cmd[0])[-1][0]]
## Extract specific time segments
nudge = 0.1e-3
base = data['Time': :(pulseStart-nudge)]
pulse = data['Time': (pulseStart+nudge):(pulseStop-nudge)]
pulseEnd = data['Time': pulseStart+((pulseStop-pulseStart)*2./3.):pulseStop-nudge]
end = data['Time': (pulseStop+nudge): ]
pulseAmp = pulse['command'].mean() - base['command'].mean()
if mode == 'IC':
inputResistance = (pulseEnd['primary'].mean() - base['primary'].mean())/pulseAmp
seriesResistance = None
elif mode == 'VC':
if pulseAmp < 0:
RsPeak = data['primary'].min()
else:
RsPeak = data['primary'].max()
seriesResistance = (RsPeak-base['primary'].mean())/pulseAmp
inputResistance = (pulseEnd['primary'].mean() - base['primary'].mean())/pulseAmp
else:
raise Exception("Not sure how to interpret mode: %s. Please use either 'VC' or 'IC'. " %str(mode))
return (inputResistance, seriesResistance)
def measureResistanceWithExponentialFit(data, debug=False):
"""Return a dict with 'inputResistance', 'bridgeBalance' and 'tau' keys for the given current clamp
data. Fits the data to an exponential decay with a y-offset to measure the
voltage drop across the bridge balance. Does not account for any bridge balance
compensation done during recording.
Arguments:
data A metaarray with a Time axis and 'primary' and 'command' channels, with a square step in the command channel.
debug Default: False. If True, include extra intermediary calculated values in the dictionary that is returned.
"""
cmd = data['command']
pulseStart = cmd.axisValues('Time')[np.argwhere(cmd != cmd[0])[0][0]]
pulseStop = cmd.axisValues('Time')[np.argwhere(cmd != cmd[0])[-1][0]]
baseline = data['Time':0:pulseStart]['primary']
baseline = measureBaseline(baseline)
pulse = data["Time":pulseStart:pulseStop]['primary']
xvals = pulse.axisValues('Time') - pulseStart
fitResult = fit(expDecayWithOffset, xvals, pulse, (-0.01, 0.01, 0.00), generateResult=True)
amp = fitResult[0][0]
tau = fitResult[0][1]
yOffset = fitResult[0][2]
commandAmp = cmd['Time':pulseStart][0] - cmd[0]
inputResistance = abs((amp)/commandAmp)
bridgeBalance = (yOffset - baseline)/commandAmp
results = {'inputResistance':inputResistance,
'bridgeBalance':bridgeBalance,
'tau':tau}
if debug:
results['fitResult'] = fitResult
results['xvals'] = xvals
results['pulse'] = pulse
results['baseline'] = baseline
results['commandAmp'] = commandAmp
return results
#------------------------------------------
# Useless function graveyard:
#------------------------------------------
def alpha(t, tau):
"""Return the value of an alpha function at time t with width tau."""
t = max(t, 0)
return (t / tau) * math.exp(1.0 - (t / tau));
def alphas(t, tau, starts):
tot = 0.0
for s in starts:
tot += alpha(t-s, tau)
return tot
### TODO: replace with faster scipy filters
def smooth(data, it=1):
data = data.view(ndarray)
d = np.empty((len(data)), dtype=data.dtype)
for i in range(0, len(data)):
start = max(0, i-1)
stop = min(i+1, len(data)-1)
d[i] = mean(data[start:stop+1])
if it > 1:
return smooth(d, it-1)
else:
return d
def maxDenoise(data, it):
return smooth(data, it).max()
def absMax(data):
mv = 0.0
for v in data:
if abs(v) > abs(mv):
mv = v
return mv
# takes data in form of [[t1, y1], [t2, y2], ...]
def triggers(data, trig):
"""Return a list of places where data crosses trig
Requires 2-column array: array([[time...], [voltage...]])"""
tVals = []
for i in range(0, data.shape[1]-1):
v1 = data[1, i]
v2 = data[1, i+1]
if v1 <= trig and v2 > trig:
g1 = data[0,i]
g2 = data[0,i+1]
tVals.append(g1 + (g2-g1)*((0.5-v1)/(v2-v1)))
return tVals
## generates a command data structure from func with n points
def cmd(func, n, time):
return [[i*(time/float(n-1)), func(i*(time/float(n-1)))] for i in range(0,n)]
def inpRes(data, v1Range, v2Range):
r1 = [r for r in data if r['Time'] > v1Range[0] and r['Time'] < v1Range[1]]
r2 = [r for r in data if r['Time'] > v2Range[0] and r['Time'] < v2Range[1]]
v1 = mean([r['voltage'] for r in r1])
v2 = min(smooth([r['voltage'] for r in r2], 10))
c1 = mean([r['current'] for r in r1])
c2 = mean([r['current'] for r in r2])
return (v2-v1)/(c2-c1)
def findActionPots(data, lowLim=-20e-3, hiLim=0, maxDt=2e-3):
"""Returns a list of indexes of action potentials from a voltage trace
Requires 2-column array: array([[time...], [voltage...]])
Defaults specify that an action potential is when the voltage trace crosses
from -20mV to 0mV in 2ms or less"""
data = data.view(ndarray)
lastLow = None
ap = []
for i in range(0, data.shape[1]):
if data[1,i] < lowLim:
lastLow = data[0,i]
if data[1,i] > hiLim:
if lastLow != None and data[0,i]-lastLow < maxDt:
ap.append(i)
lastLow = None
return ap
def getSpikeTemplate(ivc, traces):
"""Returns the trace of the first spike in an IV protocol"""
## remove all negative currents
posCurr = np.argwhere(ivc['current'] > 0.)[:, 0]
ivc = ivc[:, posCurr]
## find threshold index
ivd = ivc['max voltage'] - ivc['mean voltage']
ivdd = ivd[1:] - ivd[:-1]
thrIndex = argmax(ivdd) + 1 + posCurr[0]
## subtract spike trace from previous trace
minlen = min(traces[thrIndex].shape[1], traces[thrIndex-1].shape[1])
di = traces[thrIndex]['Inp0', :minlen] - traces[thrIndex-1]['Inp0', :minlen]
## locate tallest spike
ind = argmax(di)
maxval = di[ind]
start = ind
stop = ind
while di[start] > maxval*0.5:
start -= 1
while di[stop] > maxval*0.5:
stop += 1
return traces[thrIndex][['Time', 'Inp0'], start:stop]
if __name__ == '__main__':
import user
| meganbkratz/acq4 | acq4/util/functions.py | Python | mit | 83,674 | [
"Gaussian"
] | 346bae5a2f00426c184c3ebbad38a074fb8badd432f3180a450e8088b59b2e80 |
""" Used by the executors for dispatching events (IIUC)
"""
import threading, time, types
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
class ExecutorState( object ):
  """Thread-safe bookkeeping of connected executors: which task types each
  executor serves, how many tasks it can run in parallel and which tasks
  it is currently executing.
  """

  def __init__( self, log = False ):
    #Fall back to the global logger if none is provided
    if log:
      self.__log = log
    else:
      self.__log = gLogger
    self.__lock = threading.Lock()
    #eType -> set( eId ) implementing that type
    self.__typeToId = {}
    #eId -> max number of simultaneous tasks
    self.__maxTasks = {}
    #eId -> set( taskId ) currently in execution
    self.__execTasks = {}
    #taskId -> eId executing it
    self.__taskInExec = {}

  def _internals( self ):
    """Return a snapshot of the internal structures (debugging aid)."""
    return { 'type2id' : dict( self.__typeToId ),
             'maxTasks' : dict( self.__maxTasks ),
             'execTasks' : dict( self.__execTasks ),
             'tasksInExec' : dict( self.__taskInExec ),
             'locked' : self.__lock.locked() }

  def addExecutor( self, eId, eTypes, maxTasks = 1 ):
    """Register executor eId as serving the given type(s) with capacity maxTasks.

    :param eId: unique executor id
    :param eTypes: executor type or list/tuple of types
    :param maxTasks: maximum simultaneous tasks (clamped to at least 1)
    """
    self.__lock.acquire()
    try:
      self.__maxTasks[ eId ] = max( 1, maxTasks )
      if eId not in self.__execTasks:
        self.__execTasks[ eId ] = set()
      #isinstance works on both Python 2 and 3 (types.ListType is Py2-only)
      if not isinstance( eTypes, ( list, tuple ) ):
        eTypes = [ eTypes ]
      for eType in eTypes:
        if eType not in self.__typeToId:
          self.__typeToId[ eType ] = set()
        self.__typeToId[ eType ].add( eId )
    finally:
      self.__lock.release()

  def removeExecutor( self, eId ):
    """Deregister executor eId.

    :return: list of task ids it was executing (so they can be re-dispatched)
    """
    self.__lock.acquire()
    try:
      tasks = []
      for eType in self.__typeToId:
        if eId in self.__typeToId[ eType ]:
          self.__typeToId[ eType ].remove( eId )
      for taskId in self.__execTasks[ eId ]:
        self.__taskInExec.pop( taskId )
        tasks.append( taskId )
      self.__execTasks.pop( eId )
      self.__maxTasks.pop( eId )
      return tasks
    finally:
      self.__lock.release()

  def getTasksForExecutor( self, eId ):
    """Return a copy of the set of tasks assigned to eId (empty set if unknown)."""
    try:
      return set( self.__execTasks[ eId ] )
    except KeyError:
      return set()

  def full( self, eId ):
    """True if executor eId has no free slots (unknown executors count as full)."""
    try:
      return len( self.__execTasks[ eId ] ) >= self.__maxTasks[ eId ]
    except KeyError:
      return True

  def freeSlots( self, eId ):
    """Number of additional tasks executor eId can accept (0 if unknown)."""
    try:
      return self.__maxTasks[ eId ] - len( self.__execTasks[ eId ] )
    except KeyError:
      return 0

  def getFreeExecutors( self, eType ):
    """Return a dict eId -> free slots for executors of eType with capacity left."""
    execs = {}
    try:
      eids = self.__typeToId[ eType ]
    except KeyError:
      return execs
    try:
      for eid in eids:
        freeSlots = self.freeSlots( eid )
        if freeSlots:
          execs[ eid ] = freeSlots
    except RuntimeError:
      #The id set was modified concurrently while iterating; return what we have
      pass
    return execs

  def getIdleExecutor( self, eType ):
    """Return the eId of the executor of eType with most free slots, or None."""
    idleId = None
    maxFreeSlots = 0
    try:
      for eId in self.__typeToId[ eType ]:
        freeSlots = self.freeSlots( eId )
        if freeSlots > maxFreeSlots:
          maxFreeSlots = freeSlots
          idleId = eId
    except KeyError:
      pass
    return idleId

  def addTask( self, eId, taskId ):
    """Assign taskId to executor eId.

    :return: number of tasks the executor now has, or 0 if eId is unknown
    """
    self.__lock.acquire()
    try:
      #Check the executor exists BEFORE touching __taskInExec; the previous
      #ordering left a stale taskId -> eId entry behind on unknown executors
      try:
        self.__execTasks[ eId ].add( taskId )
      except KeyError:
        return 0
      self.__taskInExec[ taskId ] = eId
      return len( self.__execTasks[ eId ] )
    finally:
      self.__lock.release()

  def getExecutorOfTask( self, taskId ):
    """Return the eId executing taskId, or None if the task is not in execution."""
    try:
      return self.__taskInExec[ taskId ]
    except KeyError:
      return None

  def removeTask( self, taskId, eId = None ):
    """Forget that taskId is in execution.

    :param eId: executor id; looked up from the task if None
    :return: True if the task was known and removed
    """
    self.__lock.acquire()
    try:
      try:
        if eId is None:
          eId = self.__taskInExec[ taskId ]
        self.__execTasks[ eId ].remove( taskId )
        self.__taskInExec.pop( taskId )
        return True
      except KeyError:
        return False
    finally:
      self.__lock.release()
class ExecutorQueues:
  """Thread-safe FIFO waiting queues of task ids, one queue per executor type."""

  def __init__( self, log = False ):
    #Fall back to the global logger if none is provided
    if log:
      self.__log = log
    else:
      self.__log = gLogger
    self.__lock = threading.Lock()
    #eType -> list of waiting taskIds (FIFO order)
    self.__queues = {}
    #eType -> timestamp of last queue activity
    self.__lastUse = {}
    #taskId -> eType of the queue it is waiting in
    self.__taskInQueue = {}

  def _internals( self ):
    """Return a snapshot of the internal structures (debugging aid)."""
    return { 'queues' : dict( self.__queues ),
             'lastUse' : dict( self.__lastUse ),
             'taskInQueue' : dict( self.__taskInQueue ),
             'locked' : self.__lock.locked() }

  def getExecutorList( self ):
    """Return the list of executor types that have a queue."""
    return [ eType for eType in self.__queues ]

  def pushTask( self, eType, taskId, ahead = False ):
    """Queue taskId for executor type eType.

    :param ahead: if True, put the task at the front of the queue
    :return: queue length after insertion, or 0 if the task is already
             queued for a different executor type
    """
    self.__log.verbose( "Pushing task %s into waiting queue for executor %s" % ( taskId, eType ) )
    self.__lock.acquire()
    try:
      if taskId in self.__taskInQueue:
        if self.__taskInQueue[ taskId ] != eType:
          errMsg = "Task %s cannot be queued because it's already queued for %s" % ( taskId,
                                                                                     self.__taskInQueue[ taskId ] )
          self.__log.fatal( errMsg )
          return 0
        else:
          #Already queued for the same type: just report the queue length
          return len( self.__queues[ eType ] )
      if eType not in self.__queues:
        self.__queues[ eType ] = []
      self.__lastUse[ eType ] = time.time()
      if ahead:
        self.__queues[ eType ].insert( 0, taskId )
      else:
        self.__queues[ eType ].append( taskId )
      self.__taskInQueue[ taskId ] = eType
      return len( self.__queues[ eType ] )
    finally:
      self.__lock.release()

  def popTask( self, eTypes ):
    """Pop the first waiting task for any of the given executor types.

    :param eTypes: executor type or list/tuple of types, checked in order
    :return: ( taskId, eType ) or None if nothing is waiting
    """
    #isinstance works on both Python 2 and 3 (types.ListType is Py2-only)
    if not isinstance( eTypes, ( list, tuple ) ):
      eTypes = [ eTypes ]
    self.__lock.acquire()
    #try/finally guarantees the lock is released even on unexpected errors;
    #the previous manual release paths leaked the lock in that case
    try:
      for eType in eTypes:
        try:
          taskId = self.__queues[ eType ].pop( 0 )
          del( self.__taskInQueue[ taskId ] )
        except ( IndexError, KeyError ):
          #Empty or non-existing queue: try the next type
          continue
        #Update bookkeeping while still holding the lock
        self.__lastUse[ eType ] = time.time()
        self.__log.verbose( "Popped task %s from executor %s waiting queue" % ( taskId, eType ) )
        return ( taskId, eType )
    finally:
      self.__lock.release()
    #Nothing found in any of the queues
    return None

  def getState( self ):
    """Return a copy of all queues: eType -> list of waiting taskIds."""
    self.__lock.acquire()
    try:
      qInfo = {}
      for qName in self.__queues:
        qInfo[ qName ] = list( self.__queues[ qName ] )
    finally:
      self.__lock.release()
    return qInfo

  def deleteTask( self, taskId ):
    """Remove taskId from whichever queue it is waiting in.

    :return: True if the task was found and removed
    """
    self.__log.verbose( "Deleting task %s from waiting queues" % taskId )
    self.__lock.acquire()
    try:
      try:
        eType = self.__taskInQueue[ taskId ]
        del( self.__taskInQueue[ taskId ] )
        self.__lastUse[ eType ] = time.time()
      except KeyError:
        return False
      try:
        iPos = self.__queues[ eType ].index( taskId )
      except ValueError:
        return False
      del( self.__queues[ eType ][ iPos ] )
      return True
    finally:
      self.__lock.release()

  def waitingTasks( self, eType ):
    """Number of tasks waiting for executor type eType (0 if no queue)."""
    self.__lock.acquire()
    try:
      try:
        return len( self.__queues[ eType ] )
      except KeyError:
        return 0
    finally:
      self.__lock.release()
class ExecutorDispatcherCallbacks:
  """Interface of the callbacks invoked by ExecutorDispatcher.
  Users subclass this and override the hooks; the mandatory ones
  (dispatch, send, disconnect, error) return S_ERROR by default."""
  def cbDispatch( self, taskId, taskObj, pathExecuted ):
    #Decide which executor type should process the task next
    return S_ERROR( "No dispatch callback defined" )
  def cbSendTask( self, taskId, taskObj, eId, eType ):
    #Actually transmit the task to executor eId of type eType
    return S_ERROR( "No send task callback defined" )
  def cbDisconectExecutor( self, eId ):
    #Disconnect executor eId (NOTE: misspelled name kept -- callers use it)
    return S_ERROR( "No disconnect callback defined" )
  def cbTaskError( self, taskId, taskObj, errorMsg ):
    #Notify that a task could not be processed
    return S_ERROR( "No error callback defined" )
  def cbTaskProcessed( self, taskId, taskObj, eType ):
    #Notify that a task was processed by an executor of eType (optional hook)
    return S_OK()
  def cbTaskFreeze( self, taskId, taskObj, eType ):
    #Notify that a task is about to be frozen (optional hook)
    return S_OK()
class ExecutorDispatcher:
  class ETask:
    """Internal record describing one task known to the dispatcher."""
    def __init__( self, taskId, taskObj ):
      self.taskId = taskId
      self.taskObj = taskObj
      #Executor types the task has already gone through
      self.pathExecuted = []
      #Seconds the task stays frozen when sent to the freezer
      self.freezeTime = 60
      #Total time (seconds) accumulated in the freezer
      self.frozenTime = 0
      #Timestamp of the last freeze (0 if never frozen)
      self.frozenSince = 0
      #How many times the task has been frozen
      self.frozenCount = 0
      #Message of the last freeze (False if never frozen)
      self.frozenMsg = False
      #Executor type the task is queued/frozen for (False if undecided)
      self.eType = False
      #Timestamp when the task was last sent to an executor
      self.sendTime = 0
      #Number of dispatch retries
      self.retries = 0
    def __repr__( self ):
      rS = "<ETask %s" % self.taskId
      if self.eType:
        rS += " eType=%s>" % self.eType
      else:
        rS += ">"
      return rS
  def __init__( self, monitor = None ):
    """Create a dispatcher.

    :param monitor: optional monitoring client; when given, activities about
                    connected executors and processed tasks are registered
    """
    #eId -> list of executor types it implements
    self.__idMap = {}
    #eType -> number of connected executors of that type
    self.__execTypes = {}
    self.__executorsLock = threading.Lock()
    self.__tasksLock = threading.Lock()
    self.__freezerLock = threading.Lock()
    #taskId -> ETask
    self.__tasks = {}
    self.__log = gLogger.getSubLogger( "ExecMind" )
    #List of frozen taskIds (see __freezeTask / __unfreezeTasks)
    self.__taskFreezer = []
    self.__queues = ExecutorQueues( self.__log )
    self.__states = ExecutorState( self.__log )
    self.__cbHolder = ExecutorDispatcherCallbacks()
    self.__monitor = monitor
    #Housekeeping (unfreeze + refill executors) runs once per minute
    gThreadScheduler.addPeriodicTask( 60, self.__doPeriodicStuff )
    #If a task is frozen too many times, send error or forget task?
    self.__failedOnTooFrozen = True
    #If a task fails to properly dispatch, freeze or forget task?
    self.__freezeOnFailedDispatch = True
    #If a task needs to go to an executor that has not connected. Freeze or forget the task?
    self.__freezeOnUnknownExecutor = True
    if self.__monitor:
      self.__monitor.registerActivity( "executors", "Executor reactors connected",
                                       "Executors", "executors", self.__monitor.OP_MEAN, 300 )
      self.__monitor.registerActivity( "tasks", "Tasks processed",
                                       "Executors", "tasks", self.__monitor.OP_RATE, 300 )
      self.__monitor.registerActivity( "taskTime", "Task processing time",
                                       "Executors", "seconds", self.__monitor.OP_MEAN, 300 )
  def setFailedOnTooFrozen( self, value ):
    #If True, tasks frozen 10+ times trigger cbTaskError and are dropped
    self.__failedOnTooFrozen = value
  def setFreezeOnFailedDispatch( self, value ):
    #If True, tasks whose dispatch fails are frozen instead of forgotten
    self.__freezeOnFailedDispatch = value
  def setFreezeOnUnknownExecutor( self, value ):
    #If True, tasks aimed at a not-yet-connected executor type are frozen instead of forgotten
    self.__freezeOnUnknownExecutor = value
def _internals( self ):
return { 'idMap' : dict( self.__idMap ),
'execTypes' : dict( self.__execTypes ),
'tasks' : sorted( self.__tasks ),
'freezer' : list( self.__taskFreezer ),
'queues' : self.__queues._internals(),
'states' : self.__states._internals(),
'locked' : { 'exec' : self.__executorsLock.locked(),
'tasks' : self.__tasksLock.locked(),
'freezer' : self.__freezerLock.locked() },
}
def setCallbacks( self, callbacksObj ):
if not isinstance( callbacksObj, ExecutorDispatcherCallbacks ):
return S_ERROR( "Callbacks object does not inherit from ExecutorDispatcherCallbacks" )
self.__cbHolder = callbacksObj
return S_OK()
  def __doPeriodicStuff( self ):
    """Periodic housekeeping, registered with gThreadScheduler to run every
    60 seconds: unfreeze tasks whose freeze time elapsed, refill all
    executor types with waiting tasks, and update monitoring marks."""
    self.__unfreezeTasks()
    for eType in self.__execTypes:
      self.__fillExecutors( eType )
    if not self.__monitor:
      return
    eTypes = self.__execTypes
    for eType in eTypes:
      try:
        self.__monitor.addMark( "executors-%s" % eType, self.__execTypes[ eType ] )
      except KeyError:
        #NOTE(review): presumably guards against an eType removed concurrently -- confirm
        pass
    self.__monitor.addMark( "executors", len( self.__idMap ) )
  def addExecutor( self, eId, eTypes, maxTasks = 1 ):
    """Register a newly connected executor and try to fill it with tasks.

    :param eId: unique id of the executor reactor
    :param eTypes: executor type or list of types it implements
    :param maxTasks: maximum number of simultaneous tasks it accepts
    """
    #NOTE(review): if eTypes is a bare string the join iterates its characters -- cosmetic only
    self.__log.verbose( "Adding new %s executor to the pool %s" % ( eId, ", ".join ( eTypes ) ) )
    self.__executorsLock.acquire()
    try:
      if eId in self.__idMap:
        #Already registered: nothing to do
        return
      if type( eTypes ) not in ( types.ListType, types.TupleType ):
        eTypes = [ eTypes ]
      self.__idMap[ eId ] = list( eTypes )
      self.__states.addExecutor( eId, eTypes, maxTasks )
      for eType in eTypes:
        if eType not in self.__execTypes:
          self.__execTypes[ eType ] = 0
          #First executor of this type: register its monitoring activities once
          if self.__monitor:
            self.__monitor.registerActivity( "executors-%s" % eType, "%s executor modules connected" % eType,
                                             "Executors", "executors", self.__monitor.OP_MEAN, 300 )
            self.__monitor.registerActivity( "tasks-%s" % eType, "Tasks processed by %s" % eType,
                                             "Executors", "tasks", self.__monitor.OP_RATE, 300 )
            self.__monitor.registerActivity( "taskTime-%s" % eType, "Task processing time for %s" % eType,
                                             "Executors", "seconds", self.__monitor.OP_MEAN, 300 )
        self.__execTypes[ eType ] += 1
    finally:
      self.__executorsLock.release()
    #Outside the lock: hand waiting tasks to the new executor
    for eType in eTypes:
      self.__fillExecutors( eType )
def removeExecutor( self, eId ):
self.__log.verbose( "Removing executor %s" % eId )
self.__executorsLock.acquire()
try:
if eId not in self.__idMap:
return
eTypes = self.__idMap.pop( eId )
for eType in eTypes:
self.__execTypes[ eType ] -= 1
tasksInExec = self.__states.removeExecutor( eId )
for taskId in tasksInExec:
try:
eTask = self.__tasks[ taskId ]
except KeyError:
#Task already removed
pass
if eTask.eType:
self.__queues.pushTask( eTask.eType, taskId, ahead = True )
else:
self.__dispatchTask( taskId )
finally:
self.__executorsLock.release()
try:
self.__cbHolder.cbDisconectExecutor( eId )
except:
self.__log.exception( "Exception while disconnecting agent %s" % eId )
for eType in eTypes:
self.__fillExecutors( eType )
def __freezeTask( self, taskId, errMsg, eType = False, freezeTime = 60 ):
  """ Put a task into the freezer for freezeTime seconds.

      Returns True when frozen; False when the task is unknown, already
      frozen, or has been frozen 10+ times (then it is removed and, if
      configured, the task-error callback fires).
  """
  self.__log.verbose( "Freezing task %s" % taskId )
  self.__freezerLock.acquire()
  try:
    if taskId in self.__taskFreezer:
      return False
    try:
      eTask = self.__tasks[ taskId ]
    except KeyError:
      return False
    # Record freeze metadata on the task itself
    eTask.freezeTime = freezeTime
    eTask.frozenMessage = errMsg
    eTask.frozenSince = time.time()
    eTask.frozenCount += 1
    eTask.eType = eType
    isFrozen = False
    # Give up after 10 freezes: the task is not re-queued
    if eTask.frozenCount < 10:
      self.__taskFreezer.append( taskId )
      isFrozen = True
  finally:
    self.__freezerLock.release()
  # Too many retries: drop the task (done outside the freezer lock)
  if not isFrozen:
    self.removeTask( taskId )
    if self.__failedOnTooFrozen:
      self.__cbHolder.cbTaskError( taskId, eTask.taskObj, "Retried more than 10 times. Last error: %s" % errMsg )
    return False
  return True
def __isFrozen( self, taskId ):
  """ Whether the given task id is currently held in the freezer """
  frozenTasks = self.__taskFreezer
  return taskId in frozenTasks
def __removeFromFreezer( self, taskId ):
  """ Take a task out of the freezer, accumulating its frozen time.

      Returns False if the task is not frozen or no longer known.
  """
  self.__freezerLock.acquire()
  try:
    try:
      iP = self.__taskFreezer.index( taskId )
    except ValueError:
      # Not in the freezer
      return False
    self.__taskFreezer.pop( iP )
    try:
      eTask = self.__tasks[ taskId ]
    except KeyError:
      return False
    # Account for the time this task spent frozen
    eTask.frozenTime += time.time() - eTask.frozenSince
  finally:
    self.__freezerLock.release()
  return True
def __unfreezeTasks( self, eType = False ):
  """ Re-dispatch frozen tasks whose freeze time has expired.

      If eType is given, only tasks destined to that executor type are
      defrosted. The lock is taken and released once per iteration so the
      dispatch call happens outside the critical section.
  """
  iP = 0
  while iP < len( self.__taskFreezer ):
    self.__freezerLock.acquire()
    try:
      try:
        taskId = self.__taskFreezer[ iP ]
      except IndexError:
        # Freezer shrank concurrently: nothing left at this position
        return
      try:
        eTask = self.__tasks[ taskId ]
      except KeyError:
        self.__log.notice( "Removing task %s from the freezer. Somebody has removed the task" % taskId )
        self.__taskFreezer.pop( iP )
        continue
      #Current taskId/eTask is the one to defrost
      if eType and eType != eTask.eType:
        iP += 1
        continue
      if time.time() - eTask.frozenSince < eTask.freezeTime:
        iP += 1
        continue
      self.__taskFreezer.pop( iP )
    finally:
      self.__freezerLock.release()
    #Out of the lock zone to minimize zone of exclusion
    eTask.frozenTime += time.time() - eTask.frozenSince
    self.__log.verbose( "Unfreezed task %s" % taskId )
    self.__dispatchTask( taskId, defrozeIfNeeded = False )
def __addTaskIfNew( self, taskId, taskObj ):
  """ Insert a task if not yet known.

      Returns True when the task was added, False when it already existed.
  """
  self.__tasksLock.acquire()
  try:
    alreadyKnown = taskId in self.__tasks
    if alreadyKnown:
      self.__log.verbose( "Task %s was already known" % taskId )
      return False
    self.__tasks[ taskId ] = ExecutorDispatcher.ETask( taskId, taskObj )
    self.__log.verbose( "Added task %s" % taskId )
    return True
  finally:
    self.__tasksLock.release()
def getTask( self, taskId ):
  """ Return the task object for taskId, or None if the task is unknown """
  try:
    eTask = self.__tasks[ taskId ]
  except KeyError:
    return None
  return eTask.taskObj
def __dispatchTask( self, taskId, defrozeIfNeeded = True ):
  """ Ask the dispatch callback for the task's next executor type and queue it.

      On callback failure the task is frozen (if configured) or removed with
      the task-error callback fired. A falsy next type means the task is done.
  """
  self.__log.verbose( "Dispatching task %s" % taskId )
  #If task already in executor skip
  if self.__states.getExecutorOfTask( taskId ):
    return S_OK()
  self.__removeFromFreezer( taskId )
  result = self.__getNextExecutor( taskId )
  if not result[ 'OK' ]:
    self.__log.warn( "Error while calling dispatch callback: %s" % result[ 'Message' ] )
    if self.__freezeOnFailedDispatch:
      if self.__freezeTask( taskId, result[ 'Message' ] ):
        return S_OK()
      return result
    # Not configured to freeze: drop the task and notify the error callback
    taskObj = self.getTask( taskId )
    self.removeTask( taskId )
    self.__cbHolder.cbTaskError( taskId, taskObj, "Could not dispatch task: %s" % result[ 'Message' ] )
    return S_ERROR( "Could not add task. Dispatching task failed" )
  eType = result[ 'Value' ]
  # Falsy executor type: the task has traversed its whole path
  if not eType:
    self.__log.verbose( "No more executors for task %s" % taskId )
    return self.removeTask( taskId )
  self.__log.verbose( "Next executor type is %s for task %s" % ( eType, taskId ) )
  if eType not in self.__execTypes:
    if self.__freezeOnUnknownExecutor:
      self.__log.verbose( "Executor type %s has not connected. Freezing task %s" % ( eType, taskId ) )
      # freezeTime = 0 so the task defrosts as soon as such an executor connects
      self.__freezeTask( taskId, "Unknown executor %s type" % eType,
                         eType = eType, freezeTime = 0 )
      return S_OK()
    self.__log.verbose( "Executor type %s has not connected. Forgetting task %s" % ( eType, taskId ) )
    return self.removeTask( taskId )
  self.__queues.pushTask( eType, taskId )
  self.__fillExecutors( eType, defrozeIfNeeded = defrozeIfNeeded )
  return S_OK()
def __taskProcessedCallback( self, taskId, taskObj, eType ):
  """ Safely invoke the taskDone callback and validate its return structure """
  try:
    result = self.__cbHolder.cbTaskProcessed( taskId, taskObj, eType )
  except:
    self.__log.exception( "Exception while calling taskDone callback" )
    return S_ERROR( "Exception while calling taskDone callback" )
  if isReturnStructure( result ):
    return result
  errMsg = "taskDone callback did not return a S_OK/S_ERROR structure"
  self.__log.fatal( errMsg )
  return S_ERROR( errMsg )
def __taskFreezeCallback( self, taskId, taskObj, eType ):
  """ Safely invoke the taskFreeze callback and validate its return structure """
  try:
    result = self.__cbHolder.cbTaskFreeze( taskId, taskObj, eType )
  except:
    self.__log.exception( "Exception while calling taskFreeze callback" )
    return S_ERROR( "Exception while calling taskFreeze callback" )
  if isReturnStructure( result ):
    return result
  errMsg = "taskFreeze callback did not return a S_OK/S_ERROR structure"
  self.__log.fatal( errMsg )
  return S_ERROR( errMsg )
def __getNextExecutor( self, taskId ):
  """ Invoke the dispatch callback to discover the next executor type of a task.

      Returns an S_OK/S_ERROR structure; on success the value is the next
      executor type, which is also stored on the task object.
  """
  try:
    eTask = self.__tasks[ taskId ]
  except KeyError:
    # Bug fix: a dict lookup raises KeyError, not IndexError — the original
    # except clause could never match, so a concurrently deleted task leaked
    # an unhandled KeyError to the caller instead of this S_ERROR.
    msg = "Task %s was deleted prematurely while being dispatched" % taskId
    self.__log.error( "Task was deleted prematurely while being dispatched", "%s" % taskId )
    return S_ERROR( msg )
  try:
    result = self.__cbHolder.cbDispatch( taskId, eTask.taskObj, tuple( eTask.pathExecuted ) )
  except:
    self.__log.exception( "Exception while calling dispatch callback" )
    return S_ERROR( "Exception while calling dispatch callback" )
  if not isReturnStructure( result ):
    errMsg = "Dispatch callback did not return a S_OK/S_ERROR structure"
    self.__log.fatal( errMsg )
    return S_ERROR( errMsg )
  #Assign the next executor type to the task
  if result[ 'OK' ]:
    eTask.eType = result[ 'Value' ]
  return result
def getTaskIds( self ):
  """ List the ids of all currently known tasks """
  return [ taskId for taskId in self.__tasks ]
def getExecutorsConnected( self ):
  """ Snapshot of the number of connected executors per executor type """
  snapshot = {}
  snapshot.update( self.__execTypes )
  return snapshot
def addTask( self, taskId, taskObj ):
  """ Register a task and dispatch it.

      A task that is already known only triggers a defrost cycle.
  """
  if self.__addTaskIfNew( taskId, taskObj ):
    return self.__dispatchTask( taskId )
  self.__unfreezeTasks()
  return S_OK()
def removeTask( self, taskId ):
  """ Forget a task completely: task map, queues, executor states and freezer.

      If an executor was processing the task, try to refill that executor.
  """
  try:
    self.__tasks.pop( taskId )
  except KeyError:
    self.__log.verbose( "Task %s is already removed" % taskId )
    return S_OK()
  self.__log.verbose( "Removing task %s" % taskId )
  eId = self.__states.getExecutorOfTask( taskId )
  self.__queues.deleteTask( taskId )
  self.__states.removeTask( taskId )
  self.__freezerLock.acquire()
  try:
    try:
      # list.index raises ValueError when absent; KeyError kept for safety
      self.__taskFreezer.pop( self.__taskFreezer.index( taskId ) )
    except KeyError:
      pass
    except ValueError:
      pass
  finally:
    self.__freezerLock.release()
  if eId:
    #Send task to executor if idle
    return self.__sendTaskToExecutor( eId, checkIdle = True )
  return S_OK()
def __taskReceived( self, taskId, eId ):
  """ Acknowledge that executor eId returned taskId.

      Returns S_OK( eType ) on success, S_OK() (falsy value) when the
      executor did not actually hold the task, S_ERROR on inconsistencies.
  """
  try:
    eTask = self.__tasks[ taskId ]
  except KeyError:
    errMsg = "Task %s is not known" % taskId
    self.__log.error( "Task is not known", "%s" % taskId )
    return S_ERROR( errMsg )
  if not self.__states.removeTask( taskId, eId ):
    self.__log.info( "Executor %s says it's processed task %s but it didn't have it" % ( eId, taskId ) )
    return S_OK()
  # The executor must be registered for the task's current type
  if eTask.eType not in self.__idMap[ eId ]:
    errMsg = "Executor type invalid for %s. Redoing task %s" % ( eId, taskId )
    self.__log.error( "Executor type invalid. Redoing task", "Type %s, Task %s" % ( eId, taskId ) )
    self.removeExecutor( eId )
    self.__dispatchTask( taskId )
    return S_ERROR( errMsg )
  if self.__monitor:
    # Record per-type and global processing time/throughput marks
    tTime = time.time() - self.__tasks[ taskId ].sendTime
    self.__monitor.addMark( "taskTime-%s" % eTask.eType, tTime)
    self.__monitor.addMark( "taskTime", tTime )
    self.__monitor.addMark( "tasks-%s" % eTask.eType, 1 )
    self.__monitor.addMark( "tasks", 1 )
  return S_OK( eTask.eType )
def freezeTask( self, eId, taskId, freezeTime, taskObj = False ):
  """ Handle a freeze request coming from an executor.

      :param eId: executor requesting the freeze
      :param taskId: task to freeze
      :param freezeTime: seconds to keep the task frozen
      :param taskObj: optionally updated task object sent by the executor
  """
  result = self.__taskReceived( taskId, eId )
  if not result[ 'OK' ]:
    return result
  eType = result[ 'Value' ]
  #Executor didn't have the task.
  if not eType:
    #Fill the executor
    self.__sendTaskToExecutor( eId )
    return S_OK()
  if not taskObj:
    taskObj = self.__tasks[ taskId ].taskObj
  result = self.__taskFreezeCallback( taskId, taskObj, eType )
  if not result[ 'OK' ]:
    #Fill the executor
    self.__sendTaskToExecutor( eId )
    return result
  try:
    self.__tasks[ taskId ].taskObj = taskObj
  except KeyError:
    self.__log.error( "Task seems to have been removed while being processed!", "%s" % taskId )
    self.__sendTaskToExecutor( eId, eType )
    return S_OK()
  self.__freezeTask( taskId, "Freeze request by %s executor" % eType,
                     eType = eType, freezeTime = freezeTime )
  # Keep the executor busy with another task of the same type
  self.__sendTaskToExecutor( eId, eType )
  return S_OK()
def taskProcessed( self, eId, taskId, taskObj = False ):
  """ Handle successful completion of a task by an executor.

      Updates the task object, records the executed path step, re-dispatches
      the task to its next executor type, and refills the reporting executor.
  """
  result = self.__taskReceived( taskId, eId )
  if not result[ 'OK' ]:
    return result
  eType = result[ 'Value' ]
  #Executor didn't have the task.
  if not eType:
    #Fill the executor
    self.__sendTaskToExecutor( eId )
    return S_OK()
  #Call the done callback
  if not taskObj:
    taskObj = self.__tasks[ taskId ].taskObj
  result = self.__taskProcessedCallback( taskId, taskObj, eType )
  if not result[ 'OK' ]:
    #Fill the executor
    self.__sendTaskToExecutor( eId )
    #Remove the task
    self.removeTask( taskId )
    return result
  #Up until here it's an executor error. From now on it can be a task error
  try:
    self.__tasks[ taskId ].taskObj = taskObj
    self.__tasks[ taskId ].pathExecuted.append( eType )
  except KeyError:
    self.__log.error( "Task seems to have been removed while being processed!", "%s" % taskId )
    self.__sendTaskToExecutor( eId, eType )
    return S_OK()
  self.__log.verbose( "Executor %s processed task %s" % ( eId, taskId ) )
  result = self.__dispatchTask( taskId )
  # Keep the executor busy with another task of the same type
  self.__sendTaskToExecutor( eId, eType )
  return result
def retryTask( self, eId, taskId ):
  """ An executor gave the task back unprocessed: bump its retry counter
      and dispatch it again.
  """
  if taskId not in self.__tasks:
    errMsg = "Task %s is not known" % taskId
    self.__log.error( "Task is not known", "%s" % taskId )
    return S_ERROR( errMsg )
  if not self.__states.removeTask( taskId, eId ):
    self.__log.info( "Executor %s says it's processed task %s but it didn't have it" % ( eId, taskId ) )
    self.__sendTaskToExecutor( eId )
    return S_OK()
  self.__log.verbose( "Executor %s did NOT process task %s, retrying" % ( eId, taskId ) )
  try:
    self.__tasks[ taskId ].retries += 1
  except KeyError:
    # Task vanished concurrently: nothing to retry
    self.__log.error( "Task seems to have been removed while waiting for retry!", "%s" % taskId )
    return S_OK()
  return self.__dispatchTask( taskId )
def __fillExecutors( self, eType, defrozeIfNeeded = True ):
  """ Feed queued tasks of the given type to idle executors until either
      no idle executor or no queued task remains.
  """
  if defrozeIfNeeded:
    self.__log.verbose( "Unfreezing tasks for %s" % eType )
    self.__unfreezeTasks( eType )
  self.__log.verbose( "Filling %s executors" % eType )
  eId = self.__states.getIdleExecutor( eType )
  # NOTE(review): processedTasks is never used below — dead local, candidate
  # for removal.
  processedTasks = set()
  while eId:
    result = self.__sendTaskToExecutor( eId, eType )
    if not result[ 'OK' ]:
      self.__log.error( "Could not send task to executor", "%s" % result[ 'Message' ] )
    else:
      if not result[ 'Value' ]:
        #No more tasks for eType
        break
      self.__log.verbose( "Task %s was sent to %s" % ( result[ 'Value'], eId ) )
    eId = self.__states.getIdleExecutor( eType )
  self.__log.verbose( "No more idle executors for %s" % eType )
def __sendTaskToExecutor( self, eId, eTypes = False, checkIdle = False ):
  """ Pop a queued task matching the executor's registered types and send it.

      Returns S_OK( taskId ) when a task was sent, S_OK() when nothing is
      queued for the searched types, S_ERROR on failure.
  """
  if checkIdle and self.__states.freeSlots( eId ) == 0:
    return S_OK()
  try:
    searchTypes = list( reversed( self.__idMap[ eId ] ) )
  except KeyError:
    self.__log.verbose( "Executor %s invalid/disconnected" % eId )
    return S_ERROR( "Invalid executor" )
  if eTypes:
    # Prioritize the requested types by moving them to the end of the search list
    if type( eTypes ) not in ( types.ListType, types.TupleType ):
      eTypes = [ eTypes ]
    for eType in reversed( eTypes ):
      try:
        searchTypes.remove( eType )
      except ValueError:
        pass
      searchTypes.append( eType )
  pData = self.__queues.popTask( searchTypes )
  # Idiom fix: identity comparison with None instead of '=='
  if pData is None:
    self.__log.verbose( "No more tasks for %s" % eTypes )
    return S_OK()
  taskId, eType = pData
  self.__log.verbose( "Sending task %s to %s=%s" % ( taskId, eType, eId ) )
  self.__states.addTask( eId, taskId )
  result = self.__msgTaskToExecutor( taskId, eId, eType )
  if not result[ 'OK' ]:
    # Sending failed: requeue ahead and undo the state bookkeeping
    self.__queues.pushTask( eType, taskId, ahead = True )
    self.__states.removeTask( taskId )
    return result
  return S_OK( taskId )
def __msgTaskToExecutor( self, taskId, eId, eType ):
  """ Send a task to an executor through the send-task callback.

      Records the send time on the task and validates the callback's return
      structure.
  """
  try:
    self.__tasks[ taskId ].sendTime = time.time()
  except KeyError:
    return S_ERROR( "Task %s has been deleted" % taskId )
  try:
    result = self.__cbHolder.cbSendTask( taskId, self.__tasks[ taskId ].taskObj, eId, eType )
  except:
    self.__log.exception( "Exception while sending task to executor" )
    return S_ERROR( "Exception while sending task to executor" )
  if isReturnStructure( result ):
    return result
  errMsg = "Send task callback did not send back an S_OK/S_ERROR structure"
  self.__log.fatal( errMsg )
  return S_ERROR( errMsg )
  # Dead-code fix: the original ended with an executor-disconnect sequence
  # placed after both return paths, so it could never run; it was removed.
if __name__ == "__main__":
  # Python 2 smoke tests: each line prints True when behaviour matches.
  def testExecState():
    """ Exercise ExecutorState slot/task bookkeeping """
    execState = ExecutorState()
    execState.addExecutor( 1, "type1", 2 )
    print execState.freeSlots( 1 ) == 2
    print execState.addTask( 1, "t1" ) == 1
    print execState.addTask( 1, "t1" ) == 1
    print execState.addTask( 1, "t2" ) == 2
    print execState.freeSlots( 1 ) == 0
    print execState.full( 1 ) == True
    print execState.removeTask( "t1" ) == 1
    print execState.freeSlots( 1 ) == 1
    print execState.getFreeExecutors( "type1" ) == {1:1}
    print execState.getTasksForExecutor( 1 ) == [ "t2" ]
    print execState.removeExecutor( 1 )
    print execState._internals()
  def testExecQueues():
    """ Exercise ExecutorQueues push/pop/delete semantics """
    eQ = ExecutorQueues()
    for y in range( 2 ):
      for i in range( 3 ):
        print eQ.pushTask( "type%s" % y, "t%s%s" % ( y, i ) ) == i + 1
    print "DONE IN"
    print eQ.pushTask( "type0", "t01" ) == 3
    print eQ.getState()
    print eQ.popTask( "type0" ) == "t00"
    print eQ.pushTask( "type0", "t00", ahead = True ) == 3
    print eQ.popTask( "type0" ) == "t00"
    print eQ.deleteTask( "t01" ) == True
    print eQ.getState()
    print eQ.deleteTask( "t02" )
    print eQ.getState()
    for i in range( 3 ):
      print eQ.popTask( "type1" ) == "t1%s" % i
    print eQ._internals()
  # Only the queue test is actually run
  testExecQueues()
| Andrew-McNab-UK/DIRAC | Core/Utilities/ExecutorDispatcher.py | Python | gpl-3.0 | 29,006 | [
"DIRAC"
] | f00439c3792b4002055fc33a1fcb6a607d95aa2add6ca1a0b9a612381463c0dd |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ParallelNetcdf(AutotoolsPackage):
    """Parallel netCDF (PnetCDF) is a library providing high-performance
    parallel I/O while still maintaining file-format compatibility with
    Unidata's NetCDF."""

    homepage = "https://trac.mcs.anl.gov/projects/parallel-netcdf"
    url = "http://cucis.ece.northwestern.edu/projects/PnetCDF/Release/parallel-netcdf-1.6.1.tar.gz"
    list_url = "http://cucis.ece.northwestern.edu/projects/PnetCDF/download.html"

    version('1.8.0', '825825481aa629eb82f21ca37afff1609b8eeb07')
    version('1.7.0', '267eab7b6f9dc78c4d0e6def2def3aea4bc7c9f0')
    version('1.6.1', '62a094eb952f9d1e15f07d56e535052604f1ac34')

    variant('cxx', default=True, description='Build the C++ Interface')
    variant('fortran', default=True, description='Build the Fortran Interface')
    variant('pic', default=True,
            description='Produce position-independent code (for shared libs)')

    depends_on('mpi')
    depends_on('m4', type='build')

    # See:
    # https://trac.mcs.anl.gov/projects/parallel-netcdf/browser/trunk/INSTALL
    def configure_args(self):
        spec = self.spec
        mpi = spec['mpi']
        # Point configure at the MPI installation and its compiler wrappers.
        args = [
            '--with-mpi={0}'.format(mpi.prefix),
            'MPICC={0}'.format(mpi.mpicc),
            'MPICXX={0}'.format(mpi.mpicxx),
            'MPIF77={0}'.format(mpi.mpifc),
            'MPIF90={0}'.format(mpi.mpifc),
            'SEQ_CC={0}'.format(spack_cc),
        ]
        if '+pic' in spec:
            pic = self.compiler.pic_flag
            args.extend(['CFLAGS={0}'.format(pic),
                         'CXXFLAGS={0}'.format(pic),
                         'FFLAGS={0}'.format(pic)])
        if '~cxx' in spec:
            args.append('--disable-cxx')
        if '~fortran' in spec:
            args.append('--disable-fortran')
        return args

    def install(self, spec, prefix):
        # Installation fails in parallel
        make('install', parallel=False)
| matthiasdiener/spack | var/spack/repos/builtin/packages/parallel-netcdf/package.py | Python | lgpl-2.1 | 3,279 | [
"NetCDF"
] | 2f004c936d05575d85b3bfde788f6485374534f0157415b824dc8cf214b2497b |
#!/usr/bin/env python
"""
Command-line tool to convert a binary map output to netcdf.
"""
from __future__ import print_function
import argparse
import sys,os
import numpy as np
import stompy.model.delft.io as dio
# Command-line interface: positional map/hyd paths plus the total-depth option.
parser = argparse.ArgumentParser(description='Convert D-WAQ binary map output to NetCDF.')
parser.add_argument('map_fn', metavar='somefile.map', type=str,
                    help='path to map file output')
parser.add_argument('hyd_fn', metavar='other.hyd', type=str,
                    help='path to hyd file')
parser.add_argument('--totaldepth',default='TotalDepth',
                    help='output variable to use as total depth. none to disable sigma coordinate')

args = parser.parse_args()
# DBG args=parser.parse_args(['--totaldepth','none',"wy2011.map","com-wy2011.hyd"])

map_fn=args.map_fn
hyd_fn=args.hyd_fn

# Output name: swap the extension (str.replace acts on the first '.map'
# occurrence anywhere in the path, not only the suffix)
output_fn=map_fn.replace('.map','.nc')

# Refuse to clobber an existing result file
if os.path.exists(output_fn):
    print("Output file '%s' exists. Aborting"%output_fn)
    sys.exit(1)

print("Reading map data and grid")
map_ds=dio.read_map(map_fn,hyd_fn)

if args.totaldepth != 'none':
    total_depth=args.totaldepth
    print("Adding minor metadata")
    if total_depth not in map_ds:
        print("Fabricating a total-depth variable to allow ugrid-ish output")
        # All-ones placeholder so a sigma coordinate can still be attached
        map_ds[total_depth]=('time','layer','face'),np.ones( (len(map_ds.time),
                                                              len(map_ds.layer),
                                                              len(map_ds.face)), '<i1')
    dio.map_add_z_coordinate(map_ds,total_depth=total_depth,coord_type='sigma',
                             layer_dim='layer')

print("Writing to %s"%output_fn)
map_ds.to_netcdf(output_fn)
| rustychris/stompy | examples/dwaq_map_to_nc.py | Python | mit | 1,704 | [
"NetCDF"
] | 2a5151b9c89878ba0278511c22bec3198d92dd6468078b6d4f3563e32d4f25d7 |
# Copyright (C) 2014 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates gen_pxiconfig.cpp, which in turn generates myconfig.pxi.
#
from __future__ import print_function
import inspect
import sys
import os
# find featuredefs.py
# (the featuredefs module lives two directories above this script)
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..', '..'))
import featuredefs

# Expect exactly two arguments: the feature definition file and the C++ output
if len(sys.argv) != 3:
    print("Usage: {} DEFFILE CPPFILE".format(sys.argv[0]), file=sys.stderr)
    exit(2)

deffilename, cfilename = sys.argv[1:3]

print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")

# generate cpp-file
# The emitted program prints myconfig.pxi: one Cython DEF line per feature,
# set to 1/0 according to the compile-time configuration it was built with.
print("Writing " + cfilename + "...")
cfile = open(cfilename, 'w')
cfile.write("""
#include "config.hpp"
#include <iostream>
using namespace std;
int main() {
cout << "# This file was autogenerated." << endl
<< "# Do not modify it or your changes will be overwritten!" << endl;
""")
template = """
#ifdef {0}
cout << "DEF {0} = 1" << endl;
#else
cout << "DEF {0} = 0" << endl;
#endif
"""
# One #ifdef probe per known feature
for feature in defs.allfeatures:
    cfile.write(template.format(feature))
cfile.write("""
}
""")
cfile.close()
print("Done.")
| sehrhardt/espresso | src/python/espressomd/gen_pxiconfig.py | Python | gpl-3.0 | 1,826 | [
"ESPResSo"
] | ba1b3b577c6fd6a50c13413a63490c598da0c7a50733a2ec208b4e157888d054 |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for SearchIO fasta-m10 indexing."""
import os
import unittest
from search_tests_common import CheckIndex
class FastaM10IndexCases(CheckIndex):
    fmt = 'fasta-m10'

    def _check(self, basename):
        # Route every case through one place so path and format stay consistent.
        self.check_index(os.path.join('Fasta', basename), self.fmt)

    def test_output_002(self):
        """Test fasta-m10 indexing, fasta34, multiple queries"""
        self._check('output002.m10')

    def test_output_001(self):
        """Test fasta-m10 indexing, fasta35, multiple queries"""
        self._check('output001.m10')

    def test_output_005(self):
        """Test fasta-m10 indexing, ssearch35, multiple queries"""
        self._check('output005.m10')

    def test_output_008(self):
        """Test fasta-m10 indexing, tfastx36, multiple queries"""
        self._check('output008.m10')

    def test_output_009(self):
        """Test fasta-m10 indexing, fasta36, multiple queries"""
        self._check('output009.m10')

    def test_output_010(self):
        """Test fasta-m10 indexing, fasta36, single query, no hits"""
        self._check('output010.m10')

    def test_output_011(self):
        """Test fasta-m10 indexing, fasta36, single query, hits with single hsp"""
        self._check('output011.m10')

    def test_output_012(self):
        """Test fasta-m10 indexing, fasta36, single query with multiple hsps"""
        self._check('output012.m10')
if __name__ == "__main__":
    # Verbose runner so each test's docstring is shown while running.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_SearchIO_fasta_m10_index.py | Python | gpl-2.0 | 2,166 | [
"Biopython"
] | a71f9d125106ff400f0208723590f59b4214bac18e3b263490bae2513c3b04ed |
#!/usr/bin/env python
#
# @Xiaming Chen
import sys
import numpy as np
import time
import math
from datetime import datetime, date
class CellMap(object):
    """Lookup table from cell/base-station id to (lon, lat) coordinates,
    loaded from a CSV file with columns: id, <unused>, lon, lat."""

    def __init__(self, data):
        self._map = {}
        print("loading data ...")
        for record in open(data, 'rb'):
            fields = record.strip('\r\n ').split(',')
            cell_id = int(fields[0])
            longitude = float(fields[2])
            latitude = float(fields[3])
            self._map[cell_id] = (longitude, latitude)

    def id2coord(self, id):
        """Return the (lon, lat) pair of a single cell id."""
        return self._map[id]

    def ids2coords(self, locs):
        """Return the (lon, lat) pairs for a sequence of cell ids."""
        return [self._map[i] for i in locs]
def drange(ts):
    """Return the 3am-to-3am local "day" window containing timestamp *ts*.

    Returns a (start, end) pair of naive local datetimes, where start is the
    3:00 of the window and end is exactly one day later.

    Bug fix: the original built the neighbouring days with dt.day - 1 and
    sds.day + 1, which raises ValueError at month boundaries (e.g. day 0, or
    day 32); timedelta arithmetic handles month/year rollover correctly.
    """
    from datetime import timedelta
    dt = datetime.fromtimestamp(ts)
    day_start = datetime(dt.year, dt.month, dt.day, 3)
    if dt.hour < 3:
        # Before 3am counts as the previous day's window
        sds = day_start - timedelta(days=1)
    else:
        sds = day_start
    eds = sds + timedelta(days=1)
    return (sds, eds)
def in_area(p, lb, rt):
    """True iff point p=(x, y) lies inside the axis-aligned box spanned by
    the left-bottom corner lb and the right-top corner rt (inclusive)."""
    inside_x = lb[0] <= p[0] <= rt[0]
    inside_y = lb[1] <= p[1] <= rt[1]
    return inside_x and inside_y
def is_valid_location(locid):
    """Whether the cell id maps to a point inside the Hangzhou bounding box.

    Relies on the module-level `cmap` CellMap for the id -> (lon, lat) lookup.
    """
    HZ_LB = [120.03013, 30.13614]
    HZ_RT = [120.28597, 30.35318]
    lon, lat = cmap.id2coord(locid)
    return in_area([lon, lat], HZ_LB, HZ_RT)
def extract_metaflow(loc):
    """Detect "metaflows" in a location-id sequence.

    A metaflow is an index pair (i, j) such that loc[j] is the first
    recurrence of loc[i] after the sequence has left that location at least
    once; nested flows inside (i, j) are detected recursively. Sequences
    shorter than 3 yield no flows. Returns a list of (start, end) pairs.
    """
    size = len(loc)
    flows = []
    if size < 3:
        return flows
    i = 0
    while i < size:
        closed = False
        still_at_start = True
        j = i
        for j in range(i + 1, size):
            if loc[j] != loc[i]:
                still_at_start = False
            elif not still_at_start:
                # First recurrence after leaving loc[i]: close a flow here
                closed = True
                flows.append((i, j))
                inner = extract_metaflow(loc[i + 1:j])
                flows.extend([(a + i + 1, b + i + 1) for (a, b) in inner])
                break
        # Jump past the closed flow, otherwise advance by one
        i = j + 1 if closed else i + 1
    return flows
def calculate_gcd(latlon1, latlon2):
    """Great-circle distance in km between two (lat, lon) degree points,
    using the haversine formula with an Earth radius of 6371 km."""
    lat1, lon1 = (math.radians(v) for v in latlon1)
    lat2, lon2 = (math.radians(v) for v in latlon2)
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    # min() guards against sqrt of a value marginally above 1 from rounding
    c = 2 * math.asin(min(1, math.sqrt(a)))
    return 6371 * c
def calculate_mp(lats, lons, w=None):
    """Weighted spherical midpoint of the given points.

    Converts each (lat, lon) to a 3D unit vector, averages with weights *w*
    (uniform when omitted), and converts the mean vector back to degrees.
    Returns (lat_deg, lon_deg).
    """
    if w is None:
        w = [1 for _ in lats]
    lon_rad = np.array([math.radians(v) for v in lons])
    lat_rad = np.array([math.radians(v) for v in lats])
    weights = np.array(w)
    # Cartesian components on the unit sphere
    x = np.cos(lat_rad) * np.cos(lon_rad)
    y = np.cos(lat_rad) * np.sin(lon_rad)
    z = np.sin(lat_rad)
    total_w = np.sum(weights)
    mx = 1.0 * np.sum(x * weights) / total_w
    my = 1.0 * np.sum(y * weights) / total_w
    mz = 1.0 * np.sum(z * weights) / total_w
    mid_lon = math.atan2(my, mx)
    mid_lat = math.atan2(mz, math.sqrt(mx ** 2 + my ** 2))
    return (math.degrees(mid_lat), math.degrees(mid_lon))
def calculate_rg(lats, lons):
    """Radius of gyration (km) of the unique (lat, lon) points:
    RMS great-circle distance from the spherical midpoint of the set."""
    unique_pts = set(zip(lats, lons))
    u_lats = [p[0] for p in unique_pts]
    u_lons = [p[1] for p in unique_pts]
    mid_lat, mid_lon = calculate_mp(u_lats, u_lons)
    dists = np.array([calculate_gcd((mid_lat, mid_lon), pt)
                      for pt in zip(u_lats, u_lons)])
    return np.sqrt(np.sum(dists ** 2) / len(dists))
def calculate_comdist(lats, lons):
    """ Calculate the commulative distance
    """
    # Unimplemented placeholder: always returns None.
    pass
def calculate_mindist(lats, lons):
    """ Calculate the minimum distance to visit all places
    """
    # Unimplemented placeholder: always returns None.
    pass
def is_max_metaflow(flow, flows, n):
    """True unless some other flow strictly contains *flow*.

    The whole-sequence flow (0, n-1) never disqualifies a flow.
    """
    for other in flows:
        if other[0] == 0 and other[1] == n - 1:
            # The full-span flow is exempt from the containment check
            continue
        if other[0] < flow[0] and other[1] > flow[1]:
            return False
    return True
def extract_metaflow_features(ts, locs, flows):
    """
    Compute per-metaflow statistics for one user-day.

    @ts: timestamp sequence
    @locs: location ID sequence
    @flows: a list of detected metaflows

    Returns a list of feature dicts (one per flow). Uses the module-level
    `cmap` CellMap to translate location ids into (lon, lat) coordinates.
    """
    features = []
    coords = cmap.ids2coords(locs)
    lons = [ i[0] for i in coords ]
    lats = [ i[1] for i in coords ]
    # Radius of gyration over the whole day, used to normalise per-flow values
    rg_day = calculate_rg(lats, lons)
    thisdate = date.fromtimestamp(ts[0])
    day_id = int("%4d%02d%02d" % (thisdate.year, thisdate.month, thisdate.day))
    ff_id = -1
    for flow in flows:
        ff_features = {}
        # Slice out the flow's own points (end index inclusive)
        flow_locs = locs[ flow[0]:flow[1]+1 ]
        flow_ts = ts[ flow[0]:flow[1]+1 ]
        flow_lons = lons[ flow[0]:flow[1]+1 ]
        flow_lats = lats[ flow[0]:flow[1]+1 ]
        # Pairwise great-circle hops along the flow
        flow_dists = [ calculate_gcd((flow_lats[i], flow_lons[i]),
                                     (flow_lats[i+1], flow_lons[i+1]))
                       for i in range(0, len(flow_locs)-1) ]
        ff_id += 1
        ff_len = len(flow_locs)
        ff_ulen = len(set(flow_locs))
        ff_time = max(flow_ts) - min(flow_ts)
        ff_dist = sum(flow_dists)
        # NOTE(review): passes the flow's own length as n, while
        # is_max_metaflow's skip rule reads like it expects the day length —
        # confirm which is intended.
        ff_ismax = is_max_metaflow(flow, flows, ff_len)
        ff_rg = calculate_rg(flow_lats, flow_lons)
        ff_rgprc = 1.0 * ff_rg / rg_day
        # delta flow
        # (all points of the day that are NOT part of this flow)
        delta_flow_lons = []
        delta_flow_lats = []
        if flow[0] > 0:
            delta_flow_lons.extend(lons[0:flow[0]])
            delta_flow_lats.extend(lats[0:flow[0]])
        if flow[1] < len(lons)-1:
            delta_flow_lons.extend(lons[ flow[1]+1:len(lons) ])
            delta_flow_lats.extend(lats[ flow[1]+1:len(lats) ])
        ff_rgdlt = calculate_rg(delta_flow_lats, delta_flow_lons)
        # Empty complement yields NaN; report 0 instead
        ff_rgdlt = 0 if np.isnan(ff_rgdlt) else ff_rgdlt
        ff_features['id'] = ff_id
        ff_features['len'] = ff_len
        ff_features['ulen'] = ff_ulen
        ff_features['time'] = ff_time
        ff_features['dist'] = ff_dist
        ff_features['ismax'] = ff_ismax
        ff_features['rg'] = ff_rg
        ff_features['rgprc'] = ff_rgprc
        ff_features['rgdlt'] = ff_rgdlt
        ff_features['date'] = day_id
        ff_features['idx1'] = flow[0]
        ff_features['idx2'] = flow[1]
        features.append(ff_features)
    return features
# Module state for dump_features: `header` tracks whether the CSV header has
# been written yet (the first call truncates the file); `silent` suppresses
# echoing rows to stdout.
header=False
silent=True
def dump_features(uid, features, file='metaflows.txt'):
    """Append one CSV row per metaflow feature dict to *file* (Python 2 only)."""
    global header, silent
    if not header:
        header=True
        # First call: truncate the file and write the column header
        ofile = open(file, 'wb')
        ofile.write("UID,DATE,FID,LEN,ULEN,TIME,DIST,ISMAX,RG,RGPRC,RGDLT,IDX1,IDX2\n")
    # Re-opened in append mode on every call (the handle above is dropped unclosed)
    ofile = open(file, 'ab')
    for f in features:
        format_str = "%d,%d,%d,%d,%d,%d,%.3f,%d,%.3f,%.3f,%.3f,%d,%d" % \
                     (uid, f['date'], f['id'], f['len'], f['ulen'], f['time'],
                      f['dist'], f['ismax'], f['rg'], f['rgprc'], f['rgdlt'],
                      f['idx1'], f['idx2'])
        if not silent: print format_str
        ofile.write(format_str + '\n')
    ofile.close()
if __name__ == '__main__':
    # Main driver (Python 2): stream movement records, buffer each user-day,
    # extract metaflows per buffered day, and dump their features to CSV.
    i = 0
    buf_ts = []
    buf_lc = []
    last_uid = None
    last_sds = None
    if len(sys.argv) < 3:
        print("Usage: %s <movement> <bsmap>" % sys.argv[0])
        sys.exit(-1)
    movement = open(sys.argv[1], 'rb')
    cmap = CellMap(sys.argv[2])
    for line in movement:
        # Each movement record: user id, timestamp, location id, ...
        uid, ts, loc = line.strip('\r\n').split(',')[0:3]
        uid = int(uid); ts = int(float(ts)); loc = int(loc)
        dt = datetime.fromtimestamp(ts)
        sds, eds = drange(ts)
        if not is_valid_location(loc):
            # omit points outside city range
            continue
        if last_uid is None or uid == last_uid and sds == last_sds:
            # Same user and same 3am-to-3am day: keep buffering
            buf_ts.append(ts)
            buf_lc.append(loc)
        else: # manipulate the buffer
            # Close the day by returning to the first location 24h later
            if buf_ts[-1] != buf_ts[0] + 86400:
                buf_ts.append(buf_ts[0] + 86400)
                buf_lc.append(buf_lc[0])
            flows = extract_metaflow(buf_lc)
            if len(flows) > 0:
                print "[%s] %d: %s" % (time.ctime(buf_ts[0]), last_uid, flows)
                features = extract_metaflow_features(buf_ts, buf_lc, flows)
                dump_features(last_uid, features)
            buf_ts = [ts]
            buf_lc = [loc]
        # Hard cap on the number of processed records
        if i > 2500000:
            break
        i += 1
        last_uid = uid
        last_sds = sds
    # NOTE(review): the final buffer's flows are computed but never dumped.
    flows = extract_metaflow(buf_lc)
    print("Done!")
if __name__ == '__main__':
    # NOTE(review): second __main__ guard in the same module; when the script
    # is executed directly this runs after the main loop above (Python 2).
    # Ad-hoc sanity checks of the distance helpers on fixed coordinates.
    print("-----------Module test------------")
    print calculate_gcd((47.64828, -122.52963), (47.61168, -122.33326))
    lat = [30.23412, 30.266203, 30.266203, 30.265251, 30.267384,
           30.267384, 30.266203, 30.266203, 30.266203, 30.267384,
           30.266203, 30.265251, 30.267384, 30.267384, 30.264925,
           30.25779, 30.256128, 30.256592, 30.25665, 30.241655,
           30.242035, 30.24256, 30.24, 30.237858, 30.235168, 30.23412]
    lon = [120.195305, 120.16932, 120.16932, 120.17233, 120.17016,
           120.17016, 120.16932, 120.16932, 120.16932, 120.17016,
           120.16932, 120.17233, 120.17016, 120.17016, 120.1709,
           120.17321, 120.17334, 120.17848, 120.17696, 120.17795,
           120.18491, 120.18684, 120.18853, 120.19241, 120.196434,
           120.195305]
    print calculate_rg(lat, lon)
| caesar0301/paper-flowmap-code | src/040.flow-miner.py | Python | gpl-3.0 | 9,135 | [
"VisIt"
] | 48770336fa4a4d1f07a60a2afa61dcbbe56da8981effe8603e260c9bb7139d3f |
"""
PyXNAT
======
pyxnat provides an API to access data on XNAT (see http://xnat.org)
servers.
Visit http://packages.python.org/pyxnat for more information.
"""
from .version import __version__
from .core import Interface
from .core import SearchManager
from .core import CacheManager
from .core import Select
from .core import Inspector
from .core import Users
from .core import attributes
from .core import cache
from .core import help
from .core import interfaces
from .core import resources
from .core import schema
from .core import select
from .core import users
from .core import jsonutil
from .core import uriutil
from .core import xpass
from .core import xpath_store
from .core import Packages
| BrainIntensive/OnlineBrainIntensive | resources/HCP/pyxnat/pyxnat/__init__.py | Python | mit | 707 | [
"VisIt"
] | 550707408f2638a70c2ca304503d973abae0dbb789435800cf527551bb2204ec |
# Import the old tracking id from the RCM file for period 19660101-19701231,
# as it is the first used file to create historical indices:
# (e.g. tas_EUR-44_IPSL-IPSL-CM5A-MR_historical_r1i1p1_SMHI-RCA4_v1_day_19660101-19701231.nc)
#track_GCM_indice=$(
import netCDF4
from netCDF4 import Dataset
import ctypes
import icclim
import datetime
import icclim.util.callback as callback
#cb = callback.defaultCallback
import fnmatch
import os
# Blank separator lines emitted at import time (bare Python 2 print
# statements; this script is Python 2 only).
print
#print '<<Loaded python modules>>'
print
# =====================================================================================================
# Define some paths
# RCP scenarios processed by the (truncated) loop that follows.
experiments_list = ['rcp45','rcp85']
for experiment in experiments_list:
# RCM output data and output of calculated indices
nobackup='/net/pc150394/nobackup/users/stepanov/'
# tas (bias corrected)
in_path_RCM_tas_nbc_50km=nobackup+"CLIPC/Model_data/tas/"+experiment+"/50km/daily/SMHI_DBS43_2006_2100/"
out_path_RCM_tas_nbc_50km=nobackup+"icclim_indices_v4.2.3_seapoint_fixed/EUR-44/"+experiment+"/tas/"
# output path still for test only
# =====================================================================================================
# Every RCM output file has predictable root name (specific to resolution!)
# ==> Construct data file names
#8/10 models. 2 more below in separate FOR loops.
models_list_50km = ['CCCma-CanESM2','CNRM-CERFACS-CNRM-CM5','NCC-NorESM1-M',
'MPI-M-MPI-ESM-LR','IPSL-IPSL-CM5A-MR','MIROC-MIROC5',
'NOAA-GFDL-GFDL-ESM2M','CSIRO-QCCCE-CSIRO-Mk3-6-0']
#models_list_50km = ['CCCma-CanESM2']
#models_list_50km = ['CNRM-CERFACS-CNRM-CM5']
for model in models_list_50km:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
tas_nbc_file_root_hist = "tasAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
tas_nbc_file_root_proj = "tasAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_tas_nbc_50km_hist = in_path_RCM_tas_nbc_50km+tas_nbc_file_root_hist+"19660101-19701231.nc"
files_tas_nbc_50km_proj = in_path_RCM_tas_nbc_50km+tas_nbc_file_root_proj+"20060101-20101231.nc"
# Tell me which files you imported
print 'Historical input Model files:', files_tas_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_tas_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_tas_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_tas_nbc_50km_proj,'r')
# Print current GCM tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_tas_nbc_50km):
# ----------------------------------------------------------------
# Pre-change of
# model name in output file for models:
# indice into r1m when writing output file:
#
# NCC-NorESM1-M --> NorESM1-M
# MIROC-MIROC5 --> MIROC5
model_fout=model
#print "input model_fout is: ",model
if model == 'NCC-NorESM1-M': model_fout='NorESM1-M'
elif model == 'MIROC-MIROC5': model_fout='MIROC5'
elif model == 'CNRM-CERFACS-CNRM-CM5': model_fout='CNRM-CM5'
elif model == 'MPI-M-MPI-ESM-LR': model_fout='MPI-ESM-LR'
elif model == 'IPSL-IPSL-CM5A-MR': model_fout='IPSL-CM5A-MR'
elif model == 'NOAA-GFDL-GFDL-ESM2M': model_fout='GFDL-ESM2M'
elif model == 'CSIRO-QCCCE-CSIRO-Mk3-6-0': model_fout='CSIRO-Mk3-6-0'
else: model_fout=model
#print "new model_fout is: ",model_fout
#if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model_fout+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
#print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_tas_hist = Dataset(out_path_RCM_tas_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_tas_hist.comment='fun'
nc_indice_tas_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for file_proj in os.listdir(out_path_RCM_tas_nbc_50km):
# ----------------------------------------------------------------
# Pre-change of
# model name in output file for models:
# indice into r1m when writing output file:
#
# NCC-NorESM1-M --> NorESM1-M
# MIROC-MIROC5 --> MIROC5
model_fout=model
#print "input model_fout is: ",model
if model == 'NCC-NorESM1-M': model_fout='NorESM1-M'
elif model == 'MIROC-MIROC5': model_fout='MIROC5'
elif model == 'CNRM-CERFACS-CNRM-CM5': model_fout='CNRM-CM5'
elif model == 'MPI-M-MPI-ESM-LR': model_fout='MPI-ESM-LR'
elif model == 'IPSL-IPSL-CM5A-MR': model_fout='IPSL-CM5A-MR'
elif model == 'NOAA-GFDL-GFDL-ESM2M': model_fout='GFDL-ESM2M'
elif model == 'CSIRO-QCCCE-CSIRO-Mk3-6-0': model_fout='CSIRO-Mk3-6-0'
else: model_fout=model
#print "new model_fout is: ",model_fout
if fnmatch.fnmatch(file_proj, "*"+model_fout+"_"+experiment+"*"):
print "Indice where new projection invar_tracking_id goes is:", file_proj
print
# Create Dataset from these files
nc_indice_tas_proj = Dataset(out_path_RCM_tas_nbc_50km+file_proj,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_tas_hist.comment='fun'
nc_indice_tas_proj.invar_tracking_id=nc_in_proj.tracking_id
# Had-GEM
models_list_50km_HadGEM = ['MOHC-HadGEM2-ES']
for model in models_list_50km_HadGEM:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
tas_nbc_file_root_hist = "tasAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
tas_nbc_file_root_proj = "tasAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_tas_nbc_50km_hist = in_path_RCM_tas_nbc_50km+tas_nbc_file_root_hist+"19660101-19701230.nc"
files_tas_nbc_50km_proj = in_path_RCM_tas_nbc_50km+tas_nbc_file_root_proj+"20060101-20101230.nc"
# Tell me which files you imported
print 'Historical input Model files:', files_tas_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_tas_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_tas_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_tas_nbc_50km_proj,'r')
# Print current GCM tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_tas_nbc_50km):
#if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model[5:15]+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
#print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_tas_hist = Dataset(out_path_RCM_tas_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_tas_hist.comment='fun'
nc_indice_tas_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for file_proj in os.listdir(out_path_RCM_tas_nbc_50km):
if fnmatch.fnmatch(file_proj, "*"+model[5:15]+"_"+experiment+"*"):
print "Indice where new projection invar_tracking_id goes is:", file_proj
# Create Dataset from these files
nc_indice_tas_proj = Dataset(out_path_RCM_tas_nbc_50km+file_proj,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_tas_hist.comment='fun'
nc_indice_tas_proj.invar_tracking_id=nc_in_proj.tracking_id
print
# EC-EARTH
models_list_50km_EC_EARTH = ['ICHEC-EC-EARTH']
for model in models_list_50km_EC_EARTH:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
tas_nbc_file_root_hist = "tasAdjust_EUR-44_"+model+"_"+experiment+"_r12i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
tas_nbc_file_root_proj = "tasAdjust_EUR-44_"+model+"_"+experiment+"_r12i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_tas_nbc_50km_hist = in_path_RCM_tas_nbc_50km+tas_nbc_file_root_hist+"19660101-19701231.nc"
files_tas_nbc_50km_proj = in_path_RCM_tas_nbc_50km+tas_nbc_file_root_proj+"20060101-20101231.nc"
# Tell me which files you imported
print 'Historical input Model files:', files_tas_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_tas_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_tas_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_tas_nbc_50km_proj,'r')
# Print current GCM tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_tas_nbc_50km):
#if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model[6:14]+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
#print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_tas_hist = Dataset(out_path_RCM_tas_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_tas_hist.comment='fun'
nc_indice_tas_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for file_proj in os.listdir(out_path_RCM_tas_nbc_50km):
if fnmatch.fnmatch(file_proj, "*"+model[6:14]+"_"+experiment+"*"):
print "Indice where new projection invar_tracking_id goes is:", file_proj
# Create Dataset from these files
nc_indice_tas_proj = Dataset(out_path_RCM_tas_nbc_50km+file_proj,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_tas_hist.comment='fun'
nc_indice_tas_proj.invar_tracking_id=nc_in_proj.tracking_id
quit() | igryski/Indices_icclim_ClipC | src/TMEAN/get_put_invar_tracking_id_python_TAS.py | Python | gpl-3.0 | 11,493 | [
"NetCDF"
] | 4d0e42db6b66a77184df536972b4530b2761c03d5b9476182490cc8d4ebd8c4e |
import os, sys
import csv
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from types import *
import math
import shutil
class DiagnosticIndex(ScriptedLoadableModule):
  """Slicer module entry point: declares the DiagnosticIndex module metadata.

  Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    # Module identity as shown in the Slicer module selector
    parent.title = "DiagnosticIndex"
    parent.categories = ["Quantification"]
    parent.dependencies = []
    parent.contributors = ["Laura PASCAL (UofM), Beatriz Paniagua (UNC) and Juan Carlos Prieto (UNC)"]
    # Text displayed in the module's Help tab
    parent.helpText = """
    DiagnosticIndex is used to define the OA type of
    a patient according a Classification Groups that
    you can create.
    """
    # Funding acknowledgement displayed alongside the help text
    parent.acknowledgementText = """
    This work was supported by the National
    Institutes of Dental and Craniofacial Research
    and Biomedical Imaging and Bioengineering of
    the National Institutes of Health under Award
    Number R01DE024450.
    """
class DiagnosticIndexWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# ---- Widget Setup ----
# Global Variables
self.logic = DiagnosticIndexLogic(self)
self.dictVTKFiles = dict()
self.dictGroups = dict()
self.dictCSVFile = dict()
self.directoryList = list()
self.groupSelected = set()
self.dictShapeModels = dict()
self.patientList = list()
self.dictResult = dict()
# Interface
loader = qt.QUiLoader()
self.moduleName = 'DiagnosticIndex'
scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.moduleName)
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load(qfile, self.parent)
self.layout = self.parent.layout()
self.widget = widget
self.layout.addWidget(widget)
# global variables of the Interface:
# Tab: Creation of CSV File for Classification Groups
self.collapsibleButton_creationCSVFile = self.logic.get('CollapsibleButton_creationCSVFile')
self.spinBox_group = self.logic.get('spinBox_group')
self.directoryButton_creationCSVFile = self.logic.get('DirectoryButton_creationCSVFile')
self.stackedWidget_manageGroup = self.logic.get('stackedWidget_manageGroup')
self.pushButton_addGroup = self.logic.get('pushButton_addGroup')
self.pushButton_removeGroup = self.logic.get('pushButton_removeGroup')
self.pushButton_modifyGroup = self.logic.get('pushButton_modifyGroup')
self.directoryButton_exportCSVFile = self.logic.get('DirectoryButton_exportCSVFile')
self.pushButton_exportCSVfile = self.logic.get('pushButton_exportCSVfile')
# Tab: Creation of New Classification Groups
self.collapsibleButton_creationClassificationGroups = self.logic.get('CollapsibleButton_creationClassificationGroups')
self.pathLineEdit_NewGroups = self.logic.get('PathLineEdit_NewGroups')
self.collapsibleGroupBox_previewVTKFiles = self.logic.get('CollapsibleGroupBox_previewVTKFiles')
self.checkableComboBox_ChoiceOfGroup = self.logic.get('CheckableComboBox_ChoiceOfGroup')
self.tableWidget_VTKFiles = self.logic.get('tableWidget_VTKFiles')
self.pushButton_previewVTKFiles = self.logic.get('pushButton_previewVTKFiles')
self.pushButton_compute = self.logic.get('pushButton_compute')
self.directoryButton_exportNewClassification = self.logic.get('DirectoryButton_exportNewClassification')
self.pushButton_exportNewClassification = self.logic.get('pushButton_exportNewClassification')
# Tab: Selection Classification Groups
self.collapsibleButton_SelectClassificationGroups = self.logic.get('CollapsibleButton_SelectClassificationGroups')
self.pathLineEdit_selectionClassificationGroups = self.logic.get('PathLineEdit_selectionClassificationGroups')
self.spinBox_healthyGroup = self.logic.get('spinBox_healthyGroup')
self.pushButton_previewGroups = self.logic.get('pushButton_previewGroups')
self.MRMLTreeView_classificationGroups = self.logic.get('MRMLTreeView_classificationGroups')
# Tab: Select Input Data
self.collapsibleButton_selectInputData = self.logic.get('CollapsibleButton_selectInputData')
self.MRMLNodeComboBox_VTKInputData = self.logic.get('MRMLNodeComboBox_VTKInputData')
self.pathLineEdit_CSVInputData = self.logic.get('PathLineEdit_CSVInputData')
self.checkBox_fileInGroups = self.logic.get('checkBox_fileInGroups')
self.pushButton_applyOAIndex = self.logic.get('pushButton_applyOAIndex')
# Tab: Result / Analysis
self.collapsibleButton_Result = self.logic.get('CollapsibleButton_Result')
self.tableWidget_result = self.logic.get('tableWidget_result')
self.pushButton_exportResult = self.logic.get('pushButton_exportResult')
self.directoryButton_exportResult = self.logic.get('DirectoryButton_exportResult')
# Widget Configuration
# disable/enable and hide/show widget
self.spinBox_healthyGroup.setDisabled(True)
self.pushButton_previewGroups.setDisabled(True)
self.pushButton_compute.setDisabled(True)
self.pushButton_compute.setDisabled(True)
self.directoryButton_exportNewClassification.setDisabled(True)
self.pushButton_exportNewClassification.setDisabled(True)
self.checkBox_fileInGroups.setDisabled(True)
self.checkableComboBox_ChoiceOfGroup.setDisabled(True)
self.tableWidget_VTKFiles.setDisabled(True)
self.pushButton_previewVTKFiles.setDisabled(True)
# qMRMLNodeComboBox configuration
self.MRMLNodeComboBox_VTKInputData.setMRMLScene(slicer.mrmlScene)
# initialisation of the stackedWidget to display the button "add group"
self.stackedWidget_manageGroup.setCurrentIndex(0)
# spinbox configuration in the tab "Creation of CSV File for Classification Groups"
self.spinBox_group.setMinimum(1)
self.spinBox_group.setMaximum(1)
self.spinBox_group.setValue(1)
# tree view configuration
headerTreeView = self.MRMLTreeView_classificationGroups.header()
headerTreeView.setVisible(False)
self.MRMLTreeView_classificationGroups.setMRMLScene(slicer.app.mrmlScene())
self.MRMLTreeView_classificationGroups.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode']
self.MRMLTreeView_classificationGroups.setDisabled(True)
sceneModel = self.MRMLTreeView_classificationGroups.sceneModel()
# sceneModel.setHorizontalHeaderLabels(["Group Classification"])
sceneModel.colorColumn = 1
sceneModel.opacityColumn = 2
headerTreeView.setStretchLastSection(False)
headerTreeView.setResizeMode(sceneModel.nameColumn,qt.QHeaderView.Stretch)
headerTreeView.setResizeMode(sceneModel.colorColumn,qt.QHeaderView.ResizeToContents)
headerTreeView.setResizeMode(sceneModel.opacityColumn,qt.QHeaderView.ResizeToContents)
# configuration of the table for preview VTK file
self.tableWidget_VTKFiles.setColumnCount(4)
self.tableWidget_VTKFiles.setHorizontalHeaderLabels([' VTK files ', ' Group ', ' Visualization ', 'Color'])
self.tableWidget_VTKFiles.setColumnWidth(0, 200)
horizontalHeader = self.tableWidget_VTKFiles.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
horizontalHeader.setResizeMode(0,qt.QHeaderView.Stretch)
horizontalHeader.setResizeMode(1,qt.QHeaderView.ResizeToContents)
horizontalHeader.setResizeMode(2,qt.QHeaderView.ResizeToContents)
horizontalHeader.setResizeMode(3,qt.QHeaderView.ResizeToContents)
self.tableWidget_VTKFiles.verticalHeader().setVisible(False)
# configuration of the table to display the result
self.tableWidget_result.setColumnCount(2)
self.tableWidget_result.setHorizontalHeaderLabels([' VTK files ', ' Assigned Group '])
self.tableWidget_result.setColumnWidth(0, 300)
horizontalHeader = self.tableWidget_result.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
horizontalHeader.setResizeMode(0,qt.QHeaderView.Stretch)
horizontalHeader.setResizeMode(1,qt.QHeaderView.ResizeToContents)
self.tableWidget_result.verticalHeader().setVisible(False)
# --------------------------------------------------------- #
# Connection #
# --------------------------------------------------------- #
# Tab: Creation of CSV File for Classification Groups
self.collapsibleButton_creationCSVFile.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(self.collapsibleButton_creationCSVFile))
self.spinBox_group.connect('valueChanged(int)', self.onManageGroup)
self.pushButton_addGroup.connect('clicked()', self.onAddGroupForCreationCSVFile)
self.pushButton_removeGroup.connect('clicked()', self.onRemoveGroupForCreationCSVFile)
self.pushButton_modifyGroup.connect('clicked()', self.onModifyGroupForCreationCSVFile)
self.pushButton_exportCSVfile.connect('clicked()', self.onExportForCreationCSVFile)
# Tab: Creation of New Classification Groups
self.collapsibleButton_creationClassificationGroups.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(self.collapsibleButton_creationClassificationGroups))
self.pathLineEdit_NewGroups.connect('currentPathChanged(const QString)', self.onNewGroups)
self.checkableComboBox_ChoiceOfGroup.connect('checkedIndexesChanged()', self.onCheckableComboBoxValueChanged)
self.pushButton_previewVTKFiles.connect('clicked()', self.onPreviewVTKFiles)
self.pushButton_compute.connect('clicked()', self.onComputeNewClassificationGroups)
self.pushButton_exportNewClassification.connect('clicked()', self.onExportNewClassificationGroups)
# Tab: Selection of Classification Groups
self.collapsibleButton_SelectClassificationGroups.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(self.collapsibleButton_SelectClassificationGroups))
self.pathLineEdit_selectionClassificationGroups.connect('currentPathChanged(const QString)', self.onSelectionClassificationGroups)
self.pushButton_previewGroups.connect('clicked()', self.onPreviewGroupMeans)
# Tab: Select Input Data
self.collapsibleButton_selectInputData.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(self.collapsibleButton_selectInputData))
self.MRMLNodeComboBox_VTKInputData.connect('currentNodeChanged(vtkMRMLNode*)', self.onVTKInputData)
self.checkBox_fileInGroups.connect('clicked()', self.onCheckFileInGroups)
self.pathLineEdit_CSVInputData.connect('currentPathChanged(const QString)', self.onCSVInputData)
self.pushButton_applyOAIndex.connect('clicked()', self.onComputeOAIndex)
# Tab: Result / Analysis
self.collapsibleButton_Result.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(self.collapsibleButton_Result))
self.pushButton_exportResult.connect('clicked()', self.onExportResult)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
  # function called each time that the user "enter" in Diagnostic Index interface
  def enter(self):
    """Slicer hook invoked when the user enters the module GUI; currently a no-op."""
    #TODO
    pass
  # function called each time that the user "exit" in Diagnostic Index interface
  def exit(self):
    """Slicer hook invoked when the user leaves the module GUI; currently a no-op."""
    #TODO
    pass
  # function called each time that the scene is closed (if Diagnostic Index has been initialized)
  def onCloseScene(self, obj, event):
    """Reset all module state and widgets when the MRML scene is closed.

    Re-initializes every dictionary/list used by the module and restores the
    GUI widgets to the same configuration produced by setup().
    """
    print "onCloseScene"
    # Drop all accumulated state
    self.dictVTKFiles = dict()
    self.dictGroups = dict()
    self.dictCSVFile = dict()
    self.directoryList = list()
    self.groupSelected = set()
    self.dictShapeModels = dict()
    self.patientList = list()
    self.dictResult = dict()
    # Tab: New Classification Groups
    self.pathLineEdit_NewGroups.setCurrentPath(" ")
    self.checkableComboBox_ChoiceOfGroup.setDisabled(True)
    # Rebuild the vtk-files table exactly as configured in setup()
    self.tableWidget_VTKFiles.clear()
    self.tableWidget_VTKFiles.setColumnCount(4)
    self.tableWidget_VTKFiles.setHorizontalHeaderLabels([' VTK files ', ' Group ', ' Visualization ', 'Color'])
    self.tableWidget_VTKFiles.setColumnWidth(0, 200)
    horizontalHeader = self.tableWidget_VTKFiles.horizontalHeader()
    horizontalHeader.setStretchLastSection(False)
    horizontalHeader.setResizeMode(0,qt.QHeaderView.Stretch)
    horizontalHeader.setResizeMode(1,qt.QHeaderView.ResizeToContents)
    horizontalHeader.setResizeMode(2,qt.QHeaderView.ResizeToContents)
    horizontalHeader.setResizeMode(3,qt.QHeaderView.ResizeToContents)
    self.tableWidget_VTKFiles.verticalHeader().setVisible(False)
    self.tableWidget_VTKFiles.setDisabled(True)
    self.pushButton_previewVTKFiles.setDisabled(True)
    self.pushButton_compute.setDisabled(True)
    self.directoryButton_exportNewClassification.setDisabled(True)
    self.pushButton_exportNewClassification.setDisabled(True)
    # Tab: Selection of Classification Groups
    self.pathLineEdit_selectionClassificationGroups.setCurrentPath(" ")
    if self.spinBox_healthyGroup.enabled:
      self.spinBox_healthyGroup.setValue(0)
      self.spinBox_healthyGroup.setDisabled(True)
    # Tab: Preview of Classification Group
    self.MRMLTreeView_classificationGroups.setDisabled(True)
    self.pushButton_previewGroups.setDisabled(True)
    # Tab: Select Input Data
    self.pathLineEdit_CSVInputData.setCurrentPath(" ")
    self.checkBox_fileInGroups.setDisabled(True)
    # Tab: Result / Analysis
    # Rebuild the result table exactly as configured in setup()
    self.tableWidget_result.clear()
    self.tableWidget_result.setColumnCount(2)
    self.tableWidget_result.setHorizontalHeaderLabels([' VTK files ', ' Assigned Group '])
    self.tableWidget_result.setColumnWidth(0, 300)
    horizontalHeader = self.tableWidget_result.horizontalHeader()
    horizontalHeader.setStretchLastSection(False)
    horizontalHeader.setResizeMode(0,qt.QHeaderView.Stretch)
    horizontalHeader.setResizeMode(1,qt.QHeaderView.ResizeToContents)
    self.tableWidget_result.verticalHeader().setVisible(False)
# Only one tab can be display at the same time:
# When one tab is opened all the other tabs are closed
def onSelectedCollapsibleButtonOpen(self, selectedCollapsibleButton):
if selectedCollapsibleButton.isChecked():
collapsibleButtonList = [self.collapsibleButton_creationCSVFile,
self.collapsibleButton_creationClassificationGroups,
self.collapsibleButton_SelectClassificationGroups,
self.collapsibleButton_selectInputData,
self.collapsibleButton_Result]
for collapsibleButton in collapsibleButtonList:
collapsibleButton.setChecked(False)
selectedCollapsibleButton.setChecked(True)
# ---------------------------------------------------- #
# Tab: Creation of CSV File for Classification Groups #
# ---------------------------------------------------- #
# Function in order to manage the display of these three buttons:
# - "Add Group"
# - "Modify Group"
# - "Remove Group"
def onManageGroup(self):
# Display the button:
# - "Add Group" for a group which hasn't been added yet
# - "Remove Group" for the last group added
# - "Modify Group" for all the groups added
if self.spinBox_group.maximum == self.spinBox_group.value:
self.stackedWidget_manageGroup.setCurrentIndex(0)
else:
self.stackedWidget_manageGroup.setCurrentIndex(1)
if (self.spinBox_group.maximum - 1) == self.spinBox_group.value:
self.pushButton_removeGroup.show()
else:
self.pushButton_removeGroup.hide()
# Update the path of the directory button
if len(self.directoryList) > 0:
self.directoryButton_creationCSVFile.directory = self.directoryList[self.spinBox_group.value - 1]
# Function to add a group of the dictionary
# - Add the paths of all the vtk files found in the directory given
# of a dictionary which will be used to create the CSV file
def onAddGroupForCreationCSVFile(self):
# Error message
directory = self.directoryButton_creationCSVFile.directory.encode('utf-8')
if directory in self.directoryList:
index = self.directoryList.index(directory) + 1
slicer.util.errorDisplay('Path of directory already used for the group ' + str(index))
return
# Add the paths of vtk files of the dictionary
self.logic.addGroupToDictionary(self.dictCSVFile, directory, self.directoryList, self.spinBox_group.value)
condition = self.logic.checkSeveralMeshInDict(self.dictCSVFile)
if not condition:
# Remove the paths of vtk files of the dictionary
self.logic.removeGroupToDictionary(self.dictCSVFile, self.directoryList, self.spinBox_group.value)
return
# Increment of the number of the group in the spinbox
self.spinBox_group.blockSignals(True)
self.spinBox_group.setMaximum(self.spinBox_group.value + 1)
self.spinBox_group.setValue(self.spinBox_group.value + 1)
self.spinBox_group.blockSignals(False)
# Message for the user
slicer.util.delayDisplay("Group Added")
# Function to remove a group of the dictionary
# - Remove the paths of all the vtk files corresponding to the selected group
# of the dictionary which will be used to create the CSV file
def onRemoveGroupForCreationCSVFile(self):
# Remove the paths of the vtk files of the dictionary
self.logic.removeGroupToDictionary(self.dictCSVFile, self.directoryList, self.spinBox_group.value)
# Decrement of the number of the group in the spinbox
self.spinBox_group.blockSignals(True)
self.spinBox_group.setMaximum(self.spinBox_group.maximum - 1)
self.spinBox_group.blockSignals(False)
# Change the buttons "remove group" and "modify group" in "add group"
self.stackedWidget_manageGroup.setCurrentIndex(0)
# Message for the user
slicer.util.delayDisplay("Group removed")
# Function to modify a group of the dictionary:
# - Remove of the dictionary the paths of all vtk files corresponding to the selected group
# - Add of the dictionary the new paths of all the vtk files
def onModifyGroupForCreationCSVFile(self):
# Error message
directory = self.directoryButton_creationCSVFile.directory.encode('utf-8')
if directory in self.directoryList:
index = self.directoryList.index(directory) + 1
slicer.util.errorDisplay('Path of directory already used for the group ' + str(index))
return
# Remove the paths of vtk files of the dictionary
self.logic.removeGroupToDictionary(self.dictCSVFile, self.directoryList, self.spinBox_group.value)
# Add the paths of vtk files of the dictionary
self.logic.addGroupToDictionary(self.dictCSVFile, directory, self.directoryList, self.spinBox_group.value)
# Message for the user
slicer.util.delayDisplay("Group modified")
# Function to export the CSV file in the directory chosen by the user
# - Save the CSV file from the dictionary previously filled
# - Load automatically this CSV file in the next tab: "Creation of New Classification Groups"
def onExportForCreationCSVFile(self):
# Path of the csv file
directory = self.directoryButton_exportCSVFile.directory.encode('utf-8')
basename = 'Groups.csv'
filepath = directory + "/" + basename
# Message if the csv file already exists
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
if os.path.exists(filepath):
messageBox.setText('File ' + filepath + ' already exists!')
messageBox.setInformativeText('Do you want to replace it ?')
messageBox.setStandardButtons( messageBox.No | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.No:
return
# Save the CSV File
self.logic.creationCSVFile(directory, basename, self.dictCSVFile, "Groups")
# Re-Initialization of the first tab
self.spinBox_group.setMaximum(1)
self.spinBox_group.setValue(1)
self.stackedWidget_manageGroup.setCurrentIndex(0)
self.directoryButton_creationCSVFile.directory = qt.QDir.homePath() + '/Desktop'
self.directoryButton_exportCSVFile.directory = qt.QDir.homePath() + '/Desktop'
# Re-Initialization of:
# - the dictionary containing all the paths of the vtk groups
# - the list containing all the paths of the different directories
self.directoryList = list()
self.dictCSVFile = dict()
# Message in the python console
print "Export CSV File: " + filepath
# Load automatically the CSV file in the pathline in the next tab "Creation of New Classification Groups"
self.pathLineEdit_NewGroups.setCurrentPath(filepath)
# ---------------------------------------------------- #
# Tab: Creation of New Classification Groups #
# ---------------------------------------------------- #
# Function to read the CSV file containing all the vtk filepaths needed to create the new Classification Groups
def onNewGroups(self):
# Re-initialization of the dictionary containing all the vtk files
# which will be used to create a new Classification Groups
self.dictVTKFiles = dict()
# Check if the path exists:
if not os.path.exists(self.pathLineEdit_NewGroups.currentPath):
return
print "------ Creation of a new Classification Groups ------"
# Check if it's a CSV file
condition1 = self.logic.checkExtension(self.pathLineEdit_NewGroups.currentPath, ".csv")
if not condition1:
self.pathLineEdit_NewGroups.setCurrentPath(" ")
return
# Download the CSV file
self.logic.table = self.logic.readCSVFile(self.pathLineEdit_NewGroups.currentPath)
condition2 = self.logic.creationDictVTKFiles(self.dictVTKFiles)
condition3 = self.logic.checkSeveralMeshInDict(self.dictVTKFiles)
# If the file is not conformed:
# Re-initialization of the dictionary containing all the data
# which will be used to create a new Classification Groups
if not (condition2 and condition3):
self.dictVTKFiles = dict()
self.pathLineEdit_NewGroups.setCurrentPath(" ")
return
# Fill the table for the preview of the vtk files in Shape Population Viewer
self.logic.fillTableForPreviewVTKFilesInSPV(self.dictVTKFiles,
self.checkableComboBox_ChoiceOfGroup,
self.tableWidget_VTKFiles)
# Enable/disable buttons
self.checkableComboBox_ChoiceOfGroup.setEnabled(True)
self.tableWidget_VTKFiles.setEnabled(True)
self.pushButton_previewVTKFiles.setEnabled(True)
self.pushButton_compute.setEnabled(True)
# Function to manage the checkable combobox to allow the user to choose the group that he wants to preview in SPV
def onCheckableComboBoxValueChanged(self):
# Update the checkboxes in the qtableWidget of each vtk file
index = self.checkableComboBox_ChoiceOfGroup.currentIndex
for row in range(0,self.tableWidget_VTKFiles.rowCount):
# Recovery of the group of the vtk file contained in the combobox (column 2)
widget = self.tableWidget_VTKFiles.cellWidget(row, 1)
tuple = widget.children()
comboBox = qt.QComboBox()
comboBox = tuple[1]
group = comboBox.currentIndex + 1
if group == (index + 1):
# check the checkBox
widget = self.tableWidget_VTKFiles.cellWidget(row, 2)
tuple = widget.children()
checkBox = tuple[1]
checkBox.blockSignals(True)
item = self.checkableComboBox_ChoiceOfGroup.model().item(index, 0)
if item.checkState():
checkBox.setChecked(True)
self.groupSelected.add(index + 1)
else:
checkBox.setChecked(False)
self.groupSelected.discard(index + 1)
checkBox.blockSignals(False)
# Update the color in the qtableWidget of each vtk file
colorTransferFunction = self.logic.creationColorTransfer(self.groupSelected)
self.updateColorInTableForPreviewInSPV(colorTransferFunction)
# Function to manage the combobox which allow the user to change the group of a vtk file
def onGroupValueChanged(self):
# Updade the dictionary which containing the VTK files sorted by groups
self.logic.onComboBoxTableValueChanged(self.dictVTKFiles, self.tableWidget_VTKFiles)
# Update the checkable combobox which display the groups selected to preview them in SPV
self.onCheckBoxTableValueChanged()
# Function to manage the checkbox in the table used to make a preview in SPV
def onCheckBoxTableValueChanged(self):
    """Rebuild self.groupSelected from the per-row checkboxes and mirror
    the aggregate state of each group onto the checkable combobox."""
    self.groupSelected = set()
    # Block the combobox signals: item.setCheckState below must not
    # re-trigger onCheckableComboBoxValueChanged
    self.checkableComboBox_ChoiceOfGroup.blockSignals(True)
    allcheck = True
    for key, value in self.dictVTKFiles.items():
        item = self.checkableComboBox_ChoiceOfGroup.model().item(key - 1, 0)
        if not value == []:
            for vtkFile in value:
                filename = os.path.basename(vtkFile)
                # Locate the table row of this vtk file by its filename label (column 0)
                for row in range(0,self.tableWidget_VTKFiles.rowCount):
                    qlabel = self.tableWidget_VTKFiles.cellWidget(row, 0)
                    if qlabel.text == filename:
                        widget = self.tableWidget_VTKFiles.cellWidget(row, 2)
                        tuple = widget.children()
                        checkBox = tuple[1]
                        if not checkBox.checkState():
                            # One unchecked file is enough to uncheck the whole group
                            allcheck = False
                            item.setCheckState(0)  # Qt.Unchecked
                        else:
                            self.groupSelected.add(key)
            if allcheck:
                item.setCheckState(2)  # Qt.Checked
            else:
                item.setCheckState(0)
            # Reset the flag for the next group
            allcheck = True
    self.checkableComboBox_ChoiceOfGroup.blockSignals(False)
    # Update the color in the qtableWidget which will display in SPV
    colorTransferFunction = self.logic.creationColorTransfer(self.groupSelected)
    self.updateColorInTableForPreviewInSPV(colorTransferFunction)
# Function to update the colors that the selected vtk files will have in Shape Population Viewer
def updateColorInTableForPreviewInSPV(self, colorTransferFunction):
    """Refresh column 3 of the VTK-file table with the color each mesh
    will have in Shape Population Viewer.

    Checked rows get their group's color from *colorTransferFunction*;
    unchecked rows are reset to white.
    """
    for row in range(0, self.tableWidget_VTKFiles.rowCount):
        # Group displayed for this vtk file (column 1 combobox, 1-based)
        widget = self.tableWidget_VTKFiles.cellWidget(row, 1)
        comboBox = widget.children()[1]
        group = comboBox.currentIndex + 1
        # Checkbox of this vtk file (column 2)
        widget = self.tableWidget_VTKFiles.cellWidget(row, 2)
        checkBox = widget.children()[1]
        # Checked: color from the transfer function; unchecked: white
        if checkBox.isChecked():
            rgb = colorTransferFunction.GetColor(group)
            self.tableWidget_VTKFiles.item(row, 3).setBackground(qt.QColor(rgb[0]*255, rgb[1]*255, rgb[2]*255))
        else:
            self.tableWidget_VTKFiles.item(row, 3).setBackground(qt.QColor(255, 255, 255))
# Function to display the selected vtk files in Shape Population Viewer
#    - Add a color map "DisplayClassificationGroup"
#    - Launch the CLI ShapePopulationViewer
def onPreviewVTKFiles(self):
    """Export the group meshes with a classification color map and open
    them in Shape Population Viewer via the launcher CLI."""
    print "--- Preview VTK Files in ShapePopulationViewer ---"
    if os.path.exists(self.pathLineEdit_NewGroups.currentPath):
        # Creation of a color map to visualize each group with a different color in ShapePopulationViewer
        self.logic.addColorMap(self.tableWidget_VTKFiles, self.dictVTKFiles)
        # Creation of a CSV file to load the vtk files in ShapePopulationViewer
        filePathCSV = slicer.app.temporaryPath + '/' + 'VTKFilesPreview_OAIndex.csv'
        self.logic.creationCSVFileForSPV(filePathCSV, self.tableWidget_VTKFiles, self.dictVTKFiles)
        # Launch the CLI ShapePopulationViewer
        parameters = {}
        parameters["CSVFile"] = filePathCSV
        launcherSPV = slicer.modules.launcher
        # Blocking call: returns once the viewer is closed
        slicer.cli.run(launcherSPV, None, parameters, wait_for_completion=True)
        # Remove the vtk files previously created in the temporary directory of Slicer
        for value in self.dictVTKFiles.values():
            self.logic.removeDataVTKFiles(value)
# Function to compute the new Classification Groups
#    - Remove all the arrays of all the vtk files
#    - Compute the mean of each group thanks to Statismo
def onComputeNewClassificationGroups(self):
    """Build one shape model per group from the loaded VTK files, then
    enable the export controls."""
    for groupNumber, vtkFiles in self.dictVTKFiles.items():
        # Strip every point-data array from the meshes of this group
        self.logic.deleteArrays(groupNumber, vtkFiles)
        # Build the statistical shape model of the group
        self.logic.buildShapeModel(groupNumber, vtkFiles)
        # Clean up the temporary vtk copies used to build the model
        self.logic.removeDataVTKFiles(vtkFiles)
        # Remember where the shape model of this group lives
        self.logic.storeShapeModel(self.dictShapeModels, groupNumber)
    # Allow the user to export the freshly computed classification
    self.directoryButton_exportNewClassification.setEnabled(True)
    self.pushButton_exportNewClassification.setEnabled(True)
# Function to export the new Classification Groups
# - Data saved:
# - Save the mean vtk files in the selected directory
# - Save the CSV file in the selected directory
# - Load automatically the CSV file in the next tab: "Selection of Classification Groups"
def onExportNewClassificationGroups(self):
print "--- Export the new Classification Groups ---"
# Message for the user if files already exist
directory = self.directoryButton_exportNewClassification.directory.encode('utf-8')
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
filePathExisting = list()
# Check if the CSV file exists
CSVfilePath = directory + "/ClassificationGroups.csv"
if os.path.exists(CSVfilePath):
filePathExisting.append(CSVfilePath)
# Check if the shape model exist
for key, value in self.dictShapeModels.items():
modelFilename = os.path.basename(value)
modelFilePath = directory + '/' + modelFilename
if os.path.exists(modelFilePath):
filePathExisting.append(modelFilePath)
# Write the message for the user
if len(filePathExisting) > 0:
if len(filePathExisting) == 1:
text = 'File ' + filePathExisting[0] + ' already exists!'
informativeText = 'Do you want to replace it ?'
elif len(filePathExisting) > 1:
text = 'These files are already exist: \n'
for path in filePathExisting:
text = text + path + '\n'
informativeText = 'Do you want to replace them ?'
messageBox.setText(text)
messageBox.setInformativeText(informativeText)
messageBox.setStandardButtons( messageBox.No | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.No:
return
# Save the CSV File and the shape model of each group
self.logic.saveNewClassificationGroups('ClassificationGroups.csv', directory, self.dictShapeModels)
# Remove the shape model (GX.h5) of each group
self.logic.removeDataAfterNCG(self.dictShapeModels)
# Re-Initialization of the dictionary containing the path of the shape model of each group
self.dictShapeModels = dict()
# Message for the user
slicer.util.delayDisplay("Files Saved")
# Disable the option to export the new data
self.directoryButton_exportNewClassification.setDisabled(True)
self.pushButton_exportNewClassification.setDisabled(True)
# Load automatically the CSV file in the pathline in the next tab "Selection of Classification Groups"
if self.pathLineEdit_selectionClassificationGroups.currentPath == CSVfilePath:
self.pathLineEdit_selectionClassificationGroups.setCurrentPath(" ")
self.pathLineEdit_selectionClassificationGroups.setCurrentPath(CSVfilePath)
# ---------------------------------------------------- #
# Tab: Selection of Classification Groups #
# ---------------------------------------------------- #
# Function to select the Classification Groups
def onSelectionClassificationGroups(self):
# Re-initialization of the dictionary containing the Classification Groups
self.dictShapeModels = dict()
# Check if the path exists:
if not os.path.exists(self.pathLineEdit_selectionClassificationGroups.currentPath):
return
print "------ Selection of a Classification Groups ------"
# Check if it's a CSV file
condition1 = self.logic.checkExtension(self.pathLineEdit_selectionClassificationGroups.currentPath, ".csv")
if not condition1:
self.pathLineEdit_selectionClassificationGroups.setCurrentPath(" ")
return
# Read CSV File:
self.logic.table = self.logic.readCSVFile(self.pathLineEdit_selectionClassificationGroups.currentPath)
condition3 = self.logic.creationDictShapeModel(self.dictShapeModels)
# If the file is not conformed:
# Re-initialization of the dictionary containing the Classification Groups
if not condition3:
self.dictShapeModels = dict()
self.pathLineEdit_selectionClassificationGroups.setCurrentPath(" ")
return
# Enable/disable buttons
self.spinBox_healthyGroup.setEnabled(True)
self.pushButton_previewGroups.setEnabled(True)
self.MRMLTreeView_classificationGroups.setEnabled(True)
# Configuration of the spinbox specify the healthy group
# Set the Maximum value of spinBox_healthyGroup at the maximum number groups
self.spinBox_healthyGroup.setMaximum(len(self.dictShapeModels))
# Function to preview the Classification Groups in Slicer
#    - The opacity of all the vtk files is set to 0.8
#    - The healthy group is white and the others are red
def onPreviewGroupMeans(self):
    """Compute and load the mean mesh of every group, coloring the
    healthy group white and the others red (opacity 0.8)."""
    print "------ Preview of the Group's Mean in Slicer ------"
    for group, h5path in self.dictShapeModels.items():
        # Compute the mean of each group thanks to Statismo
        self.logic.computeMean(group, h5path)
        # Storage of the means for each group
        self.logic.storageMean(self.dictGroups, group)
    # If the user doesn't specify the healthy group
    #     error message for the user
    # Else
    #     load the Classification Groups in Slicer
    if self.spinBox_healthyGroup.value == 0:
        # Error message:
        slicer.util.errorDisplay('Miss the number of the healthy group ')
    else:
        for key in self.dictGroups.keys():
            filename = self.dictGroups.get(key, None)
            loader = slicer.util.loadModel
            loader(filename)
    # Change the color and the opacity for each vtk file
    # NOTE(review): shadows the builtin `list` for the rest of the method
    list = slicer.mrmlScene.GetNodesByClass("vtkMRMLModelNode")
    end = list.GetNumberOfItems()
    # NOTE(review): starts at 3, presumably to skip built-in scene nodes -- confirm
    for i in range(3,end):
        model = list.GetItemAsObject(i)
        disp = model.GetDisplayNode()
        for group in self.dictGroups.keys():
            filename = self.dictGroups.get(group, None)
            if os.path.splitext(os.path.basename(filename))[0] == model.GetName():
                if self.spinBox_healthyGroup.value == group:
                    disp.SetColor(1, 1, 1)
                    disp.VisibilityOn()
                else:
                    disp.SetColor(1, 0, 0)
                    disp.VisibilityOff()
                disp.SetOpacity(0.8)
                break
            # NOTE(review): hides the node while no group has matched yet;
            # placement reconstructed from mangled indentation -- confirm
            disp.VisibilityOff()
    # Center the 3D view of the scene
    layoutManager = slicer.app.layoutManager()
    threeDWidget = layoutManager.threeDWidget(0)
    threeDView = threeDWidget.threeDView()
    threeDView.resetFocalPoint()
# ---------------------------------------------------- #
# Tab: Select Input Data #
# ---------------------------------------------------- #
# Function to select the vtk Input Data
def onVTKInputData(self):
# Remove the old vtk file in the temporary directory of slicer if it exists
if self.patientList:
print "onVTKInputData remove old vtk file"
oldVTKPath = slicer.app.temporaryPath + "/" + os.path.basename(self.patientList[0])
if os.path.exists(oldVTKPath):
os.remove(oldVTKPath)
# Re-Initialization of the patient list
self.patientList = list()
# Handle checkbox "File already in the groups"
self.enableOption()
# Delete the path in CSV file
currentNode = self.MRMLNodeComboBox_VTKInputData.currentNode()
if currentNode == None:
return
self.pathLineEdit_CSVInputData.setCurrentPath(" ")
# Adding the vtk file to the list of patient
currentNode = self.MRMLNodeComboBox_VTKInputData.currentNode()
if not currentNode == None:
# Save the selected node in the temporary directory of slicer
vtkfilepath = slicer.app.temporaryPath + "/" + self.MRMLNodeComboBox_VTKInputData.currentNode().GetName() + ".vtk"
self.logic.saveVTKFile(self.MRMLNodeComboBox_VTKInputData.currentNode().GetPolyData(), vtkfilepath)
# Adding to the list
self.patientList.append(vtkfilepath)
# Function to handle the checkbox "File already in the groups"
def enableOption(self):
    """Enable or disable the "File already in the groups" checkbox
    according to the currently selected input data."""
    checkbox = self.checkBox_fileInGroups
    node = self.MRMLNodeComboBox_VTKInputData.currentNode()
    if node == None:
        # No VTK input: force the option off and grey it out
        if checkbox.isChecked():
            checkbox.setChecked(False)
        checkbox.setDisabled(True)
    elif os.path.exists(self.pathLineEdit_NewGroups.currentPath):
        # A VTK input plus known groups: the option becomes available
        checkbox.setEnabled(True)
    # Re-validate the checkbox state against the group contents
    self.onCheckFileInGroups()
# Function to check if the selected file is in the groups used to create the classification groups
#    - If it's not the case:
#          - display of a error message
#          - deselected checkbox
def onCheckFileInGroups(self):
    """Uncheck the option and warn the user when the selected VTK file is
    not one of the files used to build the classification groups."""
    if not self.checkBox_fileInGroups.isChecked():
        return
    node = self.MRMLNodeComboBox_VTKInputData.currentNode()
    if node == None:
        return
    # Look the file up in the dictionary of group VTK files
    vtkfileToFind = node.GetName() + '.vtk'
    found = self.logic.actionOnDictionary(self.dictVTKFiles, vtkfileToFind, None, 'find')
    if found == False:
        slicer.util.errorDisplay('The selected file is not a file used to create the Classification Groups!')
        self.checkBox_fileInGroups.setChecked(False)
# Function to select the CSV Input Data
def onCSVInputData(self):
    """Handle selection of a CSV file listing the patient VTK files.

    Clears any VTK-node input and fills self.patientList with the paths
    read from the first column of the CSV file.
    """
    self.patientList = list()
    # Ignore the change while the path does not point to an existing file
    if not os.path.exists(self.pathLineEdit_CSVInputData.currentPath):
        return
    # A CSV input excludes a VTK-node input
    self.MRMLNodeComboBox_VTKInputData.setCurrentNode(None)
    # Read the patient paths from the first column of the CSV file
    patientTable = self.logic.readCSVFile(self.pathLineEdit_CSVInputData.currentPath)
    for i in range(0, patientTable.GetNumberOfRows()):
        self.patientList.append(patientTable.GetValue(i,0).ToString())
    # Handle checkbox "File already in the groups"
    self.enableOption()
# Function to define the OA index type of the patient
#    *** CROSS VALIDATION:
#        - If the user specified that the vtk file was in the groups used to create the Classification Groups:
#             - Save the current classification groups
#             - Re-compute the new classification groups without this file
#             - Define the OA index type of a patient
#             - Recovery the classification groups
#    *** Define the OA index of a patient:
#        - Else:
#             - Compute the ShapeOALoads for each group
#             - Compute the OA index type of a patient
def onComputeOAIndex(self):
    """Classify each patient mesh against the group shape models,
    optionally excluding the mesh from its own group (cross validation)."""
    print "------ Compute the OA index Type of a patient ------"
    # Check if the user gave all the data used to compute the OA index type of the patient:
    #    - VTK input data or CSV input data
    #    - CSV file containing the Classification Groups
    if not os.path.exists(self.pathLineEdit_selectionClassificationGroups.currentPath):
        slicer.util.errorDisplay('Miss the CSV file containing the Classification Groups')
        return
    if self.MRMLNodeComboBox_VTKInputData.currentNode() == None and not self.pathLineEdit_CSVInputData.currentPath:
        slicer.util.errorDisplay('Miss the Input Data')
        return
    # **** CROSS VALIDATION ****
    # If the selected file is in the groups used to create the classification groups
    if self.checkBox_fileInGroups.isChecked():
        # Remove the file in the dictionary used to compute the classification groups
        listSaveVTKFiles = list()
        vtkfileToRemove = self.MRMLNodeComboBox_VTKInputData.currentNode().GetName() + '.vtk'
        listSaveVTKFiles = self.logic.actionOnDictionary(self.dictVTKFiles,
                                                         vtkfileToRemove,
                                                         listSaveVTKFiles,
                                                         'remove')
        # Copy the Classification Groups before overwriting them
        dictShapeModelsTemp = dict()
        dictShapeModelsTemp = self.dictShapeModels
        self.dictShapeModels = dict()
        # Re-compute the new classification groups without the removed file
        self.onComputeNewClassificationGroups()
    # *** Define the OA index type of a patient ***
    # For each patient:
    for patient in self.patientList:
        # Compute the ShapeOALoads for each group
        for key, value in self.dictShapeModels.items():
            self.logic.computeShapeOALoads(key, patient, value)
        # Compute the OA index type of a patient
        resultgroup = self.logic.computeOAIndex(self.dictShapeModels.keys())
        # Display the result in the next tab "Result/Analysis"
        self.displayResult(resultgroup, os.path.basename(patient))
    # Remove the CSV file containing the Shape OA Vector Loads
    self.logic.removeShapeOALoadsCSVFile(self.dictShapeModels.keys())
    # **** CROSS VALIDATION ****
    # Restore the state saved above.
    # NOTE(review): relies on the checkbox not changing during the run;
    # vtkfileToRemove/listSaveVTKFiles only exist if the first branch ran
    if self.checkBox_fileInGroups.isChecked():
        # Add the file previously removed to the dictionary used to create the classification groups
        self.logic.actionOnDictionary(self.dictVTKFiles,
                                      vtkfileToRemove,
                                      listSaveVTKFiles,
                                      'add')
        # Recovery the Classification Groups previously saved
        self.dictShapeModels = dictShapeModelsTemp
        # Remove the data previously created
        self.logic.removeDataAfterNCG(self.dictShapeModels)
# ---------------------------------------------------- #
# Tab: Result / Analysis #
# ---------------------------------------------------- #
# Function to display the result in a table
def displayResult(self, resultGroup, VTKfilename):
    """Append one row (vtk filename, assigned group) to the result table."""
    newRow = self.tableWidget_result.rowCount
    self.tableWidget_result.setRowCount(newRow + 1)
    # Column 0: name of the classified vtk file
    fileLabel = qt.QLabel(VTKfilename)
    fileLabel.setAlignment(0x84)  # horizontally + vertically centered
    self.tableWidget_result.setCellWidget(newRow, 0, fileLabel)
    # Column 1: group assigned by the classification
    groupLabel = qt.QLabel(resultGroup)
    groupLabel.setAlignment(0x84)
    self.tableWidget_result.setCellWidget(newRow, 1, groupLabel)
# Function to export the result in a CSV File
def onExportResult(self):
# Directory
directory = self.directoryButton_exportResult.directory.encode('utf-8')
basename = "OAResult.csv"
# Message if the csv file already exists
filepath = directory + "/" + basename
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
if os.path.exists(filepath):
messageBox.setText('File ' + filepath + ' already exists!')
messageBox.setInformativeText('Do you want to replace it ?')
messageBox.setStandardButtons( messageBox.No | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.No:
return
# Directory
directory = self.directoryButton_exportResult.directory.encode('utf-8')
# Store data in a dictionary
self.logic.creationCSVFileForResult(self.tableWidget_result, directory, basename)
# Message in the python console and for the user
print "Export CSV File: " + filepath
slicer.util.delayDisplay("Result saved")
# ------------------------------------------------------------------------------------ #
# ALGORITHM #
# ------------------------------------------------------------------------------------ #
class DiagnosticIndexLogic(ScriptedLoadableModuleLogic):
def __init__(self, interface):
    """Store the owning widget and initialize the logic state."""
    # Widget that owns this logic; used to reach the UI elements
    self.interface = interface
    # Parsed CSV table, overwritten by readCSVFile() results later on
    # NOTE(review): this binds the vtkTable *class*, not an instance -- confirm intended
    self.table = vtk.vtkTable
    # Anchor points of the SPV-style color bar: name -> [position, r, g, b]
    self.colorBar = {'Point1': [0, 0, 1, 0], 'Point2': [0.5, 1, 1, 0], 'Point3': [1, 1, 0, 0]}
# Helper to fetch a widget of the module's .ui file by object name.
def get(self, objectName):
    """Return the UI widget called *objectName* (or None if absent)."""
    root = self.interface.widget
    return self.findWidget(root, objectName)
def findWidget(self, widget, objectName):
    """Depth-first search for a descendant widget named *objectName*.

    Returns the widget, or None when nothing matches.
    """
    if widget.objectName == objectName:
        return widget
    # Recurse into the children until a match is found
    for child in widget.children():
        match = self.findWidget(child, objectName)
        if match:
            return match
    return None
# Function to add all the vtk filepaths found in the given directory of a dictionary
def addGroupToDictionary(self, dictCSVFile, directory, directoryList, group):
    """Register every .vtk file of *directory* under *group* and remember
    the directory at position group-1 of *directoryList*."""
    # Collect the vtk files of this group (os.listdir order preserved)
    dictCSVFile[group] = [directory + '/' + f
                          for f in os.listdir(directory) if f.endswith(".vtk")]
    # Keep the directory list aligned with the 1-based group numbers
    directoryList.insert((group - 1), directory)
# Function to remove the group of the dictionary
def removeGroupToDictionary(self, dictCSVFile, directoryList, group):
    """Forget *group*: drop its file list and its directory entry."""
    # Drop the vtk files registered for this group (no-op if absent)
    if group in dictCSVFile:
        del dictCSVFile[group]
    # Drop the matching directory (group numbers are 1-based)
    directoryList.pop(group - 1)
# Check if the path given has the right extension
def checkExtension(self, filename, extension):
    """Return True when *filename* ends with *extension*; otherwise show
    an error dialog and return False."""
    ext = os.path.splitext(os.path.basename(filename))[1]
    if ext != extension:
        slicer.util.errorDisplay('Wrong extension file, a CSV file is needed!')
        return False
    return True
# Function to read a CSV file
def readCSVFile(self, filename):
print "CSV FilePath: " + filename
CSVreader = vtk.vtkDelimitedTextReader()
CSVreader.SetFieldDelimiterCharacters(",")
CSVreader.SetFileName(filename)
CSVreader.SetHaveHeaders(True)
CSVreader.Update()
return CSVreader.GetOutput()
# Function to create a dictionary containing all the vtk filepaths sorted by group
#    - the paths are given by a CSV file
#    - If one paths doesn't exist
#         Return False
#      Else if all the path of all vtk file exist
#         Return True
def creationDictVTKFiles(self, dict):
    """Fill *dict* with {group number: vtk path or list of paths} read
    from self.table; return False on the first missing file."""
    for i in range(0, self.table.GetNumberOfRows()):
        # Column 0 = vtk path, column 1 = group number
        if not os.path.exists(self.table.GetValue(i,0).ToString()):
            slicer.util.errorDisplay('VTK file not found, path not good at lign ' + str(i+2))
            return False
        value = dict.get(self.table.GetValue(i,1).ToInt(), None)
        if value == None:
            # First file of the group: stored as a plain string
            dict[self.table.GetValue(i,1).ToInt()] = self.table.GetValue(i,0).ToString()
        else:
            if type(value) is ListType:
                value.append(self.table.GetValue(i,0).ToString())
            else:
                # Second file of the group: promote the string to a list
                tempList = list()
                tempList.append(value)
                tempList.append(self.table.GetValue(i,0).ToString())
                dict[self.table.GetValue(i,1).ToInt()] = tempList
    # Check
    # print "Number of Groups in CSV Files: " + str(len(dict))
    # for key, value in dict.items():
    #     print "Groupe: " + str(key)
    #     print "VTK Files: " + str(value)
    return True
# Function to check if in each group there is at least more than one mesh
def checkSeveralMeshInDict(self, dict):
    """Return True when every group holds at least two meshes; otherwise
    warn the user and return False."""
    for groupNumber, meshes in dict.items():
        # A single path (stored as a plain string) or a one-element list is not enough
        if type(meshes) is not ListType or len(meshes) == 1:
            slicer.util.errorDisplay('The group ' + str(groupNumber) + ' must contain more than one mesh.')
            return False
    return True
# Function to store the shape models for each group in a dictionary
# The function return True IF
#    - all the paths exist
#    - the extension of the paths is .h5
#    - there are only one shape model per group
# else False
def creationDictShapeModel(self, dict):
    """Fill *dict* with {group number: h5 path} read from self.table.

    Returns False (after an error dialog) when a path is missing, has the
    wrong extension, or a group appears twice; True otherwise.
    """
    for row in range(0, self.table.GetNumberOfRows()):
        h5Path = self.table.GetValue(row, 0).ToString()
        groupNumber = self.table.GetValue(row, 1).ToInt()
        # The shape model file must exist ...
        if not os.path.exists(h5Path):
            slicer.util.errorDisplay('H5 file not found, path not good at lign ' + str(row + 2))
            return False
        # ... be an hdf5 file ...
        if not os.path.splitext(os.path.basename(h5Path))[1] == '.h5':
            slicer.util.errorDisplay('Wrong extension file at lign ' + str(row + 2) + '. A hdf5 file is needed!')
            return False
        # ... and be the only model of its group
        if groupNumber in dict:
            slicer.util.errorDisplay('There are more than one shape model (hdf5 file) by groups')
            return False
        dict[groupNumber] = h5Path
    # Check
    # print "Number of Groups in CSV Files: " + str(len(dict))
    # for key, value in dict.items():
    #     print "Groupe: " + str(key)
    #     print "H5 Files: " + str(value)
    return True
# Function to add a color map "DisplayClassificationGroup" to all the vtk files
# which allow the user to visualize each group with a different color in ShapePopulationViewer
def addColorMap(self, table, dictVTKFiles):
    """Copy every group vtk file into Slicer's temporary directory with a
    point-data array holding its group number, for coloring in SPV."""
    for key, value in dictVTKFiles.items():
        for vtkFile in value:
            # Read VTK File
            reader = vtk.vtkDataSetReader()
            reader.SetFileName(vtkFile)
            reader.ReadAllVectorsOn()
            reader.ReadAllScalarsOn()
            reader.Update()
            polyData = reader.GetOutput()
            # Copy of the polydata so the file on disk stays untouched
            polyDataCopy = vtk.vtkPolyData()
            polyDataCopy.DeepCopy(polyData)
            pointData = polyDataCopy.GetPointData()
            # Add a New Array "DisplayClassificationGroup" to the polydata copy
            # which will have as the value for all the points the group associated of the mesh
            numPts = polyDataCopy.GetPoints().GetNumberOfPoints()
            arrayName = "DisplayClassificationGroup"
            hasArrayInt = pointData.HasArray(arrayName)
            if hasArrayInt == 1:
                # Replace any stale array left over from a previous run
                pointData.RemoveArray(arrayName)
            arrayToAdd = vtk.vtkDoubleArray()
            arrayToAdd.SetName(arrayName)
            arrayToAdd.SetNumberOfComponents(1)
            arrayToAdd.SetNumberOfTuples(numPts)
            for i in range(0, numPts):
                # Every point carries the group number of the mesh
                arrayToAdd.InsertTuple1(i, key)
            pointData.AddArray(arrayToAdd)
            # Save in the temporary directory in Slicer the vtk file with the new array
            # to visualize them in Shape Population Viewer
            writer = vtk.vtkPolyDataWriter()
            filepath = slicer.app.temporaryPath + '/' + os.path.basename(vtkFile)
            writer.SetFileName(filepath)
            # VTK 5 and older use SetInput, newer versions SetInputData
            if vtk.VTK_MAJOR_VERSION <= 5:
                writer.SetInput(polyDataCopy)
            else:
                writer.SetInputData(polyDataCopy)
            writer.Update()
            writer.Write()
# Function to create a CSV file containing all the selected vtk files that the user wants to display in SPV
def creationCSVFileForSPV(self, filename, table, dictVTKFiles):
    """Write *filename* with a single 'VTK Files' column listing the
    temporary-directory path of every vtk file whose row is checked in
    *table*. (*dictVTKFiles* is kept for interface compatibility.)"""
    # Creation a CSV file with a header 'VTK Files'
    with open(filename, 'w') as file:
        cw = csv.writer(file, delimiter=',')
        cw.writerow(['VTK Files'])
        # Add the path of the vtk files if the users selected it
        for row in range(0, table.rowCount):
            # Checkbox of the row (column 2)
            widget = table.cellWidget(row, 2)
            checkBox = widget.children()[1]
            if checkBox.isChecked():
                # Recovery of the vtk filename (column 0)
                qlabel = table.cellWidget(row, 0)
                vtkFile = qlabel.text
                # SPV displays the colored copies in Slicer's temporary directory
                pathVTKFile = slicer.app.temporaryPath + '/' + vtkFile
                cw.writerow([pathVTKFile])
# Function to fill the table of the preview of all VTK files
#    - Checkable combobox: allow the user to select one or several groups that he wants to display in SPV
#    - Column 0: filename of the vtk file
#    - Column 1: combobox with the group corresponding to the vtk file
#    - Column 2: checkbox to allow the user to choose which models will be displayed in SPV
#    - Column 3: color that the mesh will have in SPV
def fillTableForPreviewVTKFilesInSPV(self, dictVTKFiles, checkableComboBox, table):
    """Populate *checkableComboBox* with one entry per group and *table*
    with one row per vtk file (name, group combobox, checkbox, color)."""
    row = 0
    for key, value in dictVTKFiles.items():
        # Fill the Checkable Combobox
        checkableComboBox.addItem("Group " + str(key))
        # Table:
        for vtkFile in value:
            table.setRowCount(row + 1)
            # Column 0: filename of the mesh
            filename = os.path.basename(vtkFile)
            labelVTKFile = qt.QLabel(filename)
            labelVTKFile.setAlignment(0x84)  # horizontally + vertically centered
            table.setCellWidget(row, 0, labelVTKFile)
            # Column 1: combobox preset to the file's current group
            widget = qt.QWidget()
            layout = qt.QHBoxLayout(widget)
            comboBox = qt.QComboBox()
            # NOTE(review): keys are ints; QComboBox.addItems expects strings --
            # confirm the PythonQt binding converts them
            comboBox.addItems(dictVTKFiles.keys())
            comboBox.setCurrentIndex(key - 1)
            layout.addWidget(comboBox)
            layout.setAlignment(0x84)
            layout.setContentsMargins(0, 0, 0, 0)
            widget.setLayout(layout)
            table.setCellWidget(row, 1, widget)
            comboBox.connect('currentIndexChanged(int)', self.interface.onGroupValueChanged)
            # Column 2: checkbox controlling the SPV preview of this file
            widget = qt.QWidget()
            layout = qt.QHBoxLayout(widget)
            checkBox = qt.QCheckBox()
            layout.addWidget(checkBox)
            layout.setAlignment(0x84)
            layout.setContentsMargins(0, 0, 0, 0)
            widget.setLayout(layout)
            table.setCellWidget(row, 2, widget)
            checkBox.connect('stateChanged(int)', self.interface.onCheckBoxTableValueChanged)
            # Column 3: preview color, initialized to white
            table.setItem(row, 3, qt.QTableWidgetItem())
            table.item(row,3).setBackground(qt.QColor(255,255,255))
            row = row + 1
# Function to change the group of a vtk file
#    - The user can change the group thanks to the combobox in the table used for the preview in SPV
def onComboBoxTableValueChanged(self, dictVTKFiles, table):
    """Re-sort *dictVTKFiles* so each file is listed under the group
    currently shown in its table combobox."""
    # For each row of the table
    for row in range(0,table.rowCount):
        # Group currently selected in the combobox (column 1, 1-based)
        widget = table.cellWidget(row, 1)
        tuple = widget.children()
        comboBox = qt.QComboBox()  # NOTE(review): dead placeholder, overwritten on the next line
        comboBox = tuple[1]
        group = comboBox.currentIndex + 1
        # Filename of the vtk file (column 0 label)
        qlabel = table.cellWidget(row, 0)
        vtkFile = qlabel.text
        # Update the dictionary if the vtk file has not the same group in the combobox than in the dictionary
        value = dictVTKFiles.get(group, None)
        if not any(vtkFile in s for s in value):
            # Find which list of the dictionary the vtk file is in
            for value in dictVTKFiles.values():
                if any(vtkFile in s for s in value):
                    pathList = [s for s in value if vtkFile in s]
                    path = pathList[0]
                    # Remove the vtk file from the wrong group
                    value.remove(path)
                    # Add the vtk file in the right group
                    newvalue = dictVTKFiles.get(group, None)
                    newvalue.append(path)
                    break
# Function to create the same color transfer function than there is in SPV
def creationColorTransfer(self, groupSelected):
    """Build a vtkColorTransferFunction spanning the selected group
    numbers, with the anchor colors defined in self.colorBar."""
    # Creation of the color transfer function with the updated range
    colorTransferFunction = vtk.vtkColorTransferFunction()
    if len(groupSelected) > 0:
        # NOTE(review): takes min/max from the set's iteration order --
        # works for small CPython ints, but sorted() would be safer; confirm
        groupSelectedList = list(groupSelected)
        rangeColorTransfer = [groupSelectedList[0], groupSelectedList[len(groupSelectedList) - 1]]
        colorTransferFunction.AdjustRange(rangeColorTransfer)
        for key, value in self.colorBar.items():
            # position of the current anchor, scaled into the selected range
            x = (groupSelectedList[len(groupSelectedList) - 1] - groupSelectedList[0]) * value[0] + groupSelectedList[0]
            # color of the current anchor
            r = value[1]
            g = value[2]
            b = value[3]
            colorTransferFunction.AddRGBPoint(x,r,g,b)
    return colorTransferFunction
# Function to copy and delete all the arrays of all the meshes contained in a list
def deleteArrays(self, key, value):
    """Copy each vtk file of *value* into Slicer's temporary directory
    with every point-data array removed (*key* is unused, kept for the
    caller's interface)."""
    for vtkFile in value:
        # Load the mesh from disk
        reader = vtk.vtkDataSetReader()
        reader.SetFileName(vtkFile)
        reader.ReadAllVectorsOn()
        reader.ReadAllScalarsOn()
        reader.Update()
        # Work on a deep copy so the original file stays untouched
        meshCopy = vtk.vtkPolyData()
        meshCopy.DeepCopy(reader.GetOutput())
        pointData = meshCopy.GetPointData()
        # Removing index 0 repeatedly pops every array in turn
        for _ in range(pointData.GetNumberOfArrays()):
            pointData.RemoveArray(0)
        # Save the stripped copy under the same basename in the temporary directory
        outPath = slicer.app.temporaryPath + '/' + os.path.basename(vtkFile)
        self.saveVTKFile(meshCopy, outPath)
# Function to save a VTK file to the filepath given
def saveVTKFile(self, polydata, filepath):
    """Write *polydata* to *filepath* with vtkPolyDataWriter."""
    writer = vtk.vtkPolyDataWriter()
    writer.SetFileName(filepath)
    # VTK 6+ uses SetInputData; VTK 5 and older use SetInput
    if vtk.VTK_MAJOR_VERSION > 5:
        writer.SetInputData(polydata)
    else:
        writer.SetInput(polydata)
    writer.Update()
    writer.Write()
# Function to save in the temporary directory of Slicer a shape model file called GX.h5
# built with the vtk files contained in the group X
def buildShapeModel(self, groupnumber, vtkList):
    """Run the hidden 'saveModel' CLI to build the shape model of one group."""
    print "--- Build the shape model of the group " + str(groupnumber) + " ---"
    # Call of saveModel used to build a shape model from a given list of meshes
    # Arguments:
    #  --groupnumber is the number of the group used to create the shape model
    #  --vtkfilelist is a list of vtk paths of one group that will be used to create the shape model
    #  --resultdir is the path where the newly build model should be saved
    # Creation of the command line
    scriptedModulesPath = eval('slicer.modules.%s.path' % self.interface.moduleName.lower())
    scriptedModulesPath = os.path.dirname(scriptedModulesPath)
    libPath = os.path.join(scriptedModulesPath)
    # NOTE(review): sys.path grows by one entry on every call -- confirm intended
    sys.path.insert(0, libPath)
    saveModel = os.path.join(scriptedModulesPath, '../hidden-cli-modules/saveModel')
    #saveModel = "/Users/lpascal/Desktop/test/DiagnosticIndexExtension-build/bin/saveModel"
    arguments = list()
    arguments.append("--groupnumber")
    arguments.append(groupnumber)
    arguments.append("--vtkfilelist")
    # Comma-separated list of the group's vtk paths (keeps a trailing comma)
    vtkfilelist = ""
    for vtkFiles in vtkList:
        vtkfilelist = vtkfilelist + vtkFiles + ','
    arguments.append(vtkfilelist)
    arguments.append("--resultdir")
    resultdir = slicer.app.temporaryPath
    arguments.append(resultdir)
    # Call the CLI and block until it finishes
    process = qt.QProcess()
    print "Calling " + os.path.basename(saveModel)
    process.start(saveModel, arguments)
    process.waitForStarted()
    # print "state: " + str(process.state())
    process.waitForFinished()
    # print "error: " + str(process.error())
# Function to compute the mean between all the mesh-files contained in one group
def computeMean(self, group, h5path):
    """Run the hidden 'computeMean' CLI to extract the mean mesh of one
    group's shape model into Slicer's temporary directory."""
    print "--- Compute the mean of the group " + str(group) + " ---"
    # Call of computeMean used to compute a mean from a shape model
    # Arguments:
    #  --groupnumber is the number of the group used to create the shape model
    #  --resultdir is the path where the newly build model should be saved
    #  --shapemodel: Shape model of one group (H5 file path)
    # Creation of the command line
    scriptedModulesPath = eval('slicer.modules.%s.path' % self.interface.moduleName.lower())
    scriptedModulesPath = os.path.dirname(scriptedModulesPath)
    libPath = os.path.join(scriptedModulesPath)
    # NOTE(review): sys.path grows by one entry on every call -- confirm intended
    sys.path.insert(0, libPath)
    computeMean = os.path.join(scriptedModulesPath, '../hidden-cli-modules/computeMean')
    #computeMean = "/Users/lpascal/Desktop/test/DiagnosticIndexExtension-build/bin/computeMean"
    arguments = list()
    arguments.append("--groupnumber")
    arguments.append(group)
    arguments.append("--resultdir")
    resultdir = slicer.app.temporaryPath
    arguments.append(resultdir)
    arguments.append("--shapemodel")
    arguments.append(h5path)
    # Call the executable and block until it finishes
    process = qt.QProcess()
    print "Calling " + os.path.basename(computeMean)
    process.start(computeMean, arguments)
    process.waitForStarted()
    # print "state: " + str(process2.state())
    process.waitForFinished()
    # print "error: " + str(process2.error())
# Function to remove in the temporary directory all the data used to create the mean for each group
def removeDataVTKFiles(self, value):
    """Delete from the temporary directory every VTK file listed in *value*."""
    for vtkFile in value:
        candidate = slicer.app.temporaryPath + '/' + os.path.basename(vtkFile)
        # Tolerate files that were already cleaned up.
        if os.path.exists(candidate):
            os.remove(candidate)
# Function to storage the mean of each group in a dictionary
def storageMean(self, dictGroups, key):
    """Record, under *key*, the temp-dir path of the mean VTK file of that group."""
    dictGroups[key] = slicer.app.temporaryPath + '/' + "meanGroup" + str(key) + '.vtk'
# Function to storage the shape model of each group in a dictionary
def storeShapeModel(self, dictShapeModels, key):
    """Record, under *key*, the temp-dir path of the shape model (H5) of that group."""
    dictShapeModels[key] = slicer.app.temporaryPath + '/' + "G" + str(key) + '.h5'
# Function to create a CSV file:
# - Two columns are always created:
# - First column: path of the vtk files
# - Second column: group associated to this vtk file
# - If saveH5 is True, this CSV file will contain a New Classification Group, a thrid column is then added
# - Thrid column: path of the shape model of each group
def creationCSVFile(self, directory, CSVbasename, dictForCSV, option):
    """Write *dictForCSV* out as a CSV file in *directory*.

    option == "Groups": each dict value is a list of VTK file paths; one row
        per file with columns ['VTK Files', 'Group'].
    option == "NCG":    each dict value is a single shape-model (H5) path;
        one row per group with columns ['H5 Path', 'Group'].
    """
    CSVFilePath = directory + "/" + CSVbasename
    file = open(CSVFilePath, 'w')
    cw = csv.writer(file, delimiter=',')
    if option == "Groups":
        cw.writerow(['VTK Files', 'Group'])
    elif option == "NCG":
        cw.writerow(['H5 Path', 'Group'])
    for key, value in dictForCSV.items():
        # A list value holds VTK files (Groups case); a scalar value is
        # assumed to be an H5 path (NCG case) — TODO confirm against callers.
        if type(value) is ListType:
            for vtkFile in value:
                if option == "Groups":
                    cw.writerow([vtkFile, str(key)])
        elif option == "NCG":
            cw.writerow([value, str(key)])
    file.close()
# Function to save the data of the new Classification Groups in the directory given by the user
# - The mean vtk files of each groups
# - The shape models of each groups
# - The CSV file containing:
# - First column: the paths of mean vtk file of each group
# - Second column: the groups associated
# - Third column: the paths of the shape model of each group
def saveNewClassificationGroups(self, basename, directory, dictShapeModels):
    """Export the new Classification Groups (per-group H5 shape models plus a
    CSV index mapping group number -> exported H5 path) into *directory*."""
    dictForCSV = dict()
    for key, value in dictShapeModels.items():
        # Save the shape model (h5 file) of each group
        h5Basename = "G" + str(key) + ".h5"
        oldh5path = slicer.app.temporaryPath + "/" + h5Basename
        newh5path = directory + "/" + h5Basename
        shutil.copyfile(oldh5path, newh5path)
        dictForCSV[key] = newh5path
    # Save the CSV file containing all the data useful in order to compute
    # the OAIndex of a patient
    self.creationCSVFile(directory, basename, dictForCSV, "NCG")
# Function to remove in the temporary directory all the data useless after to do a export of the new Classification Groups
def removeDataAfterNCG(self, dict):
    """Delete the per-group shape models (G<key>.h5) left in the temp directory
    once the new Classification Groups have been exported."""
    for key in dict.keys():
        modelPath = slicer.app.temporaryPath + "/G" + str(key) + ".h5"
        if os.path.exists(modelPath):
            os.remove(modelPath)
# Function to make some action on a dictionary
def actionOnDictionary(self, dict, file, listSaveVTKFiles, action):
    """Find, remove or re-add a VTK file in the group dictionary *dict*.

    action == 'remove': drop *file* from its group; on success return
        listSaveVTKFiles extended with [key, vtkFile], else False.
    action == 'find':   return True if *file* is present, else False.
    action == 'add':    append listSaveVTKFiles[1] back under the group key
        stored in listSaveVTKFiles[0].
    """
    # Action Remove:
    #   Remove the vtk file to the dictionary dict
    #   If the vtk file was found:    return a list containing the key and
    #   the vtk file.  Else: return False
    # Action Find:
    #   Find the vtk file in the dictionary dict
    #   If the vtk file was found: return True.  Else: return False
    if action == 'remove' or action == 'find':
        if not file == None:
            for key, value in dict.items():
                for vtkFile in value:
                    # Compare on basenames: *file* is a bare filename.
                    filename = os.path.basename(vtkFile)
                    if filename == file:
                        if action == 'remove':
                            value.remove(vtkFile)
                            listSaveVTKFiles.append(key)
                            listSaveVTKFiles.append(vtkFile)
                            return listSaveVTKFiles
                        return True
        return False
    # Action Add:
    #   Add a vtk file to the dictionary dict at the given key contained in
    #   the first case of the list
    if action == 'add':
        if not listSaveVTKFiles == None and not file == None:
            value = dict.get(listSaveVTKFiles[0], None)
            value.append(listSaveVTKFiles[1])
# Function in order to compute the shape OA loads of a sample
def computeShapeOALoads(self, groupnumber, vtkfilepath, shapemodel):
    """Compute the shape OA loads of one sample against one group's shape model.

    Runs the 'computeShapeOALoads' hidden CLI module synchronously.
    CLI arguments:
        --vtkfile:     sample input data (VTK file path)
        --resultdir:   the path where the newly built model should be saved
        --groupnumber: the number of the group used to create the shape model
        --shapemodel:  shape model of one group (H5 file path)
    """
    # Creation of the command line: locate the CLI executable relative to
    # this scripted module's install path.
    scriptedModulesPath = eval('slicer.modules.%s.path' % self.interface.moduleName.lower())
    scriptedModulesPath = os.path.dirname(scriptedModulesPath)
    libPath = os.path.join(scriptedModulesPath)
    sys.path.insert(0, libPath)
    computeShapeOALoads = os.path.join(scriptedModulesPath, '../hidden-cli-modules/computeShapeOALoads')
    #computeShapeOALoads = "/Users/lpascal/Desktop/test/DiagnosticIndexExtension-build/bin/computeShapeOALoads"
    arguments = list()
    arguments.append("--groupnumber")
    arguments.append(groupnumber)
    arguments.append("--vtkfile")
    arguments.append(vtkfilepath)
    arguments.append("--resultdir")
    resultdir = slicer.app.temporaryPath  # output goes to Slicer's temp dir
    arguments.append(resultdir)
    arguments.append("--shapemodel")
    arguments.append(shapemodel)
    # Call the CLI and block until it finishes.
    process = qt.QProcess()
    print "Calling " + os.path.basename(computeShapeOALoads)
    process.start(computeShapeOALoads, arguments)
    process.waitForStarted()
    # print "state: " + str(process.state())
    process.waitForFinished()
    # print "error: " + str(process.error())
# Function to compute the OA index of a patient
def computeOAIndex(self, keyList):
    """Compute the OA index of a patient for each group in *keyList* and
    return the 1-based group number with the smallest index.

    For each group, reads ShapeOAVectorLoadsG<key>.csv from the temporary
    directory and computes sqrt(sum(load^2)) / nRows.  Returns None if any
    expected CSV file is missing.
    """
    OAIndexList = list()
    for key in keyList:
        ShapeOAVectorLoadsPath = slicer.app.temporaryPath + "/ShapeOAVectorLoadsG" + str(key) + ".csv"
        if not os.path.exists(ShapeOAVectorLoadsPath):
            return
        # NOTE: the previous dead assignment of the vtk.vtkTable class object
        # was removed; readCSVFile returns the populated table directly.
        tableShapeOAVectorLoads = self.readCSVFile(ShapeOAVectorLoadsPath)
        squaredSum = 0  # renamed from 'sum' to stop shadowing the builtin
        for row in range(0, tableShapeOAVectorLoads.GetNumberOfRows()):
            ShapeOALoad = tableShapeOAVectorLoads.GetValue(row, 0).ToDouble()
            squaredSum = squaredSum + math.pow(ShapeOALoad, 2)
        OAIndexList.append(math.sqrt(squaredSum) / tableShapeOAVectorLoads.GetNumberOfRows())
    # print OAIndexList
    resultGroup = OAIndexList.index(min(OAIndexList)) + 1
    # print "RESULT: " + str(resultGroup)
    return resultGroup
# Function to remove the shape model of each group
def removeShapeOALoadsCSVFile(self, keylist):
    """Delete the per-group ShapeOAVectorLoadsG<key>.csv files from the temp dir."""
    for key in keylist:
        loadsPath = slicer.app.temporaryPath + "/ShapeOAVectorLoadsG" + str(key) + ".csv"
        if os.path.exists(loadsPath):
            os.remove(loadsPath)
def creationCSVFileForResult(self, table, directory, CSVbasename):
    """Export the classification results shown in the Qt *table* widget to
    <directory>/<CSVbasename> with columns ['VTK Files', 'Assigned Group'].

    Column 0 of the table holds the VTK filename label, column 1 the
    assigned-group label.
    """
    CSVFilePath = directory + "/" + CSVbasename
    file = open(CSVFilePath, 'w')
    try:
        cw = csv.writer(file, delimiter=',')
        cw.writerow(['VTK Files', 'Assigned Group'])
        for row in range(0, table.rowCount):
            # Recovery of the filename of vtk file
            qlabel = table.cellWidget(row, 0)
            vtkFile = qlabel.text
            # Recovery of the assigned group
            qlabel = table.cellWidget(row, 1)
            assignedGroup = qlabel.text
            # Write the result in the CSV File
            cw.writerow([vtkFile, str(assignedGroup)])
    finally:
        # FIX: the handle was previously never closed, risking unflushed data.
        file.close()
class DiagnosticIndexTest(ScriptedLoadableModuleTest):
    # Placeholder Slicer self-test case: no module tests implemented yet.
    pass
| laurapascal/DiagnosticIndexExtension | src/DiagnosticIndex/DiagnosticIndex.py | Python | apache-2.0 | 76,598 | [
"VTK"
] | 5d833e12d4531d3be0856b083679a8c8598b0bfdd9d876daa36153c5628cf97a |
from lettuce import step, world
# Browse from page to page
@step(r'(?:visit|access|open) the url "(.*)"')
def go_to_the_url(step, url):
    # Navigate the shared browser to *url*; keep the return value on world
    # so later steps can inspect it.
    world.response = world.browser.visit(url)
@step(r'go back(?: a page)?')
def go_back(step):
    # Browser history: one step back.
    world.browser.back()
@step(r'go forward(?: a page)?')
def go_forward(step):
    # Browser history: one step forward.
    world.browser.forward()
@step(r'(?:reload|refresh)(?: the page)?')
def reload(step):
    # Reload the current page.
    world.browser.reload()
| adw0rd/salad-py3 | salad/steps/browser/navigation.py | Python | bsd-3-clause | 436 | [
"VisIt"
] | 82372633afbecd1c5031e6fc628aa664a7528d7d1f1a3f209756da222fd4d168 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'

kubeconfig_path = '/root/cdk/kubeconfig'

# Make sure snap-installed binaries (kubectl, kubelet, ...) are on PATH.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

# Charm key/value store, persisted across hook invocations.
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Migrate state from older charm revisions and schedule a restart."""
    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()
    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')
    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.destroy('feature-gates')
    kubelet_opts.destroy('experimental-nvidia-gpus')
    # Force re-installation of CNI plugins and regeneration of config.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag an upgrade if any attached charm resource file has changed."""
    hookenv.status_set('maintenance', 'Checking resources')
    resources = ['kubectl', 'kubelet', 'kube-proxy']
    paths = [hookenv.resource_get(resource) for resource in resources]
    if any_file_changed(paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the snaps as needing an upgrade; auto-approve it unless the
    operator has set require-manual-upgrade (after the first install)."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    previous_channel = config.previous('channel')
    require_manual = config.get('require-manual-upgrade')
    # First install (no previous channel) always proceeds automatically.
    if previous_channel is None or not require_manual:
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove state, services and files left behind by pre-snap charm
    revisions (debian-package-based kubelet/kube-proxy installs)."""
    # remove old states
    remove_state('kubernetes-worker.components.installed')

    # disable old services
    services = ['kubelet', 'kube-proxy']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        service_stop(service)

    # cleanup old files
    files = [
        "/lib/systemd/system/kubelet.service",
        # FIX: a missing comma previously fused this path with the next one
        # into "/lib/systemd/system/kube-proxy.service/etc/default/kube-default",
        # so neither file was ever removed.
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)

    # cleanup old flagmanagers
    FlagManager('kubelet').destroy_all()
    FlagManager('kube-proxy').destroy_all()
@when('config.changed.channel')
def channel_changed():
    """A new snap channel was configured: schedule a snap upgrade."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block until the operator runs the manual upgrade action."""
    msg = 'Needs manual upgrade, run the upgrade action'
    hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the kubectl, kubelet and kube-proxy snaps from
    the configured channel, then clear the upgrade flags."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    hookenv.status_set('maintenance', 'Installing kubectl snap')
    snap.install('kubectl', channel=channel, classic=True)
    hookenv.status_set('maintenance', 'Installing kubelet snap')
    snap.install('kubelet', channel=channel, classic=True)
    hookenv.status_set('maintenance', 'Installing kube-proxy snap')
    snap.install('kube-proxy', channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the kubelet service
        - stop the kube-proxy service
        - remove the 'kubernetes-worker.cni-plugins.installed' state
    '''
    # Only deregister the node if we ever generated a kubeconfig.
    if os.path.isfile(kubeconfig_path):
        kubectl('delete', 'node', gethostname())
    service_stop('kubelet')
    service_stop('kube-proxy')
    remove_state('kubernetes-worker.cni-plugins.installed')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')

    # Get the resource via resource_get
    try:
        archive = hookenv.resource_get('cni')
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)

    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    # Output looks like b'Kubernetes v1.x.y\n'; keep only the version digits.
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify to the user we are in a transient state and the application
    is still converging. Potentially remotely, or we may be in a detached loop
    wait state '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool waiting
    # to self host the dns pod, and configure itself to query the dns service
    # declared in the kube-system namespace
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    '''Update the status message with the current status of kubelet.'''
    update_kubelet_status()
def update_kubelet_status():
    ''' There are different states that the kubelet can be in, where we are
    waiting for dns, waiting for cluster turnup, or ready to serve
    applications.'''
    # Poll systemctl once. The previous if/elif form polled twice; a state
    # change between the two polls could leave the unit with no status set.
    if _systemctl_is_active('snap.kubelet.daemon'):
        hookenv.status_set('active', 'Kubernetes worker running.')
    else:
        # if kubelet is not running, we're waiting on something else to
        # converge
        hookenv.status_set('waiting', 'Waiting for kubelet to start.')
@when('certificates.available')
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        gethostname()
    ]

    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')

    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    # data_changed caches the last-seen values, so any delta triggers restart.
    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns) or
            data_changed('cluster-cidr', cluster_cidr)):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved', 'kube-control.dns.available',
      'cni.available', 'kubernetes-worker.restart-needed')
def start_worker(kube_api, kube_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We
    # know what its IP will eventually be, though, so we can go ahead and
    # configure kubelet with that info. This ensures that early pods are
    # configured with the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    # set --allow-privileged flag for kubelet
    set_privileged()

    create_config(servers[0])
    configure_worker_services(servers, dns, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set worker configuration on the CNI relation. This lets the CNI
    subordinate know that we're the worker so it can respond accordingly. '''
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    ''' Ingress is a toggled state. Remove ingress.available if set when
    toggled '''
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' If configuration has ingress RC enabled, launch the ingress load
    balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()
    # If ingress is enabled, launch the ingress controller
    if config.get('ingress'):
        launch_default_ingress_controller()
    else:
        hookenv.log('Deleting the http backend and ingress.')
        kubectl_manifest('delete',
                         '/root/cdk/addons/default-http-backend.yaml')
        kubectl_manifest('delete',
                         '/root/cdk/addons/ingress-replication-controller.yaml')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
    ''' Scale the number of ingress controller replicas to match the number of
    nodes. '''
    try:
        output = kubectl('get', 'nodes', '-o', 'name')
        count = len(output.splitlines())
        kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller')  # noqa
    except CalledProcessError:
        hookenv.log('Failed to scale ingress controllers. Will attempt again next update.')  # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node. '''
    # scrub and try to format an array from the configuration option
    config = hookenv.config()
    user_labels = _parse_labels(config.get('labels'))

    # For diffing sake, iterate the previous label set
    if config.previous('labels'):
        previous_labels = _parse_labels(config.previous('labels'))
        hookenv.log('previous labels: {}'.format(previous_labels))
    else:
        # this handles first time run if there is no previous labels config
        previous_labels = _parse_labels("")

    # Calculate label removal
    for label in previous_labels:
        if label not in user_labels:
            hookenv.log('Deleting node label {}'.format(label))
            try:
                _apply_node_label(label, delete=True)
            except CalledProcessError:
                hookenv.log('Error removing node label {}'.format(label))
        # if the label is in user labels we do nothing here, it will get set
        # during the atomic update below.

    # Atomically set a label
    for label in user_labels:
        _apply_node_label(label)
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # Ask dpkg for this system's package architecture and decode the
    # binary result into a plain string.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server):
    '''Create a kubernetes configuration for the worker unit.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    key = layer_options.get('client_key_path')
    cert = layer_options.get('client_certificate_path')

    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca, key, cert,
                      user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig('/root/.kube/config', server, ca, key, cert,
                      user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services.
    create_kubeconfig(kubeconfig_path, server, ca, key, cert,
                      user='kubelet')
def configure_worker_services(api_servers, dns, cluster_cidr):
    ''' Add remaining flags for the worker services and configure snaps to use
    them '''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Accumulate kubelet command-line flags, then hand them to the snap.
    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add('require-kubeconfig', 'true')
    kubelet_opts.add('kubeconfig', kubeconfig_path)
    kubelet_opts.add('network-plugin', 'cni')
    kubelet_opts.add('logtostderr', 'true')
    kubelet_opts.add('v', '0')
    kubelet_opts.add('address', '0.0.0.0')
    kubelet_opts.add('port', '10250')
    kubelet_opts.add('cluster-dns', dns['sdn-ip'])
    kubelet_opts.add('cluster-domain', dns['domain'])
    kubelet_opts.add('anonymous-auth', 'false')
    kubelet_opts.add('client-ca-file', ca_cert_path)
    kubelet_opts.add('tls-cert-file', server_cert_path)
    kubelet_opts.add('tls-private-key-file', server_key_path)

    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('cluster-cidr', cluster_cidr)
    kube_proxy_opts.add('kubeconfig', kubeconfig_path)
    kube_proxy_opts.add('logtostderr', 'true')
    kube_proxy_opts.add('v', '0')
    kube_proxy_opts.add('master', ','.join(api_servers), strict=True)

    # Push the accumulated flags into the snaps' configuration.
    cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
    check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
                      context='juju-context', cluster='juju-cluster'):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
          '--client-key={2} --client-certificate={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, user, key, certificate)))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress replication controller manifest
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    # Snap-managed daemons are named snap.<name>.daemon.
    for daemon in ['kube-proxy', 'kubelet']:
        service_restart('snap.%s.daemon' % daemon)
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    # Flatten every advertised host of every related service into
    # https://host:port endpoint strings.
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args)
    hookenv.log('Executing {}'.format(command))
    # check_output raises CalledProcessError on non-zero exit.
    return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if succesful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    else:
        return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    :return True on success, False otherwise
    '''
    # Deletions are a special case
    if operation == 'delete':
        # Ensure we immediately remove requested resources with --now
        return kubectl_success(operation, '-f', manifest, '--now')
    else:
        # Guard against an error re-creating the same manifest multiple times
        if operation == 'create':
            # If we already have the definition, its probably safe to assume
            # creation was true.
            if kubectl_success('get', '-f', manifest):
                hookenv.log('Skipping definition for {}'.format(manifest))
                return True
        # Execute the requested command that did not match any of the special
        # cases above
        return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """First-time NRPE setup: mark it done, then write the checks."""
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write Nagios NRPE checks for the worker's systemd services."""
    # List of systemd services that will be checked
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Tear down the NRPE checks when the nagios relation goes away."""
    remove_state('nrpe-external-master.initial-config')

    # List of systemd services for which the checks will be removed
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)

    for service in services:
        nrpe_setup.remove_check(shortname=service)
def set_privileged():
    """Update the allow-privileged flag for kubelet.

    'auto' resolves to true when GPUs are enabled, false otherwise.
    Also sets/removes the 'kubernetes-worker.privileged' reactive state.
    """
    privileged = hookenv.config('allow-privileged')
    if privileged == 'auto':
        gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
        privileged = 'true' if gpu_enabled else 'false'

    flag = 'allow-privileged'
    hookenv.log('Setting {}={}'.format(flag, privileged))

    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add(flag, privileged)

    if privileged == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    kubelet_opts = FlagManager('kubelet')
    # The GPU kubelet flag differs between k8s versions.
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('feature-gates', 'Accelerators=true')

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.

    This handler fires when we're running in gpu mode, and then the operator
    sets allow-privileged="false". Since we can no longer run privileged
    containers, we need to disable gpu mode.
    """
    hookenv.log('Disabling gpu mode')

    kubelet_opts = FlagManager('kubelet')
    # Undo whichever version-specific flag enable_gpu added.
    if get_version('kubelet') < (1, 6):
        kubelet_opts.destroy('experimental-nvidia-gpus')
    else:
        kubelet_opts.remove('feature-gates', 'Accelerators=true')

    # Remove node labels
    _apply_node_label('gpu', delete=True)
    _apply_node_label('cuda', delete=True)

    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.
    """
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.
    """
    kube_control.set_gpu(False)
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading a
    a charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.
    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
def _systemctl_is_active(application):
    """Return True when ``systemctl is-active`` reports *application* running."""
    try:
        output = check_output(['systemctl', 'is-active', application])
    except Exception:
        # Any failure (unknown unit, systemctl unavailable) counts as inactive.
        return False
    return b'active' in output
def _apply_node_label(label, delete=False, overwrite=False):
    ''' Invoke kubectl to apply node label changes.

    label: "key=value" string to add, or just "key" when delete=True.
    delete: remove the label (kubectl's trailing "-" syntax).
    overwrite: pass --overwrite so an existing value is replaced.
    Relies on the module-level kubeconfig_path.
    '''
    hostname = gethostname()
    # TODO: Make this part of the kubectl calls instead of a special string
    cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
    if delete is True:
        # kubectl deletes a label via "key-"; strip any "=value" first.
        label_key = label.split('=')[0]
        cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
        cmd = cmd + '-'
    else:
        cmd = cmd_base.format(kubeconfig_path, hostname, label)
        if overwrite:
            cmd = '{} --overwrite'.format(cmd)
    check_call(split(cmd))
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
| realfake/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | Python | apache-2.0 | 29,377 | [
"CDK"
] | 4da21d95270ba427f8c673641a9a6ba3c93ccef5cdfeaa4410625c3cb96c1644 |
import argparse
import numpy as np
import os
from LigParGenTools import *
if __name__ == "__main__":
    # Command-line entry point: parse arguments and build a custom solvent box.
    parser = argparse.ArgumentParser(
        prog='Converter.py',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
    SCRIPT TO CREATE CUSTOM SOLVENT BOXES FOR
    OPENMM AND NAMD FROM LIGPARGEN FILES
    Created on Mon Nov 14 15:10:05 2016
    @author: Leela S. Dodda leela.dodda@yale.edu
    @author: William L. Jorgensen Lab
    if using PDB file
    Usage: python CustomSolBox.py -p OCT.pdb -b 45 -r OCT
    Usage: python CustomSolBox.py -p CYH.pdb -r CYH -b 28
    REQUIREMENTS:
    Preferably Anaconda python with following modules
    argparse
    numpy
    """
    )
    parser.add_argument(
        "-p", "--pdb", help="Submit PDB file from CHEMSPIDER or PubChem", type=str)
    parser.add_argument(
        "-r", "--sol_name", help="Submit PDB file from CHEMSPIDER or PubChem", type=str)
    parser.add_argument("-b", "--box_size", type=float,
                        help="SIZE of the CUBIC box in ANGSTROM")
    parser.add_argument("-ns", "--num_solv", type=int,
                        help="NUMBER of Molecules in CUBIC box")
    args = parser.parse_args()
    try:
        # BOX_MAKER comes from LigParGenTools; a missing required argument
        # surfaces as a TypeError inside it, handled as a usage hint below.
        BOX_MAKER(args.pdb, args.box_size,args.sol_name, args.num_solv)
    except TypeError:
        print('For Help: python CustomSolBox.py -h')
| leelasd/LigParGenTools | CustomSolBox.py | Python | mit | 1,333 | [
"NAMD",
"OpenMM"
] | 3731e856e9980cca46c5b0ab446b61bd7ad86ab35d63b4231c526af63cbb2652 |
import numpy as np
import matplotlib.pyplot as plt
class ChainBundle():
    """A bundle of 2-D worm-like chains generated by a discrete random walk."""

    def __init__(self):
        # Filled by compute(): ndarray of shape (n_chains, n_segments, 2).
        self.data = None

    def compute(self, n_chains, persistence_length, contour_length, n_segments=100):
        """
        :param n_chains: How many chains to include in the bundle
        :param persistence_length: Average correlation between chain segments gets down to 1/e after persistence_length
        :param contour_length: Length of the Chains to be simulated
        :param n_segments: How smooth chain should be approximated by linear segments.
                           Should be at least 50 for meaningful results
        :writes in self.data: np.array of shape (n_chains, n_segments, 2) (last entry contains x and y coordinate)
        """
        segment_length = contour_length / n_segments
        persistence_length = float(persistence_length)
        coordinates = np.zeros((n_chains, n_segments, 2), dtype=np.float64)
        coordinates[:, 0, :] = 0  # every chain starts at the origin
        coordinates[:, 1, 0] = segment_length  # first segment points along +x
        # BUG FIX: the accumulated bend angle must be one float per chain.
        # Previously this was `angle = [0]` (a Python list); `list += ndarray`
        # *extends* the list, so the shapes broke on the first iteration.
        angle = np.zeros(n_chains)
        # Fixed bend magnitude per step, chosen so the tangent correlation
        # decays as exp(-s / persistence_length).
        step_angle = np.arccos(np.exp(-segment_length / persistence_length))
        for segment in range(n_segments - 2):
            random_dir = np.random.choice([-1, 1], n_chains).astype(np.float64)
            angle += random_dir * step_angle
            coordinates[:, segment + 2, 0] = coordinates[:, segment + 1, 0] + segment_length * np.cos(angle)
            coordinates[:, segment + 2, 1] = coordinates[:, segment + 1, 1] + segment_length * np.sin(angle)
        self.data = coordinates

    def compute_persistence_length(self, verbose=False):
        """Estimate the persistence length from the stored chains.

        verbose == True:
            :return: persistence length, x_values, exponential_curve
        verbose == False:
            :return: persistence length
        """
        from scipy.optimize import curve_fit
        # Similar to
        # https://pythonhosted.org/MDAnalysis/_modules/MDAnalysis/analysis/polymer.html
        n = self.data.shape[1]
        results_total = np.zeros((self.data.shape[0], n - 1))
        for a, chain in enumerate(self.data):
            # Unit tangent vectors along the chain.
            vecs = chain[1:] - chain[:-1]
            vecs_norm = vecs / np.sqrt((vecs * vecs).sum(axis=1))[:, None]
            inner_pr = np.inner(vecs_norm, vecs_norm)
            # Average tangent-tangent correlation per separation i.
            results = np.zeros(n - 1)
            for i in range(n - 1):
                results[:(n - 1) - i] += inner_pr[i, i:]
            norm = np.linspace(n - 1, 1, n - 1)
            results = results / norm
            results_total[a] = results
        results_curve = results_total.mean(0)

        def expon(x, lamb):
            # curve_fit calls f(xdata, *params). The original had the two
            # arguments swapped; that only worked because exp(-a*b) is
            # symmetric in its factors.
            return np.exp(-lamb * x)

        l = np.sqrt(np.sum(vecs[0] ** 2))  # segment length (from last chain)
        x = np.arange(n - 1) * l
        popt, _pcov = curve_fit(expon, x, results_curve, p0=0.1)
        if verbose:
            return 1 / popt[0], x, results_curve
        return 1 / popt[0]

    def plot(self, ax=None, **kwargs):
        """Plot every chain of the bundle.

        :param ax: matplotlib axes (defaults to plt.gca())
        :param kwargs: forwarded to ax.plot, e.g. color='blue'
        :return: ax
        """
        if ax is None:
            ax = plt.gca()
        label = kwargs.pop('label', '')
        if self.data is None:
            # BUG FIX: was `raise "..."` (raising a str is a TypeError in Py3).
            raise RuntimeError("call .compute first to get a WLC bundle of chains")
        for i, chain in enumerate(self.data):
            # Only the first chain carries the legend label.
            ax.plot(*chain.T, label="%s" % label if i == 0 else '', **kwargs)
        return ax
| stefanhuber1993/worm_like_chain | WLC.py | Python | gpl-3.0 | 3,436 | [
"MDAnalysis"
] | 13d372997b0196456fb589f7918de747e9b3e6f4c04883be5c2b702efbb488ad |
"""Handle disambiguation of reads from a chimeric input, splitting by organism.
Given specification of mixed input samples, splits a sample into multiple
sub-samples for alignment to individual genomes, then runs third-party disambiguation
scripts to reconcile.
Uses disambiguation scripts contributed by AstraZeneca, incorporated into bcbio-nextgen:
https://github.com/mjafin/disambiguate
"""
import collections
import copy
import os
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline.disambiguate.run import main as disambiguate_main
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import merge, run_info
from bcbio.provenance import do
from bcbio import bam
def split(*items):
    """Split samples into all possible genomes for alignment.

    For each sample with a disambiguate configuration, keeps the original
    sample (marked "base") and adds one deep-copied sample per extra genome
    build. Python 2 code (uses basestring).
    """
    out = []
    for data in [x[0] for x in items]:
        dis_orgs = data["config"]["algorithm"].get("disambiguate")
        if dis_orgs:
            data["disambiguate"] = {"genome_build": data["genome_build"],
                                    "base": True}
            out.append([data])
            # handle the instance where a single organism is disambiguated
            if isinstance(dis_orgs, basestring):
                dis_orgs = [dis_orgs]
            for dis_org in dis_orgs:
                # Deep copy so each genome's sample mutates independently.
                dis_data = copy.deepcopy(data)
                dis_data["disambiguate"] = {"genome_build": dis_org}
                dis_data["genome_build"] = dis_org
                dis_data = run_info.add_reference_resources(dis_data)
                out.append([dis_data])
        else:
            out.append([data])
    return out
def resolve(items, run_parallel):
    """Combine aligned and split samples into final set of disambiguated reads.

    Groups disambiguation-enabled samples by (sample name, split part), runs
    disambiguation in parallel, then merges per-sample extras.
    """
    out = []
    to_process = collections.defaultdict(list)
    for data in [x[0] for x in items]:
        if "disambiguate" in data:
            split_part = tuple(data["align_split"]) if data.get("combine") else None
            to_process[(dd.get_sample_name(data), split_part)].append(data)
        else:
            # Samples without disambiguation pass through untouched.
            out.append([data])
    if len(to_process) > 0:
        dis1 = run_parallel("run_disambiguate",
                            [(xs, xs[0]["config"]) for xs in to_process.itervalues()])
        disambigs_by_name = collections.defaultdict(list)
        # NOTE(review): looks like leftover debug output.
        print len(dis1)
        for xs in dis1:
            assert len(xs) == 1
            data = xs[0]
            disambigs_by_name[dd.get_sample_name(data)].append(data)
        dis2 = run_parallel("disambiguate_merge_extras",
                            [(xs, xs[0]["config"]) for xs in disambigs_by_name.itervalues()])
    else:
        dis2 = []
    return out + dis2
def merge_extras(items, config):
    """Merge extra disambiguated reads into a final BAM file.

    For each extra key (ambiguous/other-genome BAMs plus the "summary" text
    file), merges the per-split files and attaches the merged set to every
    sample.
    """
    final = {}
    for extra_name in items[0]["disambiguate"].keys():
        in_files = []
        for data in items:
            in_files.append(data["disambiguate"][extra_name])
        out_file = "%s-allmerged%s" % os.path.splitext(in_files[0])
        if in_files[0].endswith(".bam"):
            # NOTE(review): looks like leftover debug output.
            print out_file, in_files
            merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), config,
                                                out_file=out_file)
        else:
            # Only the summary text file is expected among non-BAM extras.
            assert extra_name == "summary", extra_name
            merged_file = _merge_summary(in_files, out_file, items[0])
        final[extra_name] = merged_file
    out = []
    for data in items:
        data["disambiguate"] = final
        out.append([data])
    return out
def _merge_summary(in_files, out_file, data):
    """Create one big summary file for disambiguation from multiple splits.

    Writes the header line (line 0) only from the first input file and all
    non-header lines from every input.
    """
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                for i, in_file in enumerate(in_files):
                    with open(in_file) as in_handle:
                        for j, line in enumerate(in_handle):
                            if j == 0:
                                # Header: keep only the first file's copy.
                                if i == 0:
                                    out_handle.write(line)
                            else:
                                out_handle.write(line)
    return out_file
def run(items, config):
    """Run third party disambiguation script, resolving into single set of calls.

    Expects exactly two aligned samples (base genome first when marked);
    queryname-sorts both BAMs, runs the Python disambiguate implementation,
    and attaches the disambiguated/ambiguous outputs to the base sample.
    """
    assert len(items) == 2, "Can only resolve two organism disambiguation"
    # check aligner, handling tophat/tophat2 distinctions
    aligner = config["algorithm"].get("aligner")
    aligner = "tophat" if aligner.startswith("tophat") else aligner
    assert aligner in ["bwa", "tophat", "star"], "Disambiguation only supported for bwa, star and tophat alignments."
    if items[0]["disambiguate"].get("base"):
        data_a, data_b = items
    else:
        data_b, data_a = items
    # Disambiguation requires read-name ordered input.
    work_bam_a = bam.sort(data_a["work_bam"], config, "queryname")
    work_bam_b = bam.sort(data_b["work_bam"], config, "queryname")
    if data_a.get("align_split"):
        # Split alignments get a per-split subdirectory under a shared base.
        base_dir = utils.safe_makedir(os.path.normpath(os.path.join(os.path.dirname(work_bam_a),
                                                                    os.pardir, os.pardir,
                                                                    "disambiguate_%s" % aligner)))
        out_dir = os.path.join(base_dir, "_".join([str(x) for x in data_a["align_split"]]))
    else:
        out_dir = os.path.normpath(os.path.join(os.path.dirname(work_bam_a),
                                                os.pardir, "disambiguate_%s" % aligner))
    base_name = os.path.join(out_dir, os.path.splitext(os.path.basename(work_bam_a))[0])
    summary_file = "%s_summary.txt" % base_name
    if not utils.file_exists(summary_file):
        with file_transaction(items[0], out_dir) as tx_out_dir:
            Args = collections.namedtuple("Args", "A B output_dir intermediate_dir "
                                          "no_sort prefix aligner")
            args = Args(work_bam_a, work_bam_b, tx_out_dir, tx_out_dir,
                        True, "", aligner)
            disambiguate_main(args)
    data_a["disambiguate"] = \
        {data_b["genome_build"]: bam.sort("%s.disambiguatedSpeciesB.bam" % base_name, config),
         "%s-ambiguous" % data_a["genome_build"]: bam.sort("%s.ambiguousSpeciesA.bam" % base_name, config),
         "%s-ambiguous" % data_b["genome_build"]: bam.sort("%s.ambiguousSpeciesB.bam" % base_name, config),
         "summary": summary_file}
    data_a["work_bam"] = bam.sort("%s.disambiguatedSpeciesA.bam" % base_name, config)
    return [[data_a]]
def run_cplusplus(items, config):
    """Run third party disambiguation script, resolving into single set of calls.

    C++ variant of run(); currently a stub that always raises
    NotImplementedError before executing anything.
    """
    assert len(items) == 2, "Can only resolve two organism disambiguation"
    # check aligner, handling tophat/tophat2 distinctions
    aligner = config["algorithm"].get("aligner")
    aligner = "tophat" if aligner.startswith("tophat") else aligner
    assert aligner in ["bwa", "tophat", "star"], "Disambiguation only supported for bwa, star and tophat alignments."
    if items[0]["disambiguate"].get("base"):
        data_a, data_b = items
    else:
        data_b, data_a = items
    work_bam_a = bam.sort(data_a["work_bam"], config, "queryname")
    work_bam_b = bam.sort(data_b["work_bam"], config, "queryname")
    out_dir = os.path.normpath(os.path.join(os.path.dirname(work_bam_a),
                                            os.pardir, os.pardir, "disambiguate"))
    base_name = os.path.join(out_dir, os.path.splitext(os.path.basename(work_bam_a))[0])
    summary_file = "%s_summary.txt" % base_name
    if not utils.file_exists(summary_file):
        with file_transaction(items[0], out_dir) as tx_out_dir:
            raise NotImplementedError("Still need to test and support C++ version")
            # NOTE(review): everything below the raise is unreachable.
            cmd = ""
            do.run(cmd.format(**locals()), "Disambiguation", data_a)
    data_a["disambiguate"] = \
        {data_b["genome_build"]: "%s.disambiguatedSpeciesB.bam" % base_name,
         "%s-ambiguous" % data_a["genome_build"]: "%s.ambiguousSpeciesA.bam" % base_name,
         "%s-ambiguous" % data_b["genome_build"]: "%s.ambiguousSpeciesB.bam" % base_name,
         "summary": summary_file}
    data_a["work_bam"] = bam.sort("%s.disambiguatedSpeciesA.bam" % base_name, config)
    return [[data_a]]
| guillermo-carrasco/bcbio-nextgen | bcbio/pipeline/disambiguate/__init__.py | Python | mit | 8,391 | [
"BWA"
] | cc4d9c7cc849b638cb9b80ddb5cf5524b7f07eae9146775bf3897a98c22a898a |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant les constantes des familiers."""
REGIMES = [
"carnivore",
"fantôme",
"herbivore",
"insectivore",
"frugivore",
]
NOMS = (
"Médor",
"Centaurin",
"Éclipse",
"Rage",
"Flamme",
"Cactus",
"Jiro",
"Arcène",
"Bouchon",
"Agité",
"griotte",
"coco",
"Derkh",
"Tiadaminet",
"lanaé",
"Dinare",
"étincelle",
"Pyros",
"Hydros",
"Géos",
"Aéros",
"Ignus",
"Aqua",
"Terra",
"Ventus",
"Ange",
"Démon",
"Erable",
"Châtaigne",
"Flash",
"Soleil",
"Lune",
"Etoile",
"Ciel",
"Véloce",
"Squam",
"Bulle",
"Terrible",
"Cabichon",
"Alpha",
"Bêta",
"Delta",
"Oméga",
"Tétra",
"Zelda",
"Pétra",
"Chatouille",
"Pelotte",
"Amour",
"Sucrette",
"Croc",
"Liberté",
"Chaos",
"Orange",
"Brunion",
"Lichen",
"Maladresse",
"Fringale",
"Fleur",
"Papillon",
"Brouzouf",
"Dédale",
"Kaki",
"Ballon",
"Nitro",
"Cryos",
"Brontos",
"Fulguris",
"Clochette",
"Griffe",
"Bouboule",
"Scarabée",
"Crevette",
"Datchoum",
"Royal",
"Prince",
"Princesse",
"Impérator",
"Croquignole",
"Narcos",
"Nocturne",
"Susucre",
"Bourrin",
"Grobill",
"Pataud",
"Nickel",
"Saltimbanque",
"Explosion",
"Mirage",
"Céleste",
"Quartz",
"Corail",
"Nabuku",
"Charge",
"Bruyant",
"Coco",
"hybride",
"Tristounet",
"Baveux",
"Corbeille",
"Alcalin",
"Acide",
"Jojo",
"bobo",
"Cyclone",
"Tornade",
"Ouragan",
"Sable",
"machin",
"Bidule",
"regretté",
"Lumière",
"Ombre",
"Clair-obscur",
"Tortuga",
"Super",
"Boucle",
"Toupie",
"Ventru",
"Rivière",
"Coquin",
"Gobelin",
"Nounouille",
"nunuche",
"Tronquignol",
"Torche",
"Infernos"
)
# Types d'harnachement
TYPES_HARNACHEMENT = [
"bride",
"laisse",
"selle",
]
| vlegoff/tsunami | src/secondaires/familier/constantes.py | Python | bsd-3-clause | 4,223 | [
"FLEUR"
] | 430b938433140bd70fd267d80772dcac3bb5f8f1b6b61d2855702ef10464a8e6 |
#!/usr/bin/env python
"""Search for non-ASCII characters in the ABINIT src files"""
import os
import re
import sys
__author__ = "M. Giantomassi"
def isascii(string, verbose=False):
    """Return False if string contains non-ASCII characters. (Python 2)"""
    try:
        string.decode('ascii')
        return True
    except UnicodeDecodeError:
        if verbose: print "not a ascii-encoded unicode string"
        return False
    else:
        # NOTE(review): unreachable - the try block always returns True.
        if verbose: print "It may have been an ascii-encoded unicode string"
        return False
# Extensions considered ABINIT source files (Fortran, headers, C, CUDA).
re_srcfile = re.compile("\.([Ff]|[Ff]90|finc|h|c|cu)$")
def is_srcfile(dirpath, fname):
    # dirpath is unused; kept for the os.walk-style call signature.
    return re_srcfile.search(fname)
def abinit_test_generator():
    """Build the test descriptor used by the ABINIT test harness."""
    def test_func(abenv):
        "Search for non-ASCII characters in the ABINIT src files"
        top = abenv.apath_of("src")
        try:
            return main(top)
        except Exception:
            import sys
            raise sys.exc_info()[1] # Reraise current exception (py2.4 compliant)
    return {"test_func" : test_func}
def main(top):
    """Walk *top* and report non-ASCII lines; returns the offence count. (Python 2)"""
    exit_status = 0
    for dirpath, dirnames, files in os.walk(top):
        for src in files:
            if is_srcfile(dirpath, src):
                fpath = os.path.join(dirpath,src)
                # Python 2 builtin file(); leaks the handle until GC.
                lines = file(fpath).readlines()
                for lno, line in enumerate(lines):
                    if not isascii(line):
                        exit_status += 1
                        print ">>> Non-ASCII character at: ",fpath,":",lno+1
                        print line
    return exit_status
if __name__ == "__main__":
if len(sys.argv) == 1:
top = "../../../src"
print "--------------------------------------------------------"
print " Searching for non-ASCII characters in ABINIT src files "
print "--------------------------------------------------------"
else:
top = sys.argv[1]
exit_status = main(top)
sys.exit(exit_status)
| SamKChang/abinit-7.10.5_multipole | special/scripts/check_ascii.py | Python | gpl-3.0 | 1,758 | [
"ABINIT"
] | 2abfa29d2fe4f9672b008cd8c8dd03602a4afc81e5333785c792391f3c1f9a32 |
import librosa
from scipy.signal import lfilter
from functools import partial
import numpy as np
from scipy.signal import gaussian
from librosa.core.spectrum import stft
PADDING = 0.1
def load_waveform(file_path):
    """Load an audio file at its native sample rate and pre-emphasize it.

    Returns the filtered signal and its sample rate.
    """
    samples, sample_rate = librosa.load(file_path, sr=None)
    # First-order pre-emphasis filter: y[n] = x[n] - 0.95 * x[n-1].
    emphasized = lfilter([1., -0.95], 1, samples, axis=0)
    return emphasized, sample_rate
def generate_spectrogram(signal, sr, color_scale='log'):
    """Compute an STFT magnitude spectrogram for *signal*.

    Returns (data, time_step, freq_step) where data is the magnitude in dB
    when color_scale == 'log', otherwise linear magnitude.
    """
    n_fft = 256
    # if len(self._signal) / self._sr > 30:
    # 5 ms analysis window; widen the FFT if the window exceeds it.
    window_length = 0.005
    win_len = int(window_length * sr)
    if win_len > n_fft:
        n_fft = win_len
    # Aim for ~1000 analysis frames across the signal.
    num_steps = 1000
    if len(signal) < num_steps:
        num_steps = len(signal)
    step_samp = int(len(signal) / num_steps)
    time_step = step_samp / sr
    freq_step = sr / n_fft
    # if step_samp < 28:
    # step_samp = 28
    # step = step_samp / self._sr
    # self._n_fft = 512
    # window = partial(gaussian, std = 250/12)
    window = 'gaussian'
    # win_len = None
    if window == 'gaussian':
        # Gaussian window with std proportional to the window length.
        window = partial(gaussian, std=0.45 * (win_len) / 2)
    # import matplotlib.pyplot as plt
    # plt.plot(window(250))
    # plt.show()
    data = stft(signal, n_fft, step_samp, center=True, win_length=win_len, window=window)
    data = np.abs(data)
    # Convert magnitude to decibels for the logarithmic color scale.
    data = 20 * np.log10(data) if color_scale == 'log' else data
    return data, time_step, freq_step
def make_path_safe(path):
    """Return *path* with backslashes normalized to '/' and spaces URL-encoded."""
    return path.translate(str.maketrans({'\\': '/', ' ': '%20'}))
| samihuc/PolyglotDB | polyglotdb/acoustics/utils.py | Python | mit | 1,440 | [
"Gaussian"
] | 6b00216e2323d4f183b36082310bf607051ab8689af19384641171a58c601f23 |
import numpy as np
import theano
from theano import tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import scipy.misc
from src.utils import load_MNIST
from src.utils import display_weigths
from src.utils import display_samples
from src.utils import normalize_img
class GRBM(object):
    # Implement a Gaussian-Bernoulli Restricted Boltzmann Machine
    def __init__(self, input, n_input, n_hidden):
        """Build the shared parameters for n_input visible / n_hidden hidden units."""
        self.input = input
        self.n_input = n_input
        self.n_hidden = n_hidden
        # Rescale terms for visible units
        # NOTE(review): the theano debug names are swapped (a is named 'b'
        # and b is named 'a'); harmless but confusing in graph printouts.
        self.a = theano.shared(value=np.zeros(self.n_input, dtype=theano.config.floatX),
                               borrow=True,
                               name='b')
        # Bias terms for hidden units
        self.b = theano.shared(np.zeros(self.n_hidden,dtype=theano.config.floatX),
                               borrow=True,
                               name='a')
        # Weights, uniformly initialized per the Glorot heuristic.
        rng = np.random.RandomState(2468)
        self.W = theano.shared(np.asarray(
            rng.uniform(
                -4 * np.sqrt(6. / (self.n_hidden + self.n_input)),
                4 * np.sqrt(6. / (self.n_hidden + self.n_input)),
                (self.n_input, self.n_hidden)
            ),dtype=theano.config.floatX),
            name='W')
        self.srng = RandomStreams(rng.randint(2 ** 30))
    def v_sample(self, h):
        # Derive a sample of visible units from the hidden units h
        mu = self.a + T.dot(h,self.W.T)
        # v_sample = mu + self.srng.normal(size=mu.shape, avg=0, std=1.0, dtype=theano.config.floatX)
        v_sample = mu # error-free reconstruction
        return [mu, v_sample]
    def h_sample(self, v):
        # Derive a sample of hidden units from the visible units v
        act = self.b + T.dot(v,self.W)
        prob = T.nnet.sigmoid(act)
        # Bernoulli sampling of the hidden activations.
        return [prob, self.srng.binomial(size=act.shape,n=1,p=prob,dtype=theano.config.floatX)]
    def output(self):
        # Hidden-unit probabilities for the current input.
        prob, hS = self.h_sample(self.input)
        return prob
    def gibbs_update(self, h):
        # A Gibbs step
        nv_prob, nv_sample = self.v_sample(h)
        nh_prob, nh_sample = self.h_sample(nv_prob)
        return [nv_prob, nv_sample, nh_prob, nh_sample]
    def alt_gibbs_update(self, v):
        # A Gibbs step
        nh_prob, nh_sample = self.h_sample(v)
        nv_prob, nv_sample = self.v_sample(nh_prob)
        return [nv_prob, nv_sample, nh_prob, nh_sample]
    def free_energy(self, v_sample):
        # Free energy of a Gaussian-Bernoulli RBM with unit variance.
        wx_b = T.dot(v_sample, self.W) + self.b
        vbias_term = 0.5 * T.dot((v_sample - self.a), (v_sample - self.a).T)
        hidden_term = T.sum(T.nnet.softplus(wx_b), axis=1)
        return -hidden_term - vbias_term
    def CD(self, k=1, eps=0.01):
        # Contrastive divergence
        # Positive phase
        h0_prob, h0_sample = self.h_sample(self.input)
        # Negative phase
        ( [ nv_probs,
            nv_samples,
            nh_probs,
            nh_samples],
          updates) = theano.scan(self.gibbs_update,
                                 outputs_info=[None, None, None, h0_sample],
                                 n_steps=k,
                                 name="gibbs_update")
        vK_sample = nv_samples[-1]
        cost = T.mean(self.free_energy(self.input)) - T.mean(
            self.free_energy(vK_sample))
        params = [self.a, self.b, self.W]
        # We must not compute the gradient through the gibbs sampling
        gparams = T.grad(cost, params, consider_constant=[self.input, vK_sample])
        # Plain SGD parameter updates with learning rate eps.
        for param, gparam in zip(params, gparams):
            updates[param] = param - gparam * T.cast(eps,dtype=theano.config.floatX)
        # Mean squared reconstruction distance, used only for monitoring.
        dist = T.mean(T.sqr(self.input - vK_sample))
        return dist, updates
def test_grbm(batch_size = 20, training_epochs = 15, k=1, n_hidden=200):
    """Train a GRBM on MNIST and dump filter/sample images. (Python 2: xrange)"""
    n_data, n_row, n_col, r_dataset, levels, targets = load_MNIST()
    n_visible = n_row * n_col
    # See https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf
    # 13.2: "it is [...] easier to first normalise each component of the data to have zero
    # mean and unit variance and then to use noise free reconstructions, with the variance
    # in equation 17 set to 1"
    dataset = normalize_img(r_dataset)
    index = T.lscalar('index')
    x = T.matrix('x')
    print("Building an RBM with %i visible inputs and %i hidden units" % (n_visible, n_hidden))
    rbm = GRBM(x, n_visible, n_hidden)
    dist, updates = rbm.CD(k)
    train_set = theano.shared(dataset, borrow=True)
    # Mini-batch training function; `givens` slices the shared dataset.
    train = theano.function(
        [index],
        dist,
        updates=updates,
        givens={
            x: train_set[index*batch_size : (index+1)*batch_size]
        },
        name="train"
    )
    for epoch in xrange(training_epochs):
        dist = []
        for n_batch in xrange(n_data//batch_size):
            dist.append(train(n_batch))
        print("Training epoch %d, mean batch reconstructed distance %f" % (epoch, np.mean(dist)))
        # Construct image from the weight matrix
        Wimg = display_weigths(rbm.W.get_value(borrow=True), n_row, n_col, n_hidden)
        scipy.misc.imsave('filters_at_epoch_%i.png' % epoch,Wimg)
    # Generate sample chains starting from ten held-out digits.
    samples = []
    vis_sample = theano.shared(np.asarray(dataset[1000:1010], dtype=theano.config.floatX))
    samples.append(vis_sample.get_value(borrow=True))
    for i in xrange(10):
        ( [ nv_probs,
            nv_samples,
            nh_probs,
            nh_samples],
          updates) = theano.scan(rbm.alt_gibbs_update,
                                 outputs_info=[None, vis_sample, None, None],
                                 n_steps=1000,
                                 name="alt_gibbs_update")
        run_gibbs = theano.function(
            [],
            [ nv_probs[-1],
              nv_samples[-1]],
            updates=updates,
            mode='NanGuardMode',
            name="run_gibbs"
        )
        nv_prob, nv_sample = run_gibbs()
        samples.append(nv_prob)
    Y = display_samples(samples, n_row, n_col)
    scipy.misc.imsave('mix.png',Y)
if __name__ == '__main__':
test_grbm(training_epochs=15, k=1) | glgerard/MDBN | src/archived/grbm_grad.py | Python | apache-2.0 | 6,281 | [
"Gaussian"
] | 1a09f71c313bec4210ad99d4de3f4b89ff2f9c03f140bc9fa20fee3032a29472 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkCellDataToPointData(SimpleVTKClassModuleBase):
    # Auto-generated DeVIDE wrapper around VTK's vtkCellDataToPointData filter.
    def __init__(self, module_manager):
        # One vtkDataSet input, one vtkDataSet output; replaceDoc=True swaps
        # in the wrapped VTK class's own documentation.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkCellDataToPointData(), 'Processing.',
            ('vtkDataSet',), ('vtkDataSet',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| chrisidefix/devide | modules/vtk_basic/vtkCellDataToPointData.py | Python | bsd-3-clause | 497 | [
"VTK"
] | e5603d900d185f09984a5fa4623705a1ba8b039ed88117316d878f5d39005f0d |
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import errno
import codecs
try:
import simplejson as json
except ImportError:
import json
from ansible import constants as C
from ansible.errors import *
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.cache.base import BaseCacheModule
class CacheModule(BaseCacheModule):
    """
    A caching module backed by json files.

    One JSON file per key under fact_caching_connection, with an in-memory
    write-through cache and mtime-based expiry. (Python 2 syntax.)
    """
    def __init__(self, *args, **kwargs):
        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._cache = {}
        self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
        if not self._cache_dir:
            raise AnsibleError("error, fact_caching_connection is not set, cannot use fact cache")
        if not os.path.exists(self._cache_dir):
            try:
                os.makedirs(self._cache_dir)
            except (OSError,IOError), e:
                # NOTE(review): failure is only warned about; later file
                # operations will fail with their own errors.
                self._display.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
                return None
    def get(self, key):
        """Return the cached value for key, raising KeyError when absent/expired."""
        if key in self._cache:
            return self._cache.get(key)
        if self.has_expired(key):
            raise KeyError
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            f = codecs.open(cachefile, 'r', encoding='utf-8')
        except (OSError,IOError), e:
            self._display.warning("error while trying to read %s : %s" % (cachefile, str(e)))
            pass
        else:
            try:
                value = json.load(f)
                self._cache[key] = value
                return value
            except ValueError:
                # NOTE(review): `e` is unbound in this branch (would raise
                # NameError) and the message says "write" for a read failure.
                self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
                raise KeyError
            finally:
                f.close()
    def set(self, key, value):
        """Store value in memory and persist it as JSON on disk."""
        self._cache[key] = value
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            f = codecs.open(cachefile, 'w', encoding='utf-8')
        except (OSError,IOError), e:
            self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
            pass
        else:
            f.write(jsonify(value))
        finally:
            # NOTE(review): if codecs.open raised, f is unbound here and
            # f.close() raises NameError.
            f.close()
    def has_expired(self, key):
        """True when the on-disk entry is older than the timeout; also evicts it."""
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            st = os.stat(cachefile)
        except (OSError,IOError), e:
            if e.errno == errno.ENOENT:
                # Missing file is treated as "not expired" (caller handles it).
                return False
            else:
                self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
                pass
        if time.time() - st.st_mtime <= self._timeout:
            return False
        if key in self._cache:
            del self._cache[key]
        return True
    def keys(self):
        """List non-expired keys present on disk (dotfiles skipped)."""
        keys = []
        for k in os.listdir(self._cache_dir):
            if not (k.startswith('.') or self.has_expired(k)):
                keys.append(k)
        return keys
    def contains(self, key):
        """True when key is cached in memory or as a fresh file on disk."""
        cachefile = "%s/%s" % (self._cache_dir, key)
        if key in self._cache:
            return True
        if self.has_expired(key):
            return False
        try:
            st = os.stat(cachefile)
            return True
        except (OSError,IOError), e:
            if e.errno == errno.ENOENT:
                return False
            else:
                self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
                pass
    def delete(self, key):
        """Drop key from memory and remove its file, ignoring I/O errors."""
        del self._cache[key]
        try:
            os.remove("%s/%s" % (self._cache_dir, key))
        except (OSError,IOError), e:
            pass #TODO: only pass on non existing?
    def flush(self):
        """Remove every cached entry from memory and disk."""
        self._cache = {}
        for key in self.keys():
            self.delete(key)
    def copy(self):
        """Return a plain dict snapshot of all non-expired entries."""
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret
| scottcunningham/ansible | lib/ansible/plugins/cache/jsonfile.py | Python | gpl-3.0 | 4,628 | [
"Brian"
] | e231d3326bf2da8d9211c24d7849d9bd551b04c1386ba49e8fb3fc8db26e79b7 |
### https://hg.python.org/cpython/file/3.6/Lib/html/entities.py
"""HTML character entity references."""
# maps the HTML5 named character references to the equivalent Unicode character(s)
html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
| neno1978/pelisalacarta | python/main-classic/core/entities.py | Python | gpl-3.0 | 57,381 | [
"Bowtie"
] | 96c63fa11c1b93400146d69d92e20ff5d5c7607e5cd6b8a8ce152f0667fc9966 |
#!/usr/local/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Analyze YANK output file.
"""
#=============================================================================================
# MODULE IMPORTS
#=============================================================================================
from yank import utils, analyze
#=============================================================================================
# COMMAND DISPATCH
#=============================================================================================
def dispatch(args):
    """Top-level dispatcher for the analyze command.

    ``args`` is the docopt-style dictionary of parsed command line options.
    Routes to trajectory extraction when ``extract-trajectory`` was given,
    otherwise runs the standard free-energy analysis on the store directory.
    Returns True on completion.
    """
    # Configure logging verbosity before doing any work.
    utils.config_root_logger(args['--verbose'])
    if args['extract-trajectory']:
        return dispatch_extract_trajectory(args)
    analyze.analyze(args['--store'])
    return True
def dispatch_extract_trajectory(args):
    """Extract a trajectory from a stored simulation and write it to disk.

    ``args`` is the docopt-style dictionary of parsed command line options.
    Builds the keyword arguments for ``analyze.extract_trajectory()`` from
    the optional flags and always returns True.
    """
    # Destination trajectory file and source NetCDF store.
    output_path = args['--trajectory']
    nc_path = args['--netcdf']

    extract_options = {}

    # Exactly one of --state / --replica selects which series to extract.
    if args['--state']:
        extract_options['state_index'] = int(args['--state'])
    else:
        extract_options['replica_index'] = int(args['--replica'])

    # The frame-range flags all share the same "present -> int" handling.
    frame_flags = (('--start', 'start_frame'),
                   ('--skip', 'skip_frame'),
                   ('--end', 'end_frame'))
    for flag, keyword in frame_flags:
        if args[flag]:
            extract_options[keyword] = int(args[flag])

    if args['--nosolvent']:
        extract_options['keep_solvent'] = False
    if args['--discardequil']:
        extract_options['discard_equilibration'] = True

    # Perform the extraction.
    analyze.extract_trajectory(output_path, nc_path, **extract_options)
    return True
| jchodera/yank | Yank/commands/analyze.py | Python | lgpl-3.0 | 1,754 | [
"NetCDF"
] | 47016022d1ce5598337215c5ceecbd24586ccf2af43f70a3ae2d9db63b849cc5 |
""" core implementation of testing process: init, session, runtest loop. """
from __future__ import absolute_import, division, print_function
import functools
import os
import sys
import _pytest
import _pytest._code
import py
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.config import directory_arg, UsageError, hookimpl
from _pytest.runner import collect_one_node
from _pytest.outcomes import exit
# Directory containing the _pytest package; tracebacks are pruned below it.
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0  # all tests passed
EXIT_TESTSFAILED = 1  # some tests failed
EXIT_INTERRUPTED = 2  # the run was interrupted (KeyboardInterrupt / exit())
EXIT_INTERNALERROR = 3  # an internal error occurred while running tests
EXIT_USAGEERROR = 4  # command line usage error
EXIT_NOTESTSCOLLECTED = 5  # collection finished but found no tests
def pytest_addoption(parser):
    """Register ini-file options and command line options for running,
    collecting and debugging test sessions.

    ``parser`` is the pytest option parser; options are grouped into the
    "general", "collect" and "debugconfig" groups.
    """
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
                  type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
    parser.addini("testpaths", "directories to search for tests when no files or directories are given in the "
                  "command line.",
                  type="args", default=[])
    # parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
    #             "**/test_*.py", "**/*_test.py"]
    # )
    group = parser.getgroup("general", "running and selection options")
    # Bug fix: the original code had stray trailing commas after the '-x'
    # and '--collectonly' registrations, turning those statements into
    # throwaway one-element tuples; the commas are removed here.
    group._addoption('-x', '--exitfirst', action="store_const",
                     dest="maxfail", const=1,
                     help="exit instantly on first error or failed test.")
    group._addoption('--maxfail', metavar="num",
                     action="store", type=int, dest="maxfail", default=0,
                     help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
                     help="marks not registered in configuration file raise errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
                     help="load configuration from `file` instead of trying to locate one of the implicit "
                          "configuration files.")
    group._addoption("--continue-on-collection-errors", action="store_true",
                     default=False, dest="continue_on_collection_errors",
                     help="Force test execution even if collection errors occur.")

    group = parser.getgroup("collect", "collection")
    group.addoption('--collectonly', '--collect-only', action="store_true",
                    help="only collect tests, don't execute them.")
    group.addoption('--pyargs', action="store_true",
                    help="try to interpret all arguments as python packages.")
    group.addoption("--ignore", action="append", metavar="path",
                    help="ignore path during collection (multi-allowed).")
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption('--confcutdir', dest="confcutdir", default=None,
                    metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
                    help="only load conftest.py's relative to specified dir.")
    group.addoption('--noconftest', action="store_true",
                    dest="noconftest", default=False,
                    help="Don't load any conftest.py files.")
    group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
                    dest="keepduplicates", default=False,
                    help="Keep duplicate tests.")
    group.addoption('--collect-in-virtualenv', action='store_true',
                    dest='collect_in_virtualenv', default=False,
                    help="Don't ignore tests in a local virtualenv directory")

    group = parser.getgroup("debugconfig",
                            "test session debugging and configuration")
    group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
                    help="base temporary directory for this test run.")
def pytest_namespace():
    """Return an empty namespace dict.

    Kept only because removing this hook exposes a deeper startup issue in
    pytest that has not been tracked down yet; revisit later.
    """
    return dict()
def pytest_configure(config):
    """Expose the config object as ``pytest.config`` for backward
    compatibility with code that expects the legacy module attribute."""
    pytest_module = __import__('pytest')
    pytest_module.config = config
def wrap_session(config, doit):
    """Skeleton command line program: run ``doit(config, session)`` inside a
    fully configured session, mapping outcomes to process exit codes.

    Handles configure/unconfigure, session start/finish hooks, keyboard
    interrupts and internal errors; returns the session exit status.
    """
    session = Session(config)
    session.exitstatus = EXIT_OK
    # initstate tracks how far setup got so teardown only undoes what was done:
    # 0 = nothing, 1 = configured, 2 = sessionstart hook fired.
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            # doit returning None/0 still counts as success.
            session.exitstatus = doit(config, session) or 0
        except UsageError:
            # Usage errors propagate to the caller for reporting.
            raise
        except KeyboardInterrupt:
            excinfo = _pytest._code.ExceptionInfo()
            # pytest.exit() raises a KeyboardInterrupt subclass; print its
            # message ourselves if the terminal reporter isn't active yet.
            if initstate < 2 and isinstance(excinfo.value, exit.Exception):
                sys.stderr.write('{0}: {1}\n'.format(
                    excinfo.typename, excinfo.value.msg))
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = EXIT_INTERRUPTED
        except:
            # Deliberately bare: any other exception (including SystemExit)
            # is reported as an internal error.
            excinfo = _pytest._code.ExceptionInfo()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
    finally:
        excinfo = None  # Explicitly break reference cycle.
        # Restore the directory pytest was started from.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session,
                exitstatus=session.exitstatus)
        config._ensure_unconfigure()
    return session.exitstatus
def pytest_cmdline_main(config):
    # Run the default test protocol (_main) inside the standard session
    # wrapper, which handles configure/unconfigure and exit-status mapping.
    return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
    """Trigger collection on ``session`` and return the collected items."""
    collected = session.perform_collect()
    return collected
def pytest_runtestloop(session):
    """Run the runtest protocol for every collected item.

    Raises ``session.Interrupted`` when collection produced errors (unless
    --continue-on-collection-errors was given) or when a plugin requested the
    run to stop.  Returns True when the loop finished, and immediately in
    --collect-only mode where nothing is executed.
    """
    option = session.config.option
    if session.testsfailed and not option.continue_on_collection_errors:
        raise session.Interrupted(
            "%d errors during collection" % session.testsfailed)

    if option.collectonly:
        return True

    items = session.items
    total = len(items)
    for position, item in enumerate(items):
        # Pass the upcoming item (None for the last one) so plugins can
        # decide how much teardown to perform between tests.
        upcoming = items[position + 1] if position + 1 < total else None
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=upcoming)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
def _in_venv(path):
"""Attempts to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script"""
bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin')
if not bindir.exists():
return False
activates = ('activate', 'activate.csh', 'activate.fish',
'Activate', 'Activate.bat', 'Activate.ps1')
return any([fname.basename in activates for fname in bindir.listdir()])
def pytest_ignore_collect(path, config):
    """Return True when ``path`` should be skipped during collection.

    A path is ignored when it matches a conftest ``collect_ignore`` entry or
    an ``--ignore`` option, lies inside a virtualenv (unless
    --collect-in-virtualenv was given), or is a duplicate of an already
    collected path (unless --keep-duplicates was given).
    """
    ignored = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) or []
    cli_ignores = config.getoption("ignore")
    if cli_ignores:
        ignored.extend(py.path.local(entry) for entry in cli_ignores)
    if py.path.local(path) in ignored:
        return True

    if _in_venv(path) and not config.getoption("collect_in_virtualenv"):
        return True

    if not config.getoption("keepduplicates"):
        # Remember every path we have seen so repeats can be skipped.
        seen_paths = config.pluginmanager._duplicatepaths
        if path in seen_paths:
            return True
        seen_paths.add(path)

    return False
class FSHookProxy:
    """Hook proxy bound to a filesystem path.

    Attribute access lazily resolves a hook caller restricted to the plugins
    relevant for that path (conftest plugins below it are removed via
    ``remove_mods``) and caches the result on the instance.
    """

    def __init__(self, fspath, pm, remove_mods):
        self.fspath = fspath
        self.pm = pm
        self.remove_mods = remove_mods

    def __getattr__(self, name):
        hook_caller = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
        # Cache on the instance so __getattr__ fires only once per hook name.
        self.__dict__[name] = hook_caller
        return hook_caller
class _CompatProperty(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, owner):
if obj is None:
return self
# TODO: reenable in the features branch
# warnings.warn(
# "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format(
# name=self.name, owner=type(owner).__name__),
# PendingDeprecationWarning, stacklevel=2)
return getattr(__import__('pytest'), self.name)
class NodeKeywords(MappingMixin):
    """Mapping of keyword names active for a collection node.

    Lookup falls back to the parent node's keywords, so a keyword defined
    anywhere up the collection tree is visible on every descendant.  The
    node's own name is always present as a truthy keyword.  Keys cannot be
    deleted.
    """

    def __init__(self, node):
        self.node = node
        self.parent = node.parent
        self._markers = {node.name: True}

    def __getitem__(self, key):
        try:
            return self._markers[key]
        except KeyError:
            if self.parent is None:
                raise
            # Fall back to the parent chain for inherited keywords.
            return self.parent.keywords[key]

    def __setitem__(self, key, value):
        self._markers[key] = value

    def __delitem__(self, key):
        raise ValueError("cannot delete key in keywords dict")

    def _seen(self):
        """Return the set of all keyword names visible on this node."""
        seen = set(self._markers)
        if self.parent is not None:
            seen.update(self.parent.keywords)
        return seen

    def __iter__(self):
        return iter(self._seen())

    def __len__(self):
        # Bug fix: the previous implementation did len(self.__iter__()),
        # calling len() on a plain iterator, which raises TypeError.
        return len(self._seen())

    def keys(self):
        return list(self)

    def __repr__(self):
        return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = getattr(parent, 'fspath', None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
Module = _CompatProperty("Module")
Class = _CompatProperty("Class")
Instance = _CompatProperty("Instance")
Function = _CompatProperty("Function")
File = _CompatProperty("File")
Item = _CompatProperty("Item")
def _getcustomclass(self, name):
maybe_compatprop = getattr(type(self), name)
if isinstance(maybe_compatprop, _CompatProperty):
return getattr(__import__('pytest'), name)
else:
cls = getattr(self, name)
# TODO: reenable in the features branch
# warnings.warn("use of node.%s is deprecated, "
# "use pytest_pycollect_makeitem(...) to create custom "
# "collection nodes" % name, category=DeprecationWarning)
return cls
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__,
getattr(self, 'name', None))
def warn(self, code, message):
""" generate a warning with the given code and message for this
item. """
assert isinstance(code, str)
fslocation = getattr(self, "location", None)
if fslocation is None:
fslocation = getattr(self, "fspath", None)
self.ihook.pytest_logwarning.call_historic(kwargs=dict(
code=code, message=message,
nodeid=self.nodeid, fslocation=fslocation))
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
return self.parent.nodeid + "::" + self.name
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker):
""" dynamically add a marker object to the node.
``marker`` can be a string or pytest.mark.* instance.
"""
from _pytest.mark import MarkDecorator, MARK_GEN
if isinstance(marker, py.builtin._basestring):
marker = getattr(MARK_GEN, marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name. """
val = self.keywords.get(name, None)
if val is not None:
from _pytest.mark import MarkInfo, MarkDecorator
if isinstance(val, (MarkDecorator, MarkInfo)):
return val
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
item = self
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
    """ Build a terminal-writable failure representation for ``excinfo``,
    honoring the configured traceback style (--fulltrace, --tb). """
    fm = self.session._fixturemanager
    if excinfo.errisinstance(fm.FixtureLookupError):
        # fixture lookup errors carry their own formatted representation
        return excinfo.value.formatrepr()
    tbfilter = True
    if self.config.option.fulltrace:
        style = "long"
    else:
        # keep the innermost entry in case pruning removes everything
        tb = _pytest._code.Traceback([excinfo.traceback[-1]])
        self._prunetraceback(excinfo)
        if len(excinfo.traceback) == 0:
            excinfo.traceback = tb
            tbfilter = False  # prunetraceback already does it
        if style == "auto":
            style = "long"
    # XXX should excinfo.getrepr record all data and toterminal() process it?
    if style is None:
        if self.config.option.tbstyle == "short":
            style = "short"
        else:
            style = "long"
    try:
        os.getcwd()
        abspath = False
    except OSError:
        # current working directory no longer exists - use absolute paths
        abspath = True
    return excinfo.getrepr(funcargs=True, abspath=abspath,
                           showlocals=self.config.option.showlocals,
                           style=style, tbfilter=tbfilter)
# default failure representation; subclasses (e.g. Collector) override it
repr_failure = _repr_failure_py
class Collector(Node):
    """ Collector instances create children through collect()
    and thus iteratively build a tree.
    """

    class CollectError(Exception):
        """ an error during collection, contains a custom message. """

    def collect(self):
        """ returns a list of children (items and collectors)
        for this collection node.
        """
        raise NotImplementedError("abstract")

    def repr_failure(self, excinfo):
        """ represent a collection failure. """
        if excinfo.errisinstance(self.CollectError):
            # CollectError carries a ready-made user message; show it
            # verbatim instead of a traceback
            exc = excinfo.value
            return str(exc.args[0])
        return self._repr_failure_py(excinfo, style="short")

    def _prunetraceback(self, excinfo):
        # Trim traceback entries to the collected file; if nothing was cut,
        # drop frames from pytest's own code (below ``tracebackcutdir``).
        if hasattr(self, 'fspath'):
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=self.fspath)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
            excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
    """ Base class for collectors that are backed by a filesystem path. """

    def __init__(self, fspath, parent=None, config=None, session=None):
        fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
        name = fspath.basename
        if parent is not None:
            # prefer the path relative to the parent collector as node name
            rel = fspath.relto(parent.fspath)
            if rel:
                name = rel
            # node names always use forward slashes, even on Windows
            name = name.replace(os.sep, "/")
        super(FSCollector, self).__init__(name, parent, config, session)
        self.fspath = fspath

    def _makeid(self):
        # node id is the path relative to the rootdir, with "/" separators
        relpath = self.fspath.relto(self.config.rootdir)
        if os.sep != "/":
            relpath = relpath.replace(os.sep, "/")
        return relpath
class File(FSCollector):
    """ base class for collecting tests from a file.

    Concrete subclasses implement ``collect()`` (see Collector) for their
    specific file format.
    """
class Item(Node):
    """ a basic test invocation item. Note that for a single function
    there might be multiple test invocation items.
    """
    # the item scheduled to run after this one (set by the runner); None
    # means this is the last item
    nextitem = None

    def __init__(self, name, parent=None, config=None, session=None):
        super(Item, self).__init__(name, parent, config, session)
        # list of (when, key, content) tuples added via add_report_section()
        self._report_sections = []

    def add_report_section(self, when, key, content):
        """
        Adds a new report section, similar to what's done internally to add stdout and
        stderr captured output::

            item.add_report_section("call", "stdout", "report section contents")

        :param str when:
            One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
        :param str key:
            Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
            ``"stderr"`` internally.
        :param str content:
            The full contents as a string.
        """
        # empty sections are silently dropped
        if content:
            self._report_sections.append((when, key, content))

    def reportinfo(self):
        """ Return a (fspath, lineno, domaininfo) tuple for reporting;
        subclasses override this with more precise information. """
        return self.fspath, None, ""

    @property
    def location(self):
        """ (relative fspath, lineno, test name) tuple, computed once and
        cached on the instance. """
        try:
            return self._location
        except AttributeError:
            location = self.reportinfo()
            # bestrelpath is a quite slow function
            cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
            try:
                fspath = cache[location[0]]
            except KeyError:
                fspath = self.session.fspath.bestrelpath(location[0])
                cache[location[0]] = fspath
            location = (fspath, location[1], str(location[2]))
            self._location = location
            return location
class NoMatch(Exception):
    """ raised if matching cannot locate matching names. """
class Interrupted(KeyboardInterrupt):
    """ signals an interrupted test run. """
    # presumably so the class is displayed with a short name on py3 - see
    # the original note below
    __module__ = 'builtins'  # for py3
class Session(FSCollector):
    """ Root collector of a test run: parses command line arguments into
    collection nodes, drives collection and tracks run state (failures,
    interruption). """
    Interrupted = Interrupted

    def __init__(self, config):
        FSCollector.__init__(self, config.rootdir, parent=None,
                             config=config, session=self)
        self.testsfailed = 0
        self.testscollected = 0
        # set to a truthy reason string to abort the run (see maxfail below)
        self.shouldstop = False
        self.trace = config.trace.root.get("collection")
        self._norecursepatterns = config.getini("norecursedirs")
        self.startdir = py.path.local()
        self.config.pluginmanager.register(self, name="session")

    def _makeid(self):
        # the session is the root node; its id is the empty string
        return ""

    @hookimpl(tryfirst=True)
    def pytest_collectstart(self):
        # abort collection as soon as a stop condition was flagged
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)

    @hookimpl(tryfirst=True)
    def pytest_runtest_logreport(self, report):
        # count real failures (xfail does not count) and honor --maxfail
        if report.failed and not hasattr(report, 'wasxfail'):
            self.testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self.testsfailed >= maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self.testsfailed)
    # collection failures are counted exactly like runtime failures
    pytest_collectreport = pytest_runtest_logreport

    def isinitpath(self, path):
        # True if ``path`` was given directly on the command line
        return path in self._initialpaths

    def gethookproxy(self, fspath):
        """ Return a hook caller restricted to the conftest plugins that
        apply at ``fspath``. """
        # check if we have the common case of running
        # hooks with all conftest.py files
        pm = self.config.pluginmanager
        my_conftestmodules = pm._getconftestmodules(fspath)
        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
        if remove_mods:
            # one or more conftests are not in use at this fspath
            proxy = FSHookProxy(fspath, pm, remove_mods)
        else:
            # all plugins are active for this fspath
            proxy = self.config.hook
        return proxy

    def perform_collect(self, args=None, genitems=True):
        """ Collect the given args (defaults to config.args), run the
        collection hooks and return the list of collected items. """
        hook = self.config.hook
        try:
            items = self._perform_collect(args, genitems)
            self.config.pluginmanager.check_pending()
            hook.pytest_collection_modifyitems(session=self,
                config=self.config, items=items)
        finally:
            # always notify plugins that collection is finished
            hook.pytest_collection_finish(session=self)
        self.testscollected = len(items)
        return items

    def _perform_collect(self, args, genitems):
        # Parse the arguments into (path, name, ...) parts, collect the
        # tree and, unless genitems is False, expand it into test items.
        if args is None:
            args = self.config.args
        self.trace("perform_collect", self, args)
        self.trace.root.indent += 1
        self._notfound = []
        self._initialpaths = set()
        self._initialparts = []
        self.items = items = []
        for arg in args:
            parts = self._parsearg(arg)
            self._initialparts.append(parts)
            self._initialpaths.add(parts[0])
        rep = collect_one_node(self)
        self.ihook.pytest_collectreport(report=rep)
        self.trace.root.indent -= 1
        if self._notfound:
            errors = []
            for arg, exc in self._notfound:
                line = "(no name %r in any of %r)" % (arg, exc.args[0])
                errors.append("not found: %s\n%s" % (arg, line))
            # XXX: test this
            raise UsageError(*errors)
        if not genitems:
            return rep.result
        else:
            if rep.passed:
                for node in rep.result:
                    self.items.extend(self.genitems(node))
            return items

    def collect(self):
        """ Yield top level collection nodes for the initial arguments. """
        for parts in self._initialparts:
            arg = "::".join(map(str, parts))
            self.trace("processing argument", arg)
            self.trace.root.indent += 1
            try:
                for x in self._collect(arg):
                    yield x
            except NoMatch:
                # we are inside a make_report hook so
                # we cannot directly pass through the exception
                self._notfound.append((arg, sys.exc_info()[1]))
            self.trace.root.indent -= 1

    def _collect(self, arg):
        # Yield collection nodes for one command-line argument.
        names = self._parsearg(arg)
        path = names.pop(0)
        if path.check(dir=1):
            # a directory argument may not carry "::" name parts
            assert not names, "invalid arg %r" % (arg,)
            # breadth-first, sorted walk over the directory, honoring
            # the norecursedirs patterns via self._recurse
            for path in path.visit(fil=lambda x: x.check(file=1),
                                   rec=self._recurse, bf=True, sort=True):
                for x in self._collectfile(path):
                    yield x
        else:
            assert path.check(file=1)
            for x in self.matchnodes(self._collectfile(path), names):
                yield x

    def _collectfile(self, path):
        # paths given explicitly on the command line bypass the
        # pytest_ignore_collect hook
        ihook = self.gethookproxy(path)
        if not self.isinitpath(path):
            if ihook.pytest_ignore_collect(path=path, config=self.config):
                return ()
        return ihook.pytest_collect_file(path=path, parent=self)

    def _recurse(self, path):
        # Directory filter used by py.path's visit(): returns False for
        # ignored/norecurse directories, True otherwise.
        ihook = self.gethookproxy(path.dirpath())
        if ihook.pytest_ignore_collect(path=path, config=self.config):
            return
        for pat in self._norecursepatterns:
            if path.check(fnmatch=pat):
                return False
        ihook = self.gethookproxy(path)
        ihook.pytest_collect_directory(path=path, parent=self)
        return True

    def _tryconvertpyarg(self, x):
        """Convert a dotted module name to path.
        """
        import pkgutil
        try:
            loader = pkgutil.find_loader(x)
        except ImportError:
            # not importable - treat the argument as a plain path
            return x
        if loader is None:
            return x
        # This method is sometimes invoked when AssertionRewritingHook, which
        # does not define a get_filename method, is already in place:
        try:
            path = loader.get_filename(x)
        except AttributeError:
            # Retrieve path from AssertionRewritingHook:
            path = loader.modules[x][0].co_filename
        if loader.is_package(x):
            # use the package directory, not its __init__.py
            path = os.path.dirname(path)
        return path

    def _parsearg(self, arg):
        """ return (fspath, names) tuple after checking the file exists. """
        parts = str(arg).split("::")
        if self.config.option.pyargs:
            parts[0] = self._tryconvertpyarg(parts[0])
        relpath = parts[0].replace("/", os.sep)
        path = self.config.invocation_dir.join(relpath, abs=True)
        if not path.check():
            if self.config.option.pyargs:
                raise UsageError(
                    "file or package not found: " + arg +
                    " (missing __init__.py?)")
            else:
                raise UsageError("file not found: " + arg)
        parts[0] = path
        return parts

    def matchnodes(self, matching, names):
        """ Resolve the "::"-separated ``names`` against the candidate
        ``matching`` nodes; raises NoMatch when nothing resolves. """
        self.trace("matchnodes", matching, names)
        self.trace.root.indent += 1
        nodes = self._matchnodes(matching, names)
        num = len(nodes)
        self.trace("matchnodes finished -> ", num, "nodes")
        self.trace.root.indent -= 1
        if num == 0:
            raise NoMatch(matching, names[:1])
        return nodes

    def _matchnodes(self, matching, names):
        # Recursive worker for matchnodes(): match the first name against
        # the children of each candidate collector.
        if not matching or not names:
            return matching
        name = names[0]
        assert name
        nextnames = names[1:]
        resultnodes = []
        for node in matching:
            if isinstance(node, Item):
                if not names:
                    resultnodes.append(node)
                continue
            assert isinstance(node, Collector)
            rep = collect_one_node(node)
            if rep.passed:
                has_matched = False
                for x in rep.result:
                    # TODO: remove parametrized workaround once collection structure contains parametrization
                    if x.name == name or x.name.split("[")[0] == name:
                        resultnodes.extend(self.matchnodes([x], nextnames))
                        has_matched = True
                # XXX accept IDs that don't have "()" for class instances
                if not has_matched and len(rep.result) == 1 and x.name == "()":
                    nextnames.insert(0, name)
                    resultnodes.extend(self.matchnodes([x], nextnames))
            else:
                # report collection failures here to avoid failing to run some test
                # specified in the command line because the module could not be
                # imported (#134)
                node.ihook.pytest_collectreport(report=rep)
        return resultnodes

    def genitems(self, node):
        """ Recursively expand ``node`` into the test items it contains,
        notifying the collection hooks along the way. """
        self.trace("genitems", node)
        if isinstance(node, Item):
            node.ihook.pytest_itemcollected(item=node)
            yield node
        else:
            assert isinstance(node, Collector)
            rep = collect_one_node(node)
            if rep.passed:
                for subnode in rep.result:
                    for x in self.genitems(subnode):
                        yield x
            node.ihook.pytest_collectreport(report=rep)
| hoehnp/navit_test | lib/python2.7/site-packages/_pytest/main.py | Python | gpl-2.0 | 29,707 | [
"VisIt"
] | 805fc5b74c317f9c536097074fe55b469f5df77084275aebabd2700dee454b70 |
"""
@name: PyHouse/src/Modules/Computer/Web/_test/test_web_utils.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com>
@copyright: (c) 2014-2017 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 29, 2013
@Summary: Test web utilities module
Passed all 7 tests - DBK - 2017-01-12
"""
__updated__ = '2017-01-19'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
import jsonpickle
# Import PyMh files and modules.
from test.xml_data import XML_LONG
from test.testing_mixin import SetupPyHouseObj
from Modules.Computer.Web import web_utils
from Modules.Housing.rooms import Xml as roomsXML
from Modules.Core.Utilities import json_tools
from Modules.Housing.test.xml_housing import \
TESTING_HOUSE_NAME, \
TESTING_HOUSE_KEY, \
TESTING_HOUSE_ACTIVE
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
PY_DATA = [ { 'a123': u'A', 'b': (2, 4), 'c': 3.0 }, 'def D E F' ]
JS_DATA = '{' + '}'
class SetupMixin(object):
    """ Shared fixture helpers for the test cases in this module. """

    def setUp(self, p_root):
        # build a fresh PyHouse object tree and the XML fixture per test
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
        self.m_xml = SetupPyHouseObj().BuildXml(p_root)

    def jsonPair(self, p_json, p_key):
        """ Extract key, value from json
        """
        l_json = json_tools.decode_json_unicode(p_json)
        try:
            l_val = l_json[p_key]
        except (KeyError, ValueError) as e_err:
            # NOTE(review): on lookup failure the error text itself becomes
            # the return value, so callers see a descriptive mismatch
            # instead of an exception
            l_val = 'ERRor on JsonPair for key "{}" {} {}'.format(p_key, e_err, l_json)
            print(l_val)
        return l_val
class Attribs(object):
    """ Simple attribute holder usable as a test fixture object. """
    # default class attribute shared by every instance
    def_attr = 'Hello World!'
class A0(unittest.TestCase):
    """ Identification 'test' - prints the module id so interleaved test
    output shows which test file is running. """

    def setUp(self):
        pass

    def test_00_Print(self):
        print('Id: test_web_utils')
class C1_Rooms(SetupMixin, unittest.TestCase):
    """ Exercise reading room definitions from XML and encoding them to
    JSON.

    NOTE(review): the assertions are commented out, so these currently only
    verify that read_rooms_xml/encode_json run without raising.
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))
        self.m_api = roomsXML()

    def test_01_Room(self):
        l_rooms = self.m_api.read_rooms_xml(self.m_pyhouse_obj)
        l_json = unicode(json_tools.encode_json(l_rooms))
        # print(PrettyFormatAny.form(l_json, 'C1-01-A - Decode'))
        # self.assertEqual(self.jsonPair(l_json, 0), l_rooms)

    def test_02_Rooms(self):
        l_rooms = self.m_api.read_rooms_xml(self.m_pyhouse_obj)
        l_json = unicode(json_tools.encode_json(l_rooms))
        # print(PrettyFormatAny.form(l_json, 'C1-02-A - Decode'))
class C2_House(SetupMixin, unittest.TestCase):
    """ Smoke test access to the House object built from the XML fixture. """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_Before(self):
        # NOTE(review): only checks that House is reachable; l_house and
        # l_house2 are unused placeholders
        l_house = self.m_pyhouse_obj.House
        # print(PrettyFormatAny.form(l_house, 'C2-01-A - House'))
        l_house2 = {}
class D1_Json(SetupMixin, unittest.TestCase):
    """ Verify the JSON produced for the House and Computer information. """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_HouseInfo(self):
        """ House JSON must round-trip the basic house attributes. """
        l_json = web_utils.GetJSONHouseInfo(self.m_pyhouse_obj)
        l_obj = jsonpickle.decode(l_json)
        # print(PrettyFormatAny.form(l_obj, 'D1-01-A - House'))
        # assertEquals is a deprecated alias - use assertEqual
        self.assertEqual(l_obj['Name'], TESTING_HOUSE_NAME)
        self.assertEqual(l_obj['Key'], TESTING_HOUSE_KEY)
        self.assertEqual(l_obj['Active'], TESTING_HOUSE_ACTIVE)
        self.assertEqual(l_obj['Controllers'], {})

    def test_02_ComputerInfo(self):
        """ Computer JSON must decode without raising. """
        l_json = web_utils.GetJSONComputerInfo(self.m_pyhouse_obj)
        _l_obj = jsonpickle.decode(l_json)
class E1_Json(SetupMixin, unittest.TestCase):
    """ Smoke test decoding the house JSON (no assertions yet). """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_RoomInfo(self):
        l_json = web_utils.GetJSONHouseInfo(self.m_pyhouse_obj)
        l_obj = jsonpickle.decode(l_json)
        # print(PrettyFormatAny.form(l_obj, 'E1-01-A - Decode'))
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Computer/Web/test/test_web_utils.py | Python | mit | 3,750 | [
"Brian"
] | b251bfd49949009a1fc4d6d414c165bd2f4029cc265cf9d2165c013628e58226 |
"""
Recode homopolymer regions, adding/subtracting bases if the difference is
within some threshold
"""
import argparse
from contextlib import closing
import itertools
import logging
import operator
import pysam
from .. import gff3, pwalign, rle, samutil
from ..util import isolate_region
log = logging.getLogger(__name__.rpartition('.')[-1])
BAM_CMATCH = 0
class MissingReferenceSequenceError(ValueError):
    """Raised when a read maps to a reference sequence for which the FASTA
    yields no bases."""
    pass
def parse_gff3(fp):
    """
    Read mutations from a GFF3 file

    Only records typed ``possible_base_call_error`` are kept; a lazy
    generator over the matching records is returned.
    """
    parsed = gff3.parse(fp)
    return (rec for rec in parsed
            if rec.type == 'possible_base_call_error')
def rle_bases(rle_encoded):
    """Concatenate the base character of each run in a run-length encoded
    sequence (one character per run, run lengths ignored)."""
    chars = [run.c for run in rle_encoded]
    return ''.join(chars)
def correct_region(aligned_pairs, max_diff=1):
    """Re-align one homopolymer region of a read against the reference.

    If read and reference contain the same bases in the same order after
    run-length encoding, and each run length differs by at most
    ``max_diff``, the aligned pairs are rewritten so the read adopts the
    reference run lengths.  Otherwise the pairs are returned unchanged.
    """
    aligned_pairs = list(aligned_pairs)
    # Check for fully-aligning sequence
    if all(i.qpos is not None and i.rpos is not None for i in aligned_pairs):
        return aligned_pairs
    ref_bases = ''.join(i.rbase for i in aligned_pairs if i.rbase)
    read_bases = ''.join(i.qbase for i in aligned_pairs if i.qbase)
    if not read_bases:
        # region is a pure deletion in the read - nothing to adjust
        return aligned_pairs
    # run-length encode both; identical RLE base strings mean the
    # difference is confined to homopolymer lengths
    ref_rle = rle.encode(ref_bases)
    ref_rle_bases = rle_bases(ref_rle)
    read_rle = rle.encode(read_bases)
    read_rle_bases = rle_bases(read_rle)
    aref, aqry, _ = pwalign.pw_global(ref_rle_bases, read_rle_bases)
    if '-' in aref or '-' in aqry:
        # No perfect alignment after RLE
        return aligned_pairs
    # Maximum number of hp changes to make
    max_hp = max(abs(r.length - q.length) for q, r in zip(read_rle, ref_rle))
    if max_hp == 0 or max_hp > max_diff:
        # Too much to correct / nothing to correct
        return aligned_pairs
    # Okay, actually fix: emit one pair per reference base, with fresh
    # query/reference coordinates counting up from the first defined ones
    # (``.next`` - this module is Python 2)
    ref_idx = itertools.count(next(i.rpos for i in aligned_pairs if i.rpos is not None)).next
    qry_idx = itertools.count(next(i.qpos for i in aligned_pairs if i.qpos is not None)).next
    # remember original per-position qualities so they can be restored
    qqual = {i.qpos: i.qual for i in aligned_pairs if i.qpos is not None}
    result = [samutil.AlignedPair(qpos=qry_idx(),
                                  rpos=ref_idx(),
                                  qbase=q.c,
                                  rbase=r.c,
                                  qual=chr(33+5),
                                  cigar_op=BAM_CMATCH)
              for q, r in zip(read_rle, ref_rle)
              for i in xrange(r.length)]
    # restore original quality where the read had one; inserted bases keep
    # a low placeholder quality (chr(33+1))
    result = [i._replace(qual=qqual.get(i.qpos, chr(33+1)))
              if i.qpos is not None else i
              for i in result]
    return result
def hp_correct(read, reference, regions, max_diff=1):
    """Apply homopolymer correction to ``read`` over the given
    ``regions`` (iterable of (start, end) reference coordinates),
    updating seq, qual and cigar in place and returning the read.
    """
    all_pairs = list(samutil.all_pairs_iter(read, reference))
    ap = [(i.qpos, i.rpos) for i in all_pairs]
    # aligned positions falling inside any flagged region
    in_region = frozenset(i for (s, e) in regions
                          for i in isolate_region(ap, s, e))
    # split the alignment into in-region / out-of-region runs and correct
    # only the in-region ones
    grouped = itertools.groupby(all_pairs, lambda x: (x.qpos, x.rpos) in in_region)
    result = [i for g, v in grouped
              for i in (correct_region(v, max_diff=max_diff) if g else v)]
    read.seq = ''.join(i.qbase for i in result if i.qbase is not None)
    read.qual = ''.join(i.qual for i in result if i.qbase is not None)
    # rebuild the CIGAR by run-length encoding the per-base operations
    read.cigar = [(op, sum(True for i in v))
                  for op, v in itertools.groupby(i.cigar_op for i in result)]
    return read
def main():
    """Command-line entry point: read homopolymer-error regions from a GFF3
    file and write a BAM whose reads have been HP-corrected."""
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('-m', '--max-diff', type=int, default=1, help="""Maximum
            difference between observed HP length and expected HP length
            to correct""")
    p.add_argument('reference', type=pysam.Fastafile)
    p.add_argument('gff3_file', type=argparse.FileType('r'))
    p.add_argument('input_bam', type=pysam.Samfile)
    p.add_argument('output_bam')
    a = p.parse_args()

    with a.gff3_file as fp:
        # group the flagged regions by reference sequence id
        k = operator.attrgetter('seqid')
        regions = {s: list(g)
                   for s, g in itertools.groupby(sorted(parse_gff3(fp), key=k), k)}

    with closing(a.input_bam), closing(a.reference), \
            closing(pysam.Samfile(a.output_bam, 'wb', template=a.input_bam)) as out_bam:
        # preload (name, bases) per reference tid for quick lookup per read
        available_references = {i: (r, a.reference.fetch(r)) for i, r in enumerate(a.input_bam.references)}
        for read in a.input_bam:
            ref_name, ref_bases = available_references[read.tid]
            if not ref_bases:
                raise MissingReferenceSequenceError(ref_name)
            reg = [(region.start0, region.end) for region in regions[ref_name]]
            read = hp_correct(read, ref_bases, reg, max_diff=a.max_diff)
            out_bam.write(read)
| fhcrc/prepdrm | python/prep_drm/scripts/recode_homopolymer_regions.py | Python | gpl-3.0 | 4,593 | [
"pysam"
] | 702a2b59f8dad6fd055decf045a84e15c227e52b8526b2d5370b6f6df5fbd438 |
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# required external modules
import unittest
import os
# required modules from pycast
from pycast.common.profileme import profileMe
class ProfileMeDecoratorTest(unittest.TestCase):
    """Test class containing all tests for the @profileMe decorator."""

    def setUp(self):
        """Initializes the environment for each test."""
        self.statfiles = ["statfile1", "statfile2"]

    def tearDown(self):
        """This function gets called after each test function."""
        # remove any profile statistics files a test left behind
        for statfile in self.statfiles:
            if os.path.isfile(statfile):
                os.remove(statfile)

    def profile_data_creation_test(self):
        """Testing successful profile data creation."""
        statfile = self.statfiles[0]

        @profileMe(statfile)
        def dummy_func():
            """This is a (nearly) empty dummy function that needs to be profiled.

            The function evaluates, if the formula for the gaussian sum is correct.
            """
            sumUpTo = 1000
            summedVals = sum(xrange(sumUpTo + 1))
            easySum = (sumUpTo * (sumUpTo + 1)) / 2
            return easySum == summedVals

        # unittest assertions give clearer failure output than a bare
        # ``raise AssertionError``
        self.assertTrue(dummy_func())
        self.assertTrue(os.path.isfile(statfile))

    def profile_function_name_test(self):
        """Test the validity of __name__ for any decorated function."""
        statfile = self.statfiles[0]

        @profileMe(statfile)
        def dummy_func():
            """This is a (nearly) empty dummy function that needs to be profiled.

            The function evaluates, if the formula for the gaussian sum is correct.
            """
            sumUpTo = 1000
            summedVals = sum(xrange(sumUpTo + 1))
            easySum = (sumUpTo * (sumUpTo + 1)) / 2
            return easySum == summedVals

        self.assertTrue(dummy_func())
        # the decorator must preserve the wrapped function's __name__
        self.assertEqual(dummy_func.__name__, "dummy_func")

    def profile_doc_string_test(self):
        """Test the validity of __doc__ for any decorated function."""
        statfile = self.statfiles[0]

        @profileMe(statfile)
        def dummy_func():
            """StupidDocString"""
            sumUpTo = 1000
            summedVals = sum(xrange(sumUpTo + 1))
            easySum = (sumUpTo * (sumUpTo + 1)) / 2
            return easySum == summedVals

        self.assertTrue(dummy_func())
        # the decorator must preserve the wrapped function's __doc__
        self.assertEqual(dummy_func.__doc__, """StupidDocString""")
| T-002/pycast | pycast/tests/profilemetest.py | Python | mit | 3,823 | [
"Gaussian"
] | f5ed12abaeeaefa3410be6213b0d115ec7f38062c7b2387caf4fb77cf4abdc98 |
import json
from base64 import b64decode
from urllib.parse import urlencode, quote_plus
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from .objects import *
from .util import parse_bugzilla_datetime
class BugzillaException(Exception):
    """Raised when the Bugzilla REST API answers with an error object.

    Carries the numeric bugzilla error code alongside the message.
    """

    def __init__(self, code, *args, **kw):
        Exception.__init__(self, *args, **kw)
        self.error_code = code

    def __str__(self):
        base = Exception.__str__(self)
        return "Error code %i: %s" % (self.error_code, base)

    def get_error_code(self):
        """Return the numeric error code reported by the server."""
        return self.error_code
class Bugzilla:
def __init__(self, url, api_key=None):
    """Create a client for the bugzilla instance at ``url``.

    The url must end with a slash; the "rest/" suffix is appended when
    it is missing.  ``api_key`` is used for authenticated requests.
    """
    if not url.endswith("/"):
        raise ValueError("Url has to end with /")
    # normalize to the REST endpoint
    self.url = url if url.endswith("rest/") else url + "rest/"
    self.api_key = api_key
    self.charset = "utf-8"
def get_api_key(self):
    """Return the api key used for authenticated requests (or None)."""
    return self.api_key
def set_api_key(self, key):
    """Set the api key used for all subsequent requests."""
    self.api_key = key
# a little helper function to encode url-parameters
def _quote(self, string):
    """Percent-encode ``string`` for safe use inside a url."""
    return quote_plus(string)
def _get(self, path, **kw):
    """Issue a GET request against ``path`` (no body)."""
    return self._read_request("GET", path, None, **kw)
def _post(self, path, data, **kw):
    """Issue a POST request against ``path`` with a JSON body."""
    return self._read_request("POST", path, data, **kw)
def _put(self, path, data, **kw):
    """Issue a PUT request against ``path`` with a JSON body."""
    return self._read_request("PUT", path, data, **kw)
def _delete(self, path, data, **kw):
    """Issue a DELETE request against ``path`` with a JSON body."""
    return self._read_request("DELETE", path, data, **kw)
def _read_request(self, method, path, post_data, **kw):
    """Perform one HTTP request against the REST endpoint and decode the
    JSON answer.

    Raises BugzillaException when the server returns an error object,
    and re-raises HTTPError when the error response has no JSON body.
    """
    if self.api_key:
        kw["api_key"] = self.api_key
    # sequences supplied for the in/exclude_fields will automatically be comma-joined
    if not isinstance(kw.get("include_fields", ""), str):
        kw["include_fields"] = ",".join(kw["include_fields"])
    if not isinstance(kw.get("exclude_fields", ""), str):
        kw["exclude_fields"] = ",".join(kw["exclude_fields"])
    if post_data is not None: post_data = json.dumps(post_data).encode("utf-8")
    query = urlencode(kw, True)
    if query: query = "?" + query
    url = self.url + path + query
    try:
        request = Request(url, post_data)
        # urllib would infer GET/POST from the body; force the verb
        request.get_method = lambda: method
        if post_data is not None:
            request.add_header("Content-type", "application/json")
        data = urlopen(request).read()
        obj = json.loads(data.decode(self.charset))
    except HTTPError as e:
        # some api-errors set the http-status, so here we might still get
        # a valid json-object that will result in a bugzilla-error
        data = e.fp.read()
        try:
            obj = json.loads(data.decode(self.charset))
        except ValueError:
            # no valid api-response, maybe a http-500 or something else
            raise e
    if isinstance(obj, dict) and obj.get("error"):
        raise BugzillaException(obj["code"], obj["message"])
    return obj
def _map(self, dct, key, func):
    """Apply ``func`` in place to ``dct[key]`` if the key exists; when the
    value is a list, apply it to every element instead."""
    if key in dct:
        if isinstance(dct[key], list):
            dct[key] = [func(obj) for obj in dct[key]]
        else:
            dct[key] = func(dct[key])
def _get_attachment(self, data):
    """Convert a raw attachment dict into an Attachment, parsing dates,
    decoding the payload and normalizing boolean flags."""
    self._map(data, "creation_time", parse_bugzilla_datetime)
    self._map(data, "last_change_time", parse_bugzilla_datetime)
    self._map(data, "data", b64decode)
    self._map(data, "is_private", bool)
    self._map(data, "is_obsolete", bool)
    self._map(data, "is_patch", bool)
    self._map(data, "flags", self._get_attachment_flag)
    # "size" is dropped - presumably because it describes the encoded
    # payload rather than the decoded data; confirm before relying on it
    if "size" in data: del data["size"]
    return Attachment(data)
def _get_attachment_flag(self, data):
    """Convert a raw flag dict into an AttachmentFlag, parsing its dates."""
    self._map(data, "creation_date", parse_bugzilla_datetime)
    self._map(data, "modification_date", parse_bugzilla_datetime)
    return AttachmentFlag(data)
def _get_bug(self, data):
    """Convert a raw bug dict into a Bug, normalizing dates, booleans and
    flag sub-objects."""
    self._map(data, "creation_time", parse_bugzilla_datetime)
    self._map(data, "flags", self._get_attachment_flag)
    self._map(data, "is_cc_accessible", bool)
    self._map(data, "is_confirmed", bool)
    self._map(data, "is_open", bool)
    self._map(data, "is_creator_accessible", bool)
    self._map(data, "last_change_time", parse_bugzilla_datetime)
    return Bug(data)
def _get_history(self, data):
    """Convert a raw history entry into a History with Change objects."""
    self._map(data, "when", parse_bugzilla_datetime)
    self._map(data, "changes", Change)
    return History(data)
def _get_product(self, data):
    """Convert a raw product dict into a Product with typed sub-objects."""
    self._map(data, "components", self._get_component)
    self._map(data, "versions", self._get_version)
    self._map(data, "milestones", self._get_milestone)
    return Product(data)
def _get_component(self, data):
    """Convert a raw component dict into a Component; flag type lists are
    nested under flag_types["bug"] / flag_types["attachment"]."""
    if "flag_types" in data:
        self._map(data["flag_types"], "bug", self._get_flag_type)
        self._map(data["flag_types"], "attachment", self._get_flag_type)
    return Component(data)
def _get_flag_type(self, data):
    """Wrap a raw flag type dict in a FlagType object."""
    return FlagType(data)
def _get_version(self, data):
    """Wrap a raw version dict in a Version object."""
    return Version(data)
def _get_milestone(self, data):
    """Wrap a raw milestone dict in a Milestone object."""
    return Milestone(data)
def _get_classification(self, data):
    """Wrap a raw classification dict in a Classification object."""
    return Classification(data)
def _get_update_result(self, data):
    """Convert a raw update result dict into an UpdateResult."""
    self._map(data, "last_change_time", parse_bugzilla_datetime)
    return UpdateResult(data)
def _get_comment(self, data):
    """Convert a raw comment dict into a Comment, parsing its dates."""
    self._map(data, "time", parse_bugzilla_datetime)
    self._map(data, "creation_time", parse_bugzilla_datetime)
    return Comment(data)
def _get_field(self, data):
    """Convert a raw field dict into a BugField with typed values."""
    self._map(data, "values", BugFieldValue)
    return BugField(data)
def _get_user(self, data):
    """Convert a raw user dict into a User with groups and saved
    searches/reports."""
    self._map(data, "groups", self._get_group)
    self._map(data, "saved_searches", Search)
    self._map(data, "saved_reports", Search)
    return User(data)
def _get_group(self, data):
    """Convert a raw group dict into a Group with User membership."""
    self._map(data, "membership", self._get_user)
    return Group(data)
def get_version(self):
    """
    Gets the bugzilla-version, usually in the format X.X or X.X.X
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bugzilla.html#version
    """
    response = self._get("version")
    return response["version"]
def get_extensions(self):
    """
    Gets all the installed extensions. Returns a dict in which the keys describe
    the extension name, the values are also a dict. The value has one key, "version",
    containing the extension-version.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bugzilla.html#extensions
    """
    response = self._get("extensions")
    return response["extensions"]
def get_time(self):
    """
    Returns the local times for the bugzilla web- and the database-server. The return
    value is a dict, in which the key "db_time" refers to the database-time and the
    key "web_time" refers to the webserver-time. Also, older versions of bugzilla
    might return more fields. For that refer to the documentation-link provided.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bugzilla.html#time
    """
    times = self._get("time")
    # parse every known timestamp field that is present in the answer
    for key in ("db_time", "web_time", "web_time_utc"):
        self._map(times, key, parse_bugzilla_datetime)
    return times
def get_parameters(self, **kw):
    """
    Returns a dict containing the configuration-parameters of the bugzilla-instance.
    If no api-key is specified only a few parameters will be returned. For a
    complete list of parameters see the link.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bugzilla.html#parameters
    """
    response = self._get("parameters", **kw)
    return response["parameters"]
def get_last_audit_time(self, class_=None):
    """
    Returns the last audit time for a given class. The class can be "Bugzilla::Component"
    or something similar. Apparently, if no class is given, "Bugzilla::Product" will be
    assumed.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bugzilla.html#last-audit-time
    """
    # "class" is a reserved word in python, hence the class_ parameter
    params = {"class": class_} if class_ is not None else {}
    raw = self._get("last_audit_time", **params)["last_audit_time"]
    return parse_bugzilla_datetime(raw)
def get_attachment(self, attachment_id, **kw):
    """
    Returns the attachment with the given id. The id has to be an int.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/attachment.html#get-attachment
    """
    response = self._get("bug/attachment/%i" % attachment_id, **kw)
    raw = response["attachments"][str(attachment_id)]
    return self._get_attachment(raw)
def get_attachments_by_bug(self, bug, **kw):
    """
    Returns the attachments for a given bug. The parameter bug can be a
    bug-object, a bug-id or a bug-alias.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/attachment.html#get-attachment
    """
    if isinstance(bug, Bug):
        bug_id = str(bug.id)
    else:
        bug_id = str(bug)
    response = self._get("bug/%s/attachment" % self._quote(bug_id), **kw)
    return [self._get_attachment(item) for item in response["bugs"][bug_id]]
def get_bug(self, bug_id, **kw):
    """
    Returns the bug for the given id. The parameter bug_id can be a numeric id or
    a bug alias.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bug.html#get-bug
    """
    bug_id = str(bug_id)
    response = self._get("bug/" + self._quote(bug_id), **kw)
    return self._get_bug(response["bugs"][0])
def search_bugs(self, **kw):
    """
    Search bugzilla for bugs. Several keyword-parameters can be passed to specify
    the search; see the upstream documentation for the full list.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bug.html#search-bugs
    """
    response = self._get("bug", **kw)
    return [self._get_bug(data) for data in response["bugs"]]
def get_bug_history(self, bug_id, **kw):
    """
    Return the history for a specific bug. The bug_id can be a numeric id or a bug-alias.
    The optional keyword-parameter new_since can be passed to only get the history after
    a specific date (must be an encoded date, NO DATETIME)
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bug.html#bug-history
    """
    bug_id = str(bug_id)
    response = self._get("bug/%s/history" % self._quote(bug_id), **kw)
    return [self._get_history(entry) for entry in response["bugs"][0]["history"]]
def get_selectable_product_ids(self):
    """
    Return a sorted list of selectable product ids.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/product.html#list-products
    """
    ids = self._get("product_selectable")["ids"]
    return sorted(int(i) for i in ids)
def get_accessible_product_ids(self):
    """
    Return a sorted list of accessible product ids.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/product.html#list-products
    """
    ids = self._get("product_accessible")["ids"]
    return sorted(int(i) for i in ids)
def get_enterable_product_ids(self):
    """
    Return a sorted list of enterable product ids.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/product.html#list-products
    """
    ids = self._get("product_enterable")["ids"]
    return sorted(int(i) for i in ids)
def get_product(self, product_id = None, **kw):
    """
    Get products by id or search parameters. The product_id can be a product-id or a
    product-name. Optional keyword-parameters are:

    ids:    A list of ids to get the products for
    names:  A list of product-names to get the corresponding products
    type:   A product type, can be "accessible", "selectable" or "enterable".
            It can be a list containing multiple of these values.

    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/product.html#get-product
    """
    path = "product"
    if product_id is not None: path += "/" + self._quote(str(product_id))
    # NOTE(review): only the first element of the "products" list is
    # returned, even when several ids/names are requested - confirm
    # whether multi-product queries should return all of them
    return self._get_product(self._get(path, **kw)["products"][0])
def get_classification(self, c_id, **kw):
    """
    Get a classification by its numeric id or name. The parameter c_id can be both.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/classification.html#get-classification
    """
    c_id = str(c_id)
    response = self._get("classification/" + self._quote(c_id), **kw)
    return [self._get_classification(obj) for obj in response["classifications"]]
def get_comments_by_bug(self, bug_id, **kw):
    """
    Get the list of comments for a given bug. The parameter bug_id can be a bug-object,
    a bug-id or a bug-alias. The optional keyword-parameter new_since can be passed to
    filter for comments after this datetime. The parameter has to be an encoded datetime.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/comment.html#get-comments
    """
    # Bug fix: the original referenced an undefined name ``bug``; the
    # parameter is called ``bug_id`` (it may also be a Bug instance).
    bug_id = str(bug_id.id if isinstance(bug_id, Bug) else bug_id)
    data = self._get("bug/%s/comment" % self._quote(bug_id), **kw)
    return [self._get_comment(obj) for obj in data["bugs"][bug_id]["comments"]]
def get_comment(self, c_id, **kw):
    """
    Gets the comment with the given id. The id has to be an int. This method has two
    optional keyword-parameters:
    comment_ids: A list of comment_ids to get additional comments
    ids: A list of bug-ids to get additional comments for
    new_since: Filter and only return comments after a specific datetime. The value for this
    parameter has to be an encoded datetime.
    Note that comment_ids overwrites new_since, comments with ids from comment_ids will be
    returned even if they are older than new_since.
    If only c_id is given a comment is returned, otherwise a list of comments.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/comment.html#get-comments
    """
    data = self._get("bug/comment/%i" % c_id, **kw)
    # comments requested directly by id come back under "comments" ...
    comments = [self._get_comment(data["comments"][key]) for key in data["comments"]]
    # ... while comments requested per bug (ids-parameter) are grouped by bug-id
    for bug_id in data["bugs"]:
        comments.extend(self._get_comment(obj) for obj in data["bugs"][bug_id]["comments"])
    if not kw:
        return comments[0]  # only the id was given
    else:
        return comments
def search_comment_tags(self, query, **kw):
    """
    Search for tags which contain the given substring. The keyword-parameter
    limit caps the number of results (default 10). A list of strings is returned.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/comment.html#search-comment-tags
    """
    path = "bug/comment/tags/" + self._quote(query)
    return self._get(path, **kw)
def get_last_visited(self, bug_ids = None, **kw):
    """
    Get the last-visited timestamp for one or multiple bugs. The parameter bug_ids
    can be a bug-id, a list of bug-ids or not set if you want the last 20 visited bugs.
    The return value is a list of dicts, each containing the keys "id" (the bug id)
    and "last_visit_ts" (the last-visit-timestamp)
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/bug-user-last-visit.html#get-last-visited
    """
    if bug_ids is None or isinstance(bug_ids, int):
        url = "bug_user_last_visit" + ("" if bug_ids is None else "/" + str(bug_ids))
    else:
        # a list of ids: the first goes into the path, the rest into the query
        url = "bug_user_last_visit/%i" % bug_ids[0]
        kw["ids"] = bug_ids[1:]
    data = self._get(url, **kw)
    # Bug fix: parse the timestamp of each individual entry; the original
    # passed the whole list ``data`` to _map instead of ``obj``
    # (cf. update_last_visited, which does this correctly).
    for obj in data:
        self._map(obj, "last_visit_ts", parse_bugzilla_datetime)
    return data
def get_fields(self, id_or_name = None, **kw):
    """
    Get all fields or a specific one. To specify a field, pass its id or name
    as parameter.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/field.html#fields
    """
    path = "field/bug"
    if id_or_name is not None:
        path += "/" + self._quote(str(id_or_name))
    response = self._get(path, **kw)
    return [self._get_field(field) for field in response["fields"]]
def get_user(self, user_id = None, **kw):
    """
    Get one user by id or name. The parameter user_id can be the user name or
    its id.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/user.html#get-user
    """
    path = "user/" + self._quote(str(user_id))
    response = self._get(path, **kw)
    return self._get_user(response["users"][0])
def get_flag_types(self, product, component = None, **kw):
    """
    Get flag-types for a product and optionally a products component.
    As for now, both parameters have to be strings, the products and components
    name respectively.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/flagtype.html#get-flag-type
    """
    path = "flag_types/" + self._quote(product)
    if component is not None:
        path += "/" + self._quote(component)
    data = self._get(path, **kw)
    # convert both sections (when present) to FlagType objects in place
    for section in ("bug", "attachment"):
        if section in data:
            data[section] = [self._get_flag_type(obj) for obj in data[section]]
    return data
def get_attachment_flag_types(self, product, component = None, **kw):
    """
    Like get_flag_types, but return only the attachment-flag-types.
    All parameters are the same as in get_flag_types.
    """
    all_types = self.get_flag_types(product, component, **kw)
    return all_types["attachment"]
def get_bug_flag_types(self, product, component = None, **kw):
    """
    Like get_flag_types, but return only the bug-flag-types.
    All parameters are the same as in get_flag_types.
    """
    all_types = self.get_flag_types(product, component, **kw)
    return all_types["bug"]
def search_users(self, **kw):
    """
    Search for one or more users by one or more search-parameters. The result
    will be a list of users.
    Valid keyword-parameters are:
    ids: A list of user-ids. You have to be logged in to use this.
    names: A list of user-names.
    match: A list of strings. Bugzilla will search for users whose login-name or
    real-name contains one of these strings. You have to be logged in to use that.
    limit: A limit of users matched by the match-parameter. Be wary that bugzilla
    itself has its own limit and will use it if your limit is higher.
    group_ids: A list of group-ids that users can be in.
    groups: Same as group_ids.
    include_disabled: include disabled users, even if the do not match the match-parameter
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/user.html#get-user
    """
    response = self._get("user", **kw)
    return [self._get_user(entry) for entry in response["users"]]
def whoami(self, **kw):
    """
    Returns the user you are currently logged in. Therefore you have to set
    the api-key since this library does not support other methods of authentication.
    https://bugzilla.readthedocs.io/en/latest/api/core/v1/user.html#who-am-i
    """
    data = self._get("whoami", **kw)
    return User(data)
def get_group(self, group_id, **kw):
    """
    Get the group specified by the given id. The parameter group_id can be a numeric id or
    a group-name. Since this method currently uses a workaround every group-id passed to
    this method has to be an int. Passing "42" will search for the name "42", not the id.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/group.html#get-group
    """
    # TODO: find the solution
    # Workaround kept from the original: bugzilla 5.0 does not resolve the
    # group/<id> resource, so delegate to search_groups by name or by id.
    # The original direct call was:
    #   self._get("group/" + self._quote(str(group_id)), **kw)["groups"][0]
    if isinstance(group_id, str):
        matches = self.search_groups(names = [group_id], **kw)
    else:
        matches = self.search_groups(ids = [group_id], **kw)
    return matches[0]
def search_groups(self, **kw):
    """
    Search for one or more groups by one or more search-parameters. The result will be
    a list of groups.
    Valid keyword-parameters are:
    ids: A list of group-ids
    names: A list of group-names
    membership: If set to 1, then a list of members is returned for each group
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/group.html#get-group
    """
    response = self._get("group", **kw)
    return [self._get_group(entry) for entry in response["groups"]]
def update_last_visited(self, bug_ids, **kw):
    """
    Update the last-visited time for the given bug-ids. You have to be logged in to use
    this method. bug_ids can be a single bug_id or a list of ids. The return value is a
    list of dicts, each having two keys. The key "id" contains the bug-id updated and
    "last_visit_ts" contains the new last-visited timestamp.
    https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug-user-last-visit.html#update-last-visited
    """
    if isinstance(bug_ids, int):
        url, payload = "bug_user_last_visit/" + str(bug_ids), None
    else:
        url, payload = "bug_user_last_visit", {"ids": bug_ids}
    result = self._post(url, payload, **kw)
    for entry in result:
        self._map(entry, "last_visit_ts", parse_bugzilla_datetime)
    return result
def add_attachment(self, attachment, ids, comment = ""):
    """
    Add the given attachment to one or more bug, given its id/ids. The attachment has to
    have all required fields set to valid values. These fields are data, file_name, summary
    and content_type. If these are not set bugzilla will not even send a request.
    The ids-parameter has to be a bug-id or a list of those. Optionally, a comment can
    be passed to add a comment along with the attachment.
    If successful, an attachment is added for each given bug-id and a list of newly created
    attachment-ids is returned.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/attachment.html#create-attachment
    """
    if isinstance(ids, int):
        ids = [ids]
    if not attachment.can_be_added():
        raise BugzillaException(-1, "This attachment does not have the required fields set")
    payload = attachment.add_json()
    payload["ids"] = ids
    payload["comment"] = comment
    created = self._post("bug/%i/attachment" % ids[0], payload)["ids"]
    return [int(new_id) for new_id in created]
def update_attachment(self, attachment, ids = None, comment = ""):
    """
    Update one or more attachments. You can specify a list of attachment-ids whose respective
    attachment shall be updated with this attachment's fields. If none are given, only the
    given attachment will be updated. Optionally you can specify a comment to add to the
    attachment(s).
    The return-value will be a list of UpdateResult's, describing the changes for each attachment.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/attachment.html#update-attachment
    """
    if ids is None:
        ids = attachment.id
    if isinstance(ids, int):
        ids = [ids]
    if not attachment.can_be_updated():
        raise BugzillaException(-1, "This attachment does not have the required fields set")
    payload = attachment.update_json()
    payload["ids"] = ids
    payload["comment"] = comment
    changed = self._put("bug/attachment/%i" % ids[0], payload)["attachments"]
    return [self._get_update_result(obj) for obj in changed]
def add_bug(self, bug, **kw):
    """
    Create a new bug from the given bug-object; returns the new bug's id.
    https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html
    """
    if not bug.can_be_added():
        raise BugzillaException(-1, "This bug does not have the required fields set")
    payload = bug.add_json()
    payload.update(kw)
    return int(self._post("bug", payload)["id"])
# since there are so many array fields that are updated via an add/remove/set-object,
# this method has 3 additional parameters.
# specifying add = {"keywords": ["key", "word"]} will result in {"keywords": {"add": ["key", "word"]}}
# in the final update-object. These parameters will be removed once I find a better way.
# Note: these 3 parameters overwrite keyword-parameters
# TODO: find a better way.
def update_bug(self, bug, ids = None, add = None, remove = None, set_ = None, **kw):
    'https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#update-bug'
    # Fix: use None-defaults instead of mutable default arguments ({} was
    # shared across calls); semantics for callers are unchanged.
    add = {} if add is None else add
    remove = {} if remove is None else remove
    set_ = {} if set_ is None else set_
    if ids is None: ids = bug.id
    if isinstance(ids, int): ids = [ids]
    # the use of add/remove and set at the same time is not permitted
    if (set(add.keys()) | set(remove.keys())) & set(set_.keys()):
        raise ValueError("You can not use the same keys in _set and add/remove")
    asr = {}  # merged add/set/remove instructions, keyed by field name
    for key in (set(add.keys()) | set(remove.keys())): asr[key] = {}
    for key in add: asr[key]["add"] = add[key]
    for key in remove: asr[key]["remove"] = remove[key]
    for key in set_: asr[key] = {"set": set_[key]}
    if not bug.can_be_updated():
        raise BugzillaException(-1, "This bug does not have the required fields set")
    data = bug.update_json()
    data["ids"] = ids
    data.update(kw)
    data.update(asr)  # note: asr overwrites keyword-parameters
    return [self._get_update_result(obj) for obj in self._put("bug/%i" % ids[0], data)["bugs"]]
def add_comment(self, comment, bug_id, **kw):
    """
    Add a comment to a bug. The comment's text-field has to be set and mustn't
    consist of whitespace only. You can also pass the work_time-keyword-parameter
    to add that many "hours worked" on the bug. That parameter must be a float.
    On success the newly created comment's id is returned.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/comment.html#create-comments
    """
    if not comment.can_be_added():
        raise BugzillaException(-1, "This comment does not have the required fields set")
    payload = comment.add_json()
    payload.update(kw)
    path = "bug/%s/comment" % self._quote(str(bug_id))
    return int(self._post(path, payload)["id"])
def update_comment_tags(self, comment_id, add = None, remove = None):
    """
    Update a comments tags by passing the comments id and a list of tags to add or to
    remove.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/comment.html#update-comment-tags
    """
    # Fix: use None-defaults instead of mutable default arguments ([] was
    # shared across calls); semantics for callers are unchanged.
    add = [] if add is None else add
    remove = [] if remove is None else remove
    comment_id = str(comment_id)
    data = {"comment_id": comment_id, "add": add, "remove": remove}
    return self._put("bug/comment/%s/tags" % comment_id, data)
def add_component(self, component, product, **kw):
    """
    Add a component to a product. The component must have the required fields set, which
    are name, description and default_assignee. The product must be a product-object or
    a string. This method has two optional keyword-parameters, default_cc, a list of login-
    names, and is_open, 0 or 1. On success, the id of the newly created component is returned.
    """
    if not component.can_be_added():
        raise BugzillaException(-1, "This component does not have the required fields set")
    # accept either a product-object or the product's name
    product_name = product if isinstance(product, str) else product.name
    payload = component.add_json()
    payload["product"] = product_name
    payload.update(kw)
    return int(self._post("component", payload)["id"])
# both of the methods are not tested yet because i was too lazy to install the latest version
def update_component(self, component, product = None, **kw):
    """
    Update a component. It is addressed either by its numeric id or, when the
    product name is given, by the (product, component-name) pair.
    https://bugzilla.readthedocs.io/en/latest/api/core/v1/component.html#update-component
    """
    data = component.update_json()
    if product is None:
        # address the component by its id
        path = str(component.id)
        data["ids"] = component.id
    else:
        # address the component by product- and component-name
        path = self._quote(product) + "/" + self._quote(component.name)
        data["names"] = [{"product": product, "component": component.name}]
    data.update(kw)
    return self._put("component/" + path, data)["components"]
def delete_component(self, component_id, product = None):
    """
    Delete a component. The component is either specified by its id or by its and its product
    name. Therefore this method can be called with an int or two strings. The return-value
    is the id of the deleted component.
    https://bugzilla.readthedocs.io/en/latest/api/core/v1/component.html#delete-component
    """
    if product is None:
        path = str(component_id)
    else:
        path = "%s/%s" % (self._quote(product), self._quote(str(component_id)))
    deleted = self._delete("component/" + path, None)["components"]
    return deleted[0]["id"]
def add_group(self, group, **kw):
    """
    Add a group to bugzilla. That group must have its fields name and description set.
    Also, the keyword-parameter icon_url can be passed, specifying an URL pointing to an
    icon that will be used for this group. The return-value will be the newly created
    group-id.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/group.html#create-group
    """
    if not group.can_be_added():
        raise BugzillaException(-1, "This group does not have the required fields set")
    payload = group.add_json()
    payload.update(kw)
    return int(self._post("group", payload)["id"])
def update_group(self, group, ids = None, **kw):
    """
    Update one or several groups. By default, only the given group will be updated. If
    you pass a list of group-ids in the ids-parameter then these groups will be updated.
    Also, this method accepts the same keyword-parameters as add_group.
    The return value will be a list of UpdateResult's containing all changes.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/group.html#update-group
    """
    if ids is None:
        ids = group.id
    if isinstance(ids, int):
        ids = [ids]
    if not group.can_be_updated():
        raise BugzillaException(-1, "This group does not have the required fields set")
    payload = group.update_json()
    payload.update(kw)
    payload["ids"] = ids
    changed = self._put("group/%i" % ids[0], payload)["groups"]
    return [UpdateResult(obj) for obj in changed]
def add_user(self, user, **kw):
    """
    Add a user to bugzilla. The user must have its email-field and its full_name set.
    You also have to pass the password-keyword-parameter. See the note on the password
    parameter for that.
    The return-value will be the id of the newly created user.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/user.html#create-user
    """
    if not user.can_be_added():
        raise BugzillaException(-1, "This user does not have the required fields set")
    payload = user.add_json()
    payload.update(kw)
    # NOTE(review): the response key "ids" is converted with int() as if it
    # were a scalar -- confirm against the server's actual response shape.
    return int(self._post("user", payload)["ids"])
# TODO: until now, the group-objects for this call have to be crafted yourself.
# find some way to make this more elegant
def update_user(self, user, ids = None, **kw):
    """
    Update one or more users. By default only the given user will be updated. If you pass
    a list of user-ids then the respective users will all be updated. You can also pass
    a list of login-names in the names-keyword-parameter to update these users too. This
    method has two more keyword-parameters, groups and bless_groups. For their description
    and data-format see the documentation.
    The return-value will be a list of UpdateResult's containing all the changes.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/user.html#update-user
    """
    if ids is None:
        ids = user.id
    if isinstance(ids, int):
        ids = [ids]
    if not user.can_be_updated():
        raise BugzillaException(-1, "This user does not have the required fields set")
    payload = user.update_json()
    payload["ids"] = ids
    payload.update(kw)
    changed = self._put("user/%i" % ids[0], payload)["users"]
    return [UpdateResult(obj) for obj in changed]
def add_product(self, product, **kw):
    """
    Add a product to bugzilla. The product must have the fields name, description and
    version set. There are two optional keyword-parameters, is_open and create_series.
    For their use, see the documentation.
    The return-value will be the id of the newly created product.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/product.html#create-product
    """
    if not product.can_be_added():
        raise BugzillaException(-1, "This product does not have the required fields set")
    payload = product.add_json()
    payload.update(kw)
    return int(self._post("product", payload)["ids"])
def update_product(self, product, ids = None, **kw):
    """
    Update one or more products. If ids is not given, only the given product will be updated.
    Otherwise all products identified by their ids will be updated. This method has two
    keyword-parameters, is_open and create_series. If specified, these fields will be updated
    too.
    The return-value will be a list of UpdateResult's describing the changes.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/product.html#create-product
    """
    if ids is None:
        ids = product.id
    if isinstance(ids, int):
        ids = [ids]
    if not product.can_be_updated():
        raise BugzillaException(-1, "This product does not have the required fields set")
    payload = product.update_json()
    payload["ids"] = ids
    payload.update(kw)
    changed = self._put("product/%i" % ids[0], payload)["products"]
    return [UpdateResult(obj) for obj in changed]
def add_flag_type(self, flag_type, target_type, **kw):
    """
    Add a flag-type to bugzilla. The flag-type must have the fields name and description set.
    You have to set a target_type, specifying which object the flag-type will be for. Valid
    values are "bug" and "attachment". This method also has two keyword-parameters,
    inclusion and exclusion. For their description and usage, read the documentation.
    The return value will be the id of the newly created flag-type.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/flagtype.html#create-flag-type
    """
    if not flag_type.can_be_added():
        raise BugzillaException(-1, "This FlagType does not have the required fields set")
    payload = flag_type.add_json()
    payload.update(kw)
    payload["target_type"] = target_type
    return int(self._post("flag_type", payload)["ids"])
def update_flag_type(self, flag_type, ids = None, **kw):
    """
    Update one or several flag-type. By default only the given flag-type will be updated.
    If you specify ids then the flag-types with these id's will be updated. This method
    has two optional keyword-parameters, inclusions and exclusions. For their description
    and data format, please see the documentation.
    The return-value will be a list of UpdateResult's describing the changes.
    https://bugzilla.readthedocs.io/en/5.0/api/core/v1/flagtype.html#update-flag-type
    """
    if ids is None: ids = flag_type.id
    if isinstance(ids, int): ids = [ids]
    if not flag_type.can_be_updated():
        # Fix: the error message was copy-pasted from update_product and
        # wrongly referred to "product"; match add_flag_type's wording.
        raise BugzillaException(-1, "This FlagType does not have the required fields set")
    data = flag_type.update_json()
    data["ids"] = ids
    data.update(kw)
    return [UpdateResult(data) for data in self._put("flag_type/%i" % ids[0], data)["flagtypes"]]
| DenSA-Inc/bugzilla-python | bugzilla/__init__.py | Python | gpl-3.0 | 36,709 | [
"VisIt"
] | ccf1415a5593d668ae016119af725ce139a0f2961fcbf41a249aa7a711ab03f6 |
from __future__ import division
from statsmodels.compat.python import iteritems, range, string_types, lmap, long
import numpy as np
from numpy import dot, identity
from numpy.linalg import inv, slogdet
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.model as base
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.base.datetools import _index_date
__all__ = ['AR']
def sumofsq(x, axis=0):
    """
    Sum of squares of ``x`` along the given axis.

    Parameters
    ----------
    x : ndarray
    axis : int, optional
        Axis along which to sum. Default is 0 (the first axis).
    """
    # Fix: the axis parameter was accepted but silently ignored
    # (the call was hard-coded to axis=0).
    return np.sum(x**2, axis=axis)
def _check_ar_start(start, k_ar, method, dynamic):
if (method == 'cmle' or dynamic) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _validate(start, k_ar, dates, method):
    """
    Checks the date and then returns an integer start index.

    Raises ValueError when the resolved start lies in the presample for a
    method that cannot produce presample predictions.
    """
    from datetime import datetime
    # Fix: bind start_date unconditionally; previously it was only set in
    # the date branch, so an int start below k_ar raised NameError instead
    # of the intended ValueError.
    start_date = start
    if isinstance(start, (string_types, datetime)):
        start = _index_date(start, dates)
    if 'mle' not in method and start < k_ar:
        raise ValueError("Start must be >= k_ar for conditional MLE or "
                         "dynamic forecast. Got %s" % start_date)
    return start
def _ar_predict_out_of_sample(y, params, p, k_trend, steps, start=0):
mu = params[:k_trend] or 0 # only have to worry about constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(p + steps) # this is one too big but doesn't matter
if start:
endog[:p] = y[start-p:start]
else:
endog[:p] = y[-p:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i + p] = fcast
return forecast
class AR(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : "Autoregressive AR(p) model",
"params" : """endog : array-like
1-d endogenous response variable. The independent variable.""",
"extra_params" : base._missing_param_doc,
"extra_sections" : ""}
def __init__(self, endog, dates=None, freq=None, missing='none'):
    super(AR, self).__init__(endog, None, dates, freq, missing=missing)
    endog = self.endog  # original might not have been an ndarray
    if endog.ndim == 1:
        # reshape to a column vector so downstream code sees (nobs, 1)
        endog = endog[:, None]
        self.endog = endog  # to get shapes right
    elif endog.ndim > 1 and endog.shape[1] != 1:
        raise ValueError("Only the univariate case is implemented")
def initialize(self):
    """No-op; the AR model needs no additional initialization."""
    pass
def _transparams(self, params):
    """
    Transforms params to induce stationarity/invertability.

    Reference
    ---------
    Jones(1980)
    """
    k_ar, k_trend = self.k_ar, self.k_trend
    transformed = params.copy()
    # only the AR coefficients (after the trend terms) are reparameterized
    ar_slice = slice(k_trend, k_trend + k_ar)
    transformed[ar_slice] = _ar_transparams(params[ar_slice].copy())
    return transformed
def _invtransparams(self, start_params):
    """
    Inverse of the Jones reparameterization
    """
    k_ar, k_trend = self.k_ar, self.k_trend
    untransformed = start_params.copy()
    # invert only the AR-coefficient section (after the trend terms)
    ar_slice = slice(k_trend, k_trend + k_ar)
    untransformed[ar_slice] = _ar_invtransparams(start_params[ar_slice].copy())
    return untransformed
def _presample_fit(self, params, start, p, end, y, predictedvalues):
    """
    Return the pre-sample predicted values using the Kalman Filter

    Fills ``predictedvalues`` in place.

    Notes
    -----
    See predict method for how to use start and p.
    """
    k = self.k_trend
    # build system matrices
    T_mat = KalmanFilter.T(params, p, k, p)
    R_mat = KalmanFilter.R(params, p, k, 0, p)
    # Initial State mean and variance
    alpha = np.zeros((p, 1))
    # stationary initial state covariance, vec(Q_0) = (I - T kron T)^-1 vec(R R')
    Q_0 = dot(inv(identity(p**2)-np.kron(T_mat, T_mat)),
              dot(R_mat, R_mat.T).ravel('F'))
    Q_0 = Q_0.reshape(p, p, order='F')  # TODO: order might need to be p+k
    P = Q_0
    Z_mat = KalmanFilter.Z(p)
    for i in range(end):  # iterate p-1 times to fit presample
        v_mat = y[i] - dot(Z_mat, alpha)  # prediction error (innovation)
        F_mat = dot(dot(Z_mat, P), Z_mat.T)  # innovation variance
        Finv = 1./F_mat  # inv. always scalar
        K = dot(dot(dot(T_mat, P), Z_mat.T), Finv)  # Kalman gain
        # update state
        alpha = dot(T_mat, alpha) + dot(K, v_mat)
        L = T_mat - dot(K, Z_mat)
        P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
        #P[0,0] += 1 # for MA part, R_mat.R_mat.T above
        if i >= start - 1:  # only record if we ask for it
            predictedvalues[i + 1 - start] = dot(Z_mat, alpha)
def _get_predict_start(self, start, dynamic):
    """
    Resolve ``start`` (None, int, or date-like) to an integer index,
    validate it against the estimation method, and record the start date.
    """
    method = getattr(self, 'method', 'mle')
    k_ar = getattr(self, 'k_ar', 0)
    if start is None:
        if method == 'mle' and not dynamic:
            start = 0
        else:  # can't do presample fit for cmle or dynamic
            start = k_ar
    elif isinstance(start, (int, long)):
        start = super(AR, self)._get_predict_start(start)
    else:  # should be a date
        start = _validate(start, k_ar, self.data.dates, method)
        start = super(AR, self)._get_predict_start(start)
    # raises if start falls in the presample for cmle/dynamic
    _check_ar_start(start, k_ar, method, dynamic)
    self._set_predict_start_date(start)
    return start
def predict(self, params, start=None, end=None, dynamic=False):
    """
    Returns in-sample and out-of-sample prediction.

    Parameters
    ----------
    params : array
        The fitted model parameters.
    start : int, str, or datetime
        Zero-indexed observation number at which to start forecasting, ie.,
        the first forecast is start. Can also be a date string to
        parse or a datetime type.
    end : int, str, or datetime
        Zero-indexed observation number at which to end forecasting, ie.,
        the first forecast is start. Can also be a date string to
        parse or a datetime type.
    dynamic : bool
        The `dynamic` keyword affects in-sample prediction. If dynamic
        is False, then the in-sample lagged values are used for
        prediction. If `dynamic` is True, then in-sample forecasts are
        used in place of lagged dependent variables. The first forecasted
        value is `start`.

    Returns
    -------
    predicted values : array

    Notes
    -----
    The linear Gaussian Kalman filter is used to return pre-sample fitted
    values. The exact initial Kalman Filter is used. See Durbin and Koopman
    in the references for more information.
    """
    # will return an index of a date
    start = self._get_predict_start(start, dynamic)
    end, out_of_sample = self._get_predict_end(end)

    if start - end > 1:
        raise ValueError("end is before start")

    k_ar = self.k_ar
    k_trend = self.k_trend
    method = self.method
    endog = self.endog.squeeze()

    if dynamic:
        # dynamic forecast: everything from start on is recursively forecast
        out_of_sample += end - start + 1
        return _ar_predict_out_of_sample(endog, params, k_ar,
                                         k_trend, out_of_sample, start)

    predictedvalues = np.zeros(end + 1 - start)

    # fit pre-sample
    if method == 'mle':  # use Kalman Filter to get initial values
        if k_trend:
            # unconditional mean of the process, used to demean the presample
            mu = params[0]/(1-np.sum(params[k_trend:]))

        # modifies predictedvalues in place
        if start < k_ar:
            self._presample_fit(params, start, k_ar, min(k_ar-1, end),
                                endog[:k_ar] - mu, predictedvalues)
            predictedvalues[:k_ar-start] += mu

    if end < k_ar:
        return predictedvalues

    # in-sample part: just do the whole thing and truncate
    fittedvalues = dot(self.X, params)

    pv_start = max(k_ar - start, 0)
    fv_start = max(start - k_ar, 0)
    fv_end = min(len(fittedvalues), end-k_ar+1)
    predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]

    if out_of_sample:
        forecastvalues = _ar_predict_out_of_sample(endog, params,
                                                   k_ar, k_trend,
                                                   out_of_sample)
        predictedvalues = np.r_[predictedvalues, forecastvalues]

    return predictedvalues
def _presample_varcov(self, params):
    """
    Returns the inverse of the presample variance-covariance.

    Notes
    -----
    See Hamilton p. 125
    """
    k = self.k_trend
    p = self.k_ar
    p1 = p+1

    # get inv(Vp) Hamilton 5.3.7
    params0 = np.r_[-1, params[k:]]

    Vpinv = np.zeros((p, p), dtype=params.dtype)
    for i in range(1, p1):
        Vpinv[i-1, i-1:] = np.correlate(params0, params0[:i],)[:-1]
        Vpinv[i-1, i-1:] -= np.correlate(params0[-i:], params0,)[:-1]

    # fill the lower triangle from the upper one (the matrix is symmetric)
    Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
    return Vpinv
def _loglike_css(self, params):
    """
    Loglikelihood of AR(p) process using conditional sum of squares
    """
    nobs = self.nobs
    resid = self.Y.squeeze() - np.dot(self.X, params)
    ssr = sumofsq(resid)
    sigma2 = ssr/nobs
    llf = -nobs/2 * (np.log(2 * np.pi) + np.log(sigma2)) - ssr/(2 * sigma2)
    return llf
def _loglike_mle(self, params):
    """
    Loglikelihood of AR(p) process using exact maximum likelihood
    """
    nobs = self.nobs
    X = self.X
    endog = self.endog
    k_ar = self.k_ar
    k_trend = self.k_trend

    # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
    if self.transparams:
        params = self._transparams(params)

    # get mean and variance for pre-sample lags
    yp = endog[:k_ar].copy()
    if k_trend:
        c = [params[0]] * k_ar
    else:
        c = [0]
    # unconditional mean of the first k_ar observations
    mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
    diffp = yp - mup[:, None]

    # get inv(Vp) Hamilton 5.3.7
    Vpinv = self._presample_varcov(params)

    # quadratic form of the demeaned presample (scalar)
    diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
    ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))

    # concentrating the likelihood means that sigma2 is given by
    sigma2 = 1./nobs * (diffpVpinv + ssr)
    self.sigma2 = sigma2
    logdet = slogdet(Vpinv)[1]  # TODO: add check for singularity
    loglike = -1/2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) -
                       logdet + diffpVpinv / sigma2 + ssr / sigma2)
    return loglike
def loglike(self, params):
    """
    The loglikelihood of an AR(p) process

    Parameters
    ----------
    params : array
        The fitted parameters of the AR model

    Returns
    -------
    llf : float
        The loglikelihood evaluated at `params`

    Notes
    -----
    Contains constant term. If the model is fit by OLS then this returns
    the conditonal maximum likelihood.

    .. math:: \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}

    If it is fit by MLE then the (exact) unconditional maximum likelihood
    is returned.

    .. math:: -\\frac{n}{2}log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}

    where

    :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
    mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
    variance-covariance matrix of the first `p` observations.
    """
    # TODO: Math is on Hamilton ~pp 124-5
    if self.method != "cmle":
        return self._loglike_mle(params)
    return self._loglike_css(params)
def score(self, params):
    """
    Return the gradient of the loglikelihood at params.

    Parameters
    ----------
    params : array-like
        The parameter values at which to evaluate the score function.

    Notes
    -----
    Returns numerical gradient.
    """
    return approx_fprime(params, self.loglike, epsilon=1e-8)
def information(self, params):
    """
    Fisher information matrix -- not implemented yet.
    """
    return None
def hessian(self, params):
    """
    Returns numerical hessian for now.
    """
    return approx_hess(params, self.loglike)
def _stackX(self, k_ar, trend):
    """
    Private method to build the RHS matrix for estimation.

    Columns are trend terms then lags. Note: sets ``self.k_trend`` as a
    side effect.
    """
    endog = self.endog
    X = lagmat(endog, maxlag=k_ar, trim='both')
    k_trend = util.get_trendorder(trend)
    if k_trend:
        X = add_trend(X, prepend=True, trend=trend)
    self.k_trend = k_trend
    return X
def select_order(self, maxlag, ic, trend='c', method='mle'):
    """
    Select the lag order according to the information criterion.

    Parameters
    ----------
    maxlag : int
        The highest lag length tried. See `AR.fit`.
    ic : str {'aic','bic','hqic','t-stat'}
        Criterion used for selecting the optimal lag length.
        See `AR.fit`.
    trend : str {'c','nc'}
        Whether to include a constant or not. 'c' - include constant.
        'nc' - no constant.

    Returns
    -------
    bestlag : int
        Best lag according to IC.
    """
    endog = self.endog

    # make Y and X with same nobs to compare ICs
    Y = endog[maxlag:]
    self.Y = Y  # attach to get correct fit stats
    X = self._stackX(maxlag, trend)  # sets k_trend
    self.X = X
    k = self.k_trend  # k_trend set in _stackX
    k = max(1, k)  # handle if startlag is 0
    results = {}

    if ic != 't-stat':
        for lag in range(k, maxlag+1):
            # have to reinstantiate the model to keep comparable models
            endog_tmp = endog[maxlag-lag:]
            fit = AR(endog_tmp).fit(maxlag=lag, method=method,
                                    full_output=0, trend=trend,
                                    maxiter=100, disp=0)
            # Fix: use getattr instead of eval to read the criterion attribute
            results[lag] = getattr(fit, ic)
        bestic, bestlag = min((res, lag) for lag, res in iteritems(results))
    else:  # choose by last t-stat.
        stop = 1.6448536269514722  # t-stat threshold, norm.ppf(.95)
        # Fix: initialize once before the loop; it was reset on every iteration
        bestlag = 0
        for lag in range(maxlag, k - 1, -1):
            # have to reinstantiate the model to keep comparable models
            endog_tmp = endog[maxlag - lag:]
            fit = AR(endog_tmp).fit(maxlag=lag, method=method,
                                    full_output=0, trend=trend,
                                    maxiter=35, disp=-1)
            if np.abs(fit.tvalues[-1]) >= stop:
                bestlag = lag
                break
    return bestlag
def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
        transparams=True, start_params=None, solver='lbfgs', maxiter=35,
        full_output=1, disp=1, callback=None, **kwargs):
    """
    Fit the unconditional maximum likelihood of an AR(p) process.

    Parameters
    ----------
    maxlag : int
        If `ic` is None, then maxlag is the lag length used in fit. If
        `ic` is specified then maxlag is the highest lag order used to
        select the correct lag order. If maxlag is None, the default is
        round(12*(nobs/100.)**(1/4.))
    method : str {'cmle', 'mle'}, optional
        cmle - Conditional maximum likelihood using OLS
        mle - Unconditional (exact) maximum likelihood. See `solver`
        and the Notes.
    ic : str {'aic','bic','hic','t-stat'}
        Criterion used for selecting the optimal lag length.
        aic - Akaike Information Criterion
        bic - Bayes Information Criterion
        t-stat - Based on last lag
        hqic - Hannan-Quinn Information Criterion
        If any of the information criteria are selected, the lag length
        which results in the lowest value is selected. If t-stat, the
        model starts with maxlag and drops a lag until the highest lag
        has a t-stat that is significant at the 95 % level.
    trend : str {'c','nc'}
        Whether to include a constant or not. 'c' - include constant.
        'nc' - no constant.

    The below can be specified if method is 'mle'

    transparams : bool, optional
        Whether or not to transform the parameters to ensure stationarity.
        Uses the transformation suggested in Jones (1980).
    start_params : array-like, optional
        A first guess on the parameters. Default is cmle estimates.
    solver : str or None, optional
        Solver to be used if method is 'mle'. The default is 'lbfgs'
        (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
        are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
        'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
        and 'powell'.
    maxiter : int, optional
        The maximum number of function evaluations. Default is 35.
    tol : float
        The convergence tolerance. Default is 1e-08.
    full_output : bool, optional
        If True, all output from solver will be available in
        the Results object's mle_retvals attribute. Output is dependent
        on the solver. See Notes for more information.
    disp : bool, optional
        If True, convergence information is output.
    callback : function, optional
        Called after each iteration as callback(xk) where xk is the current
        parameter vector.
    kwargs
        See Notes for keyword arguments that can be passed to fit.

    References
    ----------
    Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
    series with missing observations."  `Technometrics`.  22.3.
    389-95.

    See also
    --------
    statsmodels.base.model.LikelihoodModel.fit
    """
    method = method.lower()
    if method not in ['cmle', 'yw', 'mle']:
        raise ValueError("Method %s not recognized" % method)
    self.method = method
    self.trend = trend
    self.transparams = transparams
    nobs = len(self.endog)  # overwritten if method is 'cmle'
    endog = self.endog
    if maxlag is None:
        # Schwert-style rule of thumb for the default maximum lag.
        maxlag = int(round(12*(nobs/100.)**(1/4.)))
    k_ar = maxlag  # stays this if ic is None
    # select lag length
    if ic is not None:
        ic = ic.lower()
        if ic not in ['aic', 'bic', 'hqic', 't-stat']:
            raise ValueError("ic option %s not understood" % ic)
        k_ar = self.select_order(k_ar, ic, trend, method)
    self.k_ar = k_ar  # change to what was chosen by ic
    # redo estimation for best lag
    # make LHS
    Y = endog[k_ar:, :]
    # make lagged RHS
    X = self._stackX(k_ar, trend)  # sets self.k_trend
    k_trend = self.k_trend
    self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
    self.Y = Y
    self.X = X
    if method == "cmle":  # do OLS
        arfit = OLS(Y, X).fit()
        params = arfit.params
        self.nobs = nobs - k_ar
        self.sigma2 = arfit.ssr/arfit.nobs  # needed for predict fcasterr
    elif method == "mle":
        solver = solver.lower()
        self.nobs = nobs
        if start_params is None:
            # Default starting values: conditional MLE (OLS) estimates.
            start_params = OLS(Y, X).fit().params
        else:
            if len(start_params) != k_trend + k_ar:
                raise ValueError("Length of start params is %d. There"
                                 " are %d parameters." %
                                 (len(start_params), k_trend + k_ar))
        # Map starting values into the unconstrained optimization space
        # (inverse of the Jones (1980) stationarity transform).
        start_params = self._invtransparams(start_params)
        if solver == 'lbfgs':
            # Tighter-than-default l-bfgs-b settings; approx_grad because
            # no analytic score is supplied.
            kwargs.setdefault('pgtol', 1e-8)
            kwargs.setdefault('factr', 1e2)
            kwargs.setdefault('m', 12)
            kwargs.setdefault('approx_grad', True)
        mlefit = super(AR, self).fit(start_params=start_params,
                                     method=solver, maxiter=maxiter,
                                     full_output=full_output, disp=disp,
                                     callback=callback, **kwargs)
        params = mlefit.params
        if self.transparams:
            # Transform back to the stationarity-constrained space.
            params = self._transparams(params)
            self.transparams = False  # turn off now for other results
    # don't use yw, because we can't estimate the constant
    #elif method == "yw":
    #    params, omega = yule_walker(endog, order=maxlag,
    #                                method="mle", demean=False)
    #    # how to handle inference after Yule-Walker?
    #    self.params = params #TODO: don't attach here
    #    self.omega = omega
    pinv_exog = np.linalg.pinv(X)
    # (X'X)^-1 via the pseudo-inverse, used for parameter covariances.
    normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
    arfit = ARResults(self, params, normalized_cov_params)
    if method == 'mle' and full_output:
        arfit.mle_retvals = mlefit.mle_retvals
        arfit.mle_settings = mlefit.mle_settings
    return ARResultsWrapper(arfit)
class ARResults(tsbase.TimeSeriesModelResults):
    """
    Class to hold results from fitting an AR model.

    Parameters
    ----------
    model : AR Model instance
        Reference to the model that is fit.
    params : array
        The fitted parameters from the AR Model.
    normalized_cov_params : array
        inv(dot(X.T,X)) where X is the lagged values.
    scale : float, optional
        An estimate of the scale of the model.

    Returns
    -------
    **Attributes**

    aic : float
        Akaike Information Criterion using Lutkephol's definition.
        :math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
    bic : float
        Bayes Information Criterion
        :math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`
    bse : array
        The standard errors of the estimated parameters. If `method` is 'cmle',
        then the standard errors that are returned are the OLS standard errors
        of the coefficients. If the `method` is 'mle' then they are computed
        using the numerical Hessian.
    fittedvalues : array
        The in-sample predicted values of the fitted AR model. The `k_ar`
        initial values are computed via the Kalman Filter if the model is
        fit by `mle`.
    fpe : float
        Final prediction error using Lutkepohl's definition
        ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
    hqic : float
        Hannan-Quinn Information Criterion.
    k_ar : float
        Lag length. Sometimes used as `p` in the docs.
    k_trend : float
        The number of trend terms included. 'nc'=0, 'c'=1.
    llf : float
        The loglikelihood of the model evaluated at `params`. See `AR.loglike`
    model : AR model instance
        A reference to the fitted AR model.
    nobs : float
        The number of available observations `nobs` - `k_ar`
    n_totobs : float
        The number of total observations in `endog`. Sometimes `n` in the docs.
    params : array
        The fitted parameters of the model.
    pvalues : array
        The p values associated with the standard errors.
    resid : array
        The residuals of the model. If the model is fit by 'mle' then the
        pre-sample residuals are calculated using fittedvalues from the Kalman
        Filter.
    roots : array
        The roots of the AR process are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
        Stability requires that the roots in modulus lie outside the unit
        circle.
    scale : float
        Same as sigma2
    sigma2 : float
        The variance of the innovations (residuals).
    trendorder : int
        The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,
        'ct' = 1, etc.
    tvalues : array
        The t-values associated with `params`.
    """

    _cache = {}  # for scale setter

    def __init__(self, model, params, normalized_cov_params=None, scale=1.):
        super(ARResults, self).__init__(model, params, normalized_cov_params,
                                        scale)
        self._cache = resettable_cache()
        self.nobs = model.nobs
        n_totobs = len(model.endog)
        self.n_totobs = n_totobs
        self.X = model.X  # copy?
        self.Y = model.Y
        k_ar = model.k_ar
        self.k_ar = k_ar
        k_trend = model.k_trend
        self.k_trend = k_trend
        trendorder = None
        if k_trend > 0:
            trendorder = k_trend - 1
        self.trendorder = trendorder
        #TODO: cmle vs mle?
        self.df_model = k_ar + k_trend
        self.df_resid = self.model.df_resid = n_totobs - self.df_model

    @cache_writable()
    def sigma2(self):
        # Innovation variance; for 'cmle' this is SSR/nobs, for 'mle' the
        # model already computed it during fit.
        model = self.model
        if model.method == "cmle":  # do DOF correction
            return 1. / self.nobs * sumofsq(self.resid)
        else:
            return self.model.sigma2

    @cache_writable()  # for compatability with RegressionResults
    def scale(self):
        return self.sigma2

    @cache_readonly
    def bse(self):  # allow user to specify?
        # Standard errors: OLS-style for 'cmle', numerical-Hessian-based
        # otherwise.
        if self.model.method == "cmle":  # uses different scale/sigma def.
            resid = self.resid
            ssr = np.dot(resid, resid)
            ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
            return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
        else:
            hess = approx_hess(self.params, self.model.loglike)
            return np.sqrt(np.diag(-np.linalg.inv(hess)))

    @cache_readonly
    def pvalues(self):
        # Two-sided p-values from the standard normal distribution.
        return norm.sf(np.abs(self.tvalues))*2

    @cache_readonly
    def aic(self):
        #JP: this is based on loglike with dropped constant terms ?
        # Lutkepohl
        #return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
        # Include constant as estimated free parameter and double the loss
        return np.log(self.sigma2) + 2 * (1 + self.df_model)/self.nobs
        # Stata defintion
        #nobs = self.nobs
        #return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs

    @cache_readonly
    def hqic(self):
        nobs = self.nobs
        # Lutkepohl
        # return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
        # R uses all estimated parameters rather than just lags
        return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs *
                (1 + self.df_model))
        # Stata
        #nobs = self.nobs
        #return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
        #        (self.k_ar + self.k_trend)

    @cache_readonly
    def fpe(self):
        nobs = self.nobs
        df_model = self.df_model
        #Lutkepohl
        return ((nobs+df_model)/(nobs-df_model))*self.sigma2

    @cache_readonly
    def bic(self):
        nobs = self.nobs
        # Lutkepohl
        #return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
        # Include constant as est. free parameter
        return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs
        # Stata
        # return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
        #       self.k_trend)

    @cache_readonly
    def resid(self):
        #NOTE: uses fittedvalues because it calculate presample values for mle
        model = self.model
        endog = model.endog.squeeze()
        if model.method == "cmle":  # eliminate pre-sample
            return endog[self.k_ar:] - self.fittedvalues
        else:
            return model.endog.squeeze() - self.fittedvalues

    #def ssr(self):
    #    resid = self.resid
    #    return np.dot(resid, resid)

    @cache_readonly
    def roots(self):
        # Inverse roots of the AR lag polynomial; modulus > 1 means stable.
        k = self.k_trend
        return np.roots(np.r_[1, -self.params[k:]]) ** -1

    @cache_readonly
    def fittedvalues(self):
        return self.model.predict(self.params)

    def predict(self, start=None, end=None, dynamic=False):
        # Thin delegation to the model's predict; docstring is patched in
        # below from AR.predict.__doc__.
        params = self.params
        predictedvalues = self.model.predict(params, start, end, dynamic)
        return predictedvalues

        #start = self.model._get_predict_start(start)
        #end, out_of_sample = self.model._get_predict_end(end)

        ##TODO: return forecast errors and confidence intervals
        #from statsmodels.tsa.arima_process import arma2ma
        #ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
        #fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))

    # Splice an extra parameter description into AR.predict's docstring so
    # the results-level predict documents the confint option as well.
    preddoc = AR.predict.__doc__.split('\n')
    extra_doc = (""" confint : bool, float
 Whether to return confidence intervals. If `confint` == True,
 95 % confidence intervals are returned. Else if `confint` is a
 float, then it is assumed to be the alpha value of the confidence
 interval. That is confint == .05 returns a 95% confidence
 interval, and .10 would return a 90% confidence interval."""
                 ).split('\n')
    #ret_doc = """
    #        fcasterr : array-like
    #        confint : array-like
    #"""
    predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
                                preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
    """Results wrapper that re-attaches data/index metadata (e.g. pandas)
    to ARResults attributes and methods via the standard wrapping machinery.
    """
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
                                     _methods)
# Register the wrapper so attribute access is delegated to ARResults.
wrap.populate_wrapper(ARResultsWrapper, ARResults)
if __name__ == "__main__":
    # Ad-hoc demo/smoke-test code exercising the AR model on the sunspots
    # dataset; not part of the public API.
    import statsmodels.api as sm
    sunspots = sm.datasets.sunspots.load()
    # Why does R demean the data by default?
    ar_ols = AR(sunspots.endog)
    res_ols = ar_ols.fit(maxlag=9)
    ar_mle = AR(sunspots.endog)
    res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
                              maxiter=500, gtol=1e-10)
    # res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
    #        tol=1e-13)

    # ar_yw = AR(sunspots.endog)
    # res_yw = ar_yw.fit(maxlag=4, method="yw")

    # # Timings versus talkbox
    # from timeit import default_timer as timer
    # print "Time AR fit vs. talkbox"
    # # generate a long series of AR(2) data
    #
    # nobs = 1000000
    # y = np.empty(nobs)
    # y[0:2] = 0
    # for i in range(2,nobs):
    #     y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
    #
    # mod_sm = AR(y)
    # t = timer()
    # res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
    # t_end = timer()
    # print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
    # try:
    #     import scikits.talkbox as tb
    # except:
    #     raise ImportError("You need scikits.talkbox installed for timings")
    # t = timer()
    # mod_tb = tb.lpc(y, 2)
    # t_end = timer()
    # print str(t_end - t) + " seconds for talkbox.lpc"
    # print """For higher lag lengths ours quickly fills up memory and starts
    #thrashing the swap.  Should we include talkbox C code or Cythonize the
    #Levinson recursion algorithm?"""

    ## Try with a pandas series
    import pandas
    import scikits.timeseries as ts
    d1 = ts.Date(year=1700, freq='A')
    #NOTE: have to have yearBegin offset for annual data until parser rewrite
    #should this be up to the user, or should it be done in TSM init?
    #NOTE: not anymore, it's end of year now
    ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
    pandas_dr = pandas.DatetimeIndex(start=d1.datetime,
                                     periods=len(sunspots.endog),
                                     freq='A-DEC')
    #pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)
    dates = np.arange(1700, 1700 + len(sunspots.endog))
    dates = ts.date_array(dates, freq='A')
    #sunspots = pandas.Series(sunspots.endog, index=dates)

    #NOTE: pandas only does business days for dates it looks like
    import datetime
    dt_dates = np.asarray(lmap(datetime.datetime.fromordinal,
                               ts_dr.toordinal().astype(int)))
    sunspots = pandas.Series(sunspots.endog, index=dt_dates)

    #NOTE: pandas can't handle pre-1900 dates
    mod = AR(sunspots, freq='A')
    res = mod.fit(method='mle', maxlag=9)

    # some data for an example in Box Jenkins
    IBM = np.asarray([460, 457, 452, 459, 462, 459, 463, 479, 493, 490.])
    w = np.diff(IBM)
    theta = .5
| yl565/statsmodels | statsmodels/tsa/ar_model.py | Python | bsd-3-clause | 33,978 | [
"Gaussian"
] | 6d4730507514f7b4605e9740e8fab9a647db2e358ef74f4ede504a02c39e2d80 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
from multiprocessing import Pool
def bam(args):
    """Align one sample with BWA and collect BAM-level QC metrics.

    Parameters
    ----------
    args : tuple
        ``(inpath, bed, exonbed, t, ref, R1, R2, sampleID, outpath)`` where
        `t` is the thread count, `ref` the reference FASTA, `R1`/`R2` the
        paired FASTQ files and `outpath` the QC output directory.

    Side effects only: each step is skipped when its output already exists,
    so the pipeline can be resumed after a partial run.
    """
    # Idiomatic tuple unpacking instead of indexing args[0]..args[8].
    inpath, bed, exonbed, t, ref, R1, R2, sampleID, outpath = args
    sam = "%s/%s.sam" % (inpath, sampleID)
    bam_sort = "%s/%s_sort.bam" % (inpath, sampleID)
    bam_mark = "%s/%s_mark.bam" % (inpath, sampleID)
    cov_sort = "%s/%s_sort.txt" % (outpath, sampleID)
    cov_mark = "%s/%s_mark.txt" % (outpath, sampleID)
    mapq_value = "%s/%s_mapq.txt" % (outpath, sampleID)
    temp_len = "%s/%s_templen.txt" % (outpath, sampleID)
    map_sort = "%s/%s_sort.maptxt" % (outpath, sampleID)
    map_mark = "%s/%s_mark.maptxt" % (outpath, sampleID)
    stats = "%s/%s_stats.txt" % (outpath, sampleID)
    read_len = "%s/%s_readlen.txt" % (outpath, sampleID)
    AGCT_content = "%s/%s_AGCT.txt" % (outpath, sampleID)
    capture_sort = "{0}/{1}_sort_capture_stat.txt".format(outpath, sampleID)
    capture_mark = "{0}/{1}_mark_capture_stat.txt".format(outpath, sampleID)
    if not os.path.exists(bam_sort):
        # Align and coordinate-sort.
        os.system("bwa mem -t %s %s %s %s > %s" % (t, ref, R1, R2, sam))
        os.system("java -Xmx10g -jar /tools/GATK/picard/SortSam.jar INPUT=%s OUTPUT=%s SORT_ORDER=coordinate" % (sam, bam_sort))
        os.system("samtools index %s" % bam_sort)
    if not os.path.exists(bam_mark):
        # Deduplicate.
        os.system("/haplox/users/huang/mypy/data-analysis/ctdna_exome_pipeline/dedup.py -1 %s -o %s" % (bam_sort, bam_mark))
        os.system("samtools index %s" % bam_mark)
    if not os.path.exists(cov_mark):
        # Per-base coverage over the exon regions, before and after dedup.
        os.system("bedtools coverage -d -abam %s -b %s > %s" % (bam_sort, exonbed, cov_sort))
        os.system("bedtools coverage -d -abam %s -b %s > %s" % (bam_mark, exonbed, cov_mark))
    if not os.path.exists(mapq_value):
        os.system("samtools view %s | cut -f 5 > %s" % (bam_sort, mapq_value))
        os.system("samtools view %s | cut -f 9 > %s" % (bam_mark, temp_len))
    if not os.path.exists(map_mark):
        os.system("samtools flagstat %s > %s" % (bam_sort, map_sort))
        os.system("samtools flagstat %s > %s" % (bam_mark, map_mark))
    if not os.path.exists(read_len):
        os.system("samtools stats -r %s %s > %s" % (ref, bam_sort, stats))
        os.system("grep 'RL' %s | cut -f 2- > %s" % (stats, read_len))
        os.system("grep 'GCC' %s | cut -f 2- > %s" % (stats, AGCT_content))
    # BUG FIX: the original tested os.path.exists(capture_sort and capture_mark),
    # which evaluates the `and` of two non-empty strings and therefore only
    # ever checked capture_mark. Check both files explicitly.
    if not (os.path.exists(capture_sort) and os.path.exists(capture_mark)):
        os.system("pypy /haplox/users/longrw/mypython/check_capture_percentage.py -b {0} -i {1} -o {2}".format(bed, bam_sort, outpath))
        os.system("pypy /haplox/users/longrw/mypython/check_capture_percentage.py -b {0} -i {1} -o {2}".format(bed, bam_mark, outpath))
def cfdna2gdna():
    """Run the paired cfDNA + gDNA QC pipeline.

    Reads all inputs from sys.argv (11 positional args, optional 12th as
    the output directory), aligns both samples in parallel, then renders
    coverage/MAPQ/template-length/flagstat reports and the final HTML.
    """
    (inpath, bed, exonbed, t, ref, gR1, gR2, gID, cfR1, cfR2, cfID) = (sys.argv[1],
        sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6],
        sys.argv[7], sys.argv[8], sys.argv[9], sys.argv[10], sys.argv[11])
    # bamQC_dir = "%s/BamQC" % inpath
    # if not os.path.exists(bamQC_dir):os.mkdir(bamQC_dir)
    if len(sys.argv) == 13:
        outpath = sys.argv[12]
    else:
        outpath = "{0}/BamQC".format(inpath)
    if not os.path.exists(outpath):os.mkdir(outpath)
    # Align and QC both samples in parallel worker processes.
    argss = [(inpath,bed,exonbed,t,ref,gR1,gR2,gID,outpath), (inpath,bed,exonbed,t,ref,cfR1,cfR2,cfID,outpath)]
    p = Pool()
    p.map(bam, argss)
    p.close()
    p.join()
    # bam(inpath,bed,exonbed,t,ref,gR1,gR2,gID,outpath)
    # bam(inpath,bed,exonbed,t,ref,cfR1,cfR2,cfID,outpath)
    # Paths to the per-sample QC artifacts produced by bam() above.
    gdna_sort_cov = "%s/%s_sort.txt" % (outpath,gID)
    gdna_mark_cov = "%s/%s_mark.txt" % (outpath,gID)
    cfdna_sort_cov = "%s/%s_sort.txt" % (outpath,cfID)
    cfdna_mark_cov = "%s/%s_mark.txt" % (outpath,cfID)
    cfdna_mapq_value = "%s/%s_mapq.txt" % (outpath,cfID)
    gdna_mapq_value = "%s/%s_mapq.txt" % (outpath,gID)
    cfdna_templen = "%s/%s_templen.txt" % (outpath,cfID)
    gdna_templen = "%s/%s_templen.txt" % (outpath,gID)
    gdna_sort_map = "%s/%s_sort.maptxt" % (outpath,gID)
    gdna_mark_map = "%s/%s_mark.maptxt" % (outpath,gID)
    cfdna_sort_map = "%s/%s_sort.maptxt" % (outpath,cfID)
    cfdna_mark_map = "%s/%s_mark.maptxt" % (outpath,cfID)
    gdna_read_content = "{0}/{1}_AGCT.txt".format(outpath,gID)
    cfdna_read_content = "{0}/{1}_AGCT.txt".format(outpath,cfID)
    cov_info = "%s/covInfo.csv" % outpath
    mapq_png = "%s/mapping_quality_distribution-MAPQ.png" % outpath
    templen_png = "%s/template_length_distribution.png" % outpath
    map_info = "%s/mapInfo.csv" % outpath
    gene_target_list = "/haplox/users/longrw/myR/files/gene_target_list.txt"
    gene_target_points = "/haplox/users/longrw/myR/files/gene_target_points.txt"
    # Aggregate + plot, then build the HTML report and drop bulky intermediates.
    os.system("Rscript /haplox/users/longrw/myR/cov_group_exon.R %s %s %s %s" % (exonbed,gene_target_points,gene_target_list,outpath))
    os.system("python /haplox/users/longrw/mypython/cov.py %s %s %s %s > %s" % (gdna_sort_cov,gdna_mark_cov,cfdna_sort_cov,cfdna_mark_cov,cov_info))
    os.system("Rscript /haplox/users/longrw/myR/depth_cov.R %s %s" % (outpath,cfID.split('_')[0]))
    os.system("Rscript /haplox/users/longrw/myR/mapq.R %s %s %s" % (cfdna_mapq_value,gdna_mapq_value,mapq_png))
    os.system("Rscript /haplox/users/longrw/myR/template_length.R %s %s %s" % (cfdna_templen,gdna_templen,templen_png))
    os.system("python /haplox/users/longrw/mypython/flagstat.py %s %s %s %s > %s" % (gdna_sort_map,gdna_mark_map,cfdna_sort_map,cfdna_mark_map,map_info))
    os.system("Rscript /haplox/users/longrw/myR/read_content.R {0} {1} {2}".format(outpath,gID,gdna_read_content))
    os.system("Rscript /haplox/users/longrw/myR/read_content.R {0} {1} {2}".format(outpath,cfID,cfdna_read_content))
    os.system("python /haplox/users/longrw/mypython/bamQCreport.py %s > %s/bamQCreport.html" % (outpath,outpath))
    os.system("rm -rf %s/*_mapq.txt" % outpath)
    os.system("rm -rf %s/*_templen.txt" % outpath)
def ffpe():
    """Run the single-sample (FFPE) QC pipeline.

    Reads 8 positional args from sys.argv (optional 9th is the output
    directory), runs bam() once, then renders the single-sample report.
    """
    (inpath,bed,exonbed,t,ref,R1,R2,sampleID) = (sys.argv[1],
        sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5],sys.argv[6],sys.argv[7],sys.argv[8])
    # bamQC_dir = "%s/BamQC" % inpath
    # if not os.path.exists(bamQC_dir):os.mkdir(bamQC_dir)
    if len(sys.argv) == 10:
        outpath = sys.argv[9]
    else:
        outpath = "{0}/BamQC".format(inpath)
    if not os.path.exists(outpath):os.mkdir(outpath)
    # Single sample, so no worker pool needed -- note bam takes one tuple arg.
    bam((inpath,bed,exonbed,t,ref,R1,R2,sampleID,outpath))
    cov_sort = "%s/%s_sort.txt" % (outpath,sampleID)
    cov_mark = "%s/%s_mark.txt" % (outpath,sampleID)
    mapq_value = "%s/%s_mapq.txt" % (outpath,sampleID)
    temp_len = "%s/%s_templen.txt" % (outpath,sampleID)
    map_sort = "%s/%s_sort.maptxt" % (outpath,sampleID)
    map_mark = "%s/%s_mark.maptxt" % (outpath,sampleID)
    read_content = "{0}/{1}_AGCT.txt".format(outpath,sampleID)
    cov_info = "%s/covInfo.csv" % outpath
    mapq_png = "%s/mapping_quality_distribution-MAPQ.png" % outpath
    templen_png = "%s/template_length_distribution.png" % outpath
    map_info = "%s/mapInfo.csv" % outpath
    gene_target_points = "/haplox/users/longrw/myR/files/gene_target_points.txt"
    gene_target_list = "/haplox/users/longrw/myR/files/gene_target_list.txt"
    # Aggregate + plot (single-sample "_ffpe" variants), then report and clean up.
    os.system("Rscript /haplox/users/longrw/myR/cov_group_exon.R %s %s %s %s" % (exonbed,gene_target_points,gene_target_list,outpath))
    os.system("python /haplox/users/longrw/mypython/cov_ffpe.py %s %s > %s" % (cov_sort,cov_mark,cov_info))
    os.system("Rscript /haplox/users/longrw/myR/depth_cov_ffpe.R %s %s" % (outpath,sampleID))
    os.system("Rscript /haplox/users/longrw/myR/mapq_ffpe.R %s %s" % (mapq_value,mapq_png))
    os.system("Rscript /haplox/users/longrw/myR/template_length_ffpe.R %s %s" % (temp_len,templen_png))
    os.system("python /haplox/users/longrw/mypython/flagstat_ffpe.py %s %s > %s" % (map_sort,map_mark,map_info))
    os.system("Rscript /haplox/users/longrw/myR/read_content.R {0} {1} {2}".format(outpath,sampleID,read_content))
    os.system("python /haplox/users/longrw/mypython/bamQCreport_ffpe.py %s > %s/bamQCreport.html" % (outpath,outpath))
    os.system("rm -rf %s/*_mapq.txt" % outpath)
    os.system("rm -rf %s/*_templen.txt" % outpath)
if __name__ == "__main__":
    # Dispatch on argument count: 11+ positional args (argv length >= 12)
    # selects the paired cfDNA/gDNA mode; fewer selects the single-sample
    # FFPE mode.
    if len(sys.argv) >= 12:
        cfdna2gdna()
    else:
        ffpe()
| longrw/mypython | bamQC.py | Python | gpl-3.0 | 8,267 | [
"BWA"
] | 24e4bbd8d304b6434f9570f76ea9085798de3811a5ab86d7c4c9aa433ac9d7cb |
import numpy
import warnings
from simphony.core.keywords import KEYWORDS
from simphony.core.cuba import CUBA
def supported_cuba():
    """ Return the set of CUBA keys that can be supported by vtk.

    A key is supported exactly when it has a usable default value.
    """
    supported = set()
    for key in CUBA:
        if default_cuba_value(key) is not None:
            supported.add(key)
    return supported
def default_cuba_value(cuba):
    """ Return the default value of the CUBA key as a scalar or numpy array.

    Int type values have ``-1`` as default, while float type values
    have ``numpy.nan``.

    .. note::

       Only vector and scalar values are currently supported.

    """
    description = KEYWORDS[cuba.name]
    if description.dtype is None:
        return None
    if description.shape == [1]:
        # FIX: use the abstract numpy scalar types numpy.floating /
        # numpy.integer. The old numpy.float / numpy.int were aliases of the
        # builtins, deprecated in NumPy 1.20 and removed in 1.24.
        if numpy.issubdtype(description.dtype, numpy.floating):
            return numpy.nan
        elif numpy.issubdtype(description.dtype, numpy.integer):
            return -1
        else:
            message = 'ignored property {!r} : not a float or int'
            warnings.warn(message.format(cuba))
    elif description.shape == [3]:
        if numpy.issubdtype(description.dtype, numpy.floating):
            return numpy.array(
                [numpy.nan, numpy.nan, numpy.nan], dtype=description.dtype)
        elif numpy.issubdtype(description.dtype, numpy.integer):
            return numpy.array([-1, -1, -1], dtype=description.dtype)
        else:
            message = 'ignored property {!r} : not a float or int'
            warnings.warn(message.format(cuba))
    else:
        message = 'ignored property {!r} : not a vector or scalar'
        warnings.warn(message.format(cuba))
def empty_array(cuba, length, fill=None):
    """ Return an array filled with the default value for CUBA.

    Parameters
    ----------
    cuba : CUBA
        The CUBA key to use to base the array data.

    length : tuple
        The length of the array in CUBA value items.

    fill :
        The scalar or array value to fill for every CUBA item.

    Returns
    -------
    data : ndarray

    """
    description = KEYWORDS[cuba.name]
    # Resulting shape is (length, *item_shape), e.g. (n, 1) or (n, 3).
    shape = [length] + description.shape
    data = numpy.empty(shape=shape, dtype=description.dtype)
    # Fall back to the CUBA key's default when no explicit fill is given.
    default = default_cuba_value(cuba) if fill is None else fill
    if shape[1] == 1:
        # Scalar items: broadcast the single default over the whole array.
        data.fill(default)
    else:
        # Vector items: fill column-by-column from the default vector.
        for index, value in enumerate(default):
            data[:, index] = value
    return data
| simphony/simphony-mayavi | simphony_mayavi/core/cuba_utils.py | Python | bsd-2-clause | 2,376 | [
"VTK"
] | 4ad7651bb0c1b2d8f881add513bb1ad0c0322aa79af8ed2d9d1925a21594742f |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
r"""Radial Distribution Functions --- :mod:`MDAnalysis.analysis.rdf`
================================================================
This module contains two classes to calculate radial
`pair distribution functions`_ (`radial distribution functions`_ or "RDF").
The RDF :math:`g_{ab}(r)` between types of particles :math:`a` and :math:`b` is
.. _equation-gab:
.. math::
g_{ab}(r) = (N_{a} N_{b})^{-1} \sum_{i=1}^{N_a} \sum_{j=1}^{N_b}
\langle \delta(|\mathbf{r}_i - \mathbf{r}_j| - r) \rangle
which is normalized so that the RDF becomes 1 for large separations in a
homogeneous system. The RDF effectively counts the average number of :math:`b`
neighbours in a shell at distance :math:`r` around a :math:`a` particle and
represents it as a density.
The radial cumulative distribution function is
.. math::
G_{ab}(r) = \int_0^r \!\!dr' 4\pi r'^2 g_{ab}(r')
and the average number of :math:`b` particles within radius :math:`r`
.. _equation-countab:
.. math::
N_{ab}(r) = \rho G_{ab}(r)
(with the appropriate density :math:`\rho`). The latter function can be used to
compute, for instance, coordination numbers such as the number of neighbors in
the first solvation shell :math:`N(r_1)` where :math:`r_1` is the position of
the first minimum in :math:`g(r)`.
In :class:`InterRDF_s`, we provide an option `density`. When `density` is
``False``, it will return the RDF :math:`g_{ab}(r)`; when `density` is
``True``, it will return the density of particle :math:`b` in a shell at
distance :math:`r` around a :math:`a` particle, which is
.. _equation-nab:
.. math::
n_{ab}(r) = \rho g_{ab}(r)
.. _`pair distribution functions`:
https://en.wikipedia.org/wiki/Pair_distribution_function
.. _`radial distribution functions`:
https://en.wikipedia.org/wiki/Radial_distribution_function
Average radial distribution function
------------------------------------
:class:`InterRDF` is a tool to calculate average radial distribution functions
between two groups of atoms. Suppose we have two AtomGroups ``A`` and
``B``. ``A`` contains atom ``A1``, ``A2``, and ``B`` contains ``B1``,
``B2``. Given ``A`` and ``B`` to :class:`InterRDF`, the output will be the
average of RDFs between ``A1`` and ``B1``, ``A1`` and ``B2``, ``A2`` and
``B1``, ``A2`` and ``B2``. A typical application is to calculate the RDF of
solvent with itself or with another solute.
.. autoclass:: InterRDF
:members:
:inherited-members:
.. attribute:: results.bins
:class:`numpy.ndarray` of the centers of the `nbins` histogram
bins.
.. versionadded:: 2.0.0
.. attribute:: bins
Alias to the :attr:`results.bins` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.bins` instead.
.. attribute:: results.edges
:class:`numpy.ndarray` of the `nbins + 1` edges of the histogram
bins.
.. versionadded:: 2.0.0
.. attribute:: edges
Alias to the :attr:`results.edges` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.edges` instead.
.. attribute:: results.rdf
:class:`numpy.ndarray` of the :ref:`radial distribution
function<equation-gab>` values for the :attr:`results.bins`.
.. versionadded:: 2.0.0
.. attribute:: rdf
Alias to the :attr:`results.rdf` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.rdf` instead.
.. attribute:: results.count
:class:`numpy.ndarray` representing the radial histogram, i.e.,
the raw counts, for all :attr:`results.bins`.
.. versionadded:: 2.0.0
.. attribute:: count
Alias to the :attr:`results.count` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.count` instead.
Site-specific radial distribution function
------------------------------------------
:class:`InterRDF_s` calculates site-specific radial distribution
functions. Instead of two groups of atoms it takes as input a list of pairs of
AtomGroup, ``[[A, B], [C, D], ...]``. Given the same ``A`` and ``B`` to
:class:`InterRDF_s`, the output will be a list of individual RDFs between
``A1`` and ``B1``, ``A1`` and ``B2``, ``A2`` and ``B1``, ``A2`` and ``B2`` (and
similarly for ``C`` and ``D``). These site-specific radial distribution
functions are typically calculated if one is interested in the solvation shells
of a ligand in a binding site or the solvation of specific residues in a
protein.
.. autoclass:: InterRDF_s
:members:
:inherited-members:
.. attribute:: results.bins
:class:`numpy.ndarray` of the centers of the `nbins` histogram
bins; all individual site-specific RDFs have the same bins.
.. versionadded:: 2.0.0
.. attribute:: bins
Alias to the :attr:`results.bins` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.bins` instead.
.. attribute:: results.edges
:class:`numpy.ndarray` of the `nbins + 1` edges of the histogram
bins; all individual site-specific RDFs have the same bins.
.. versionadded:: 2.0.0
.. attribute:: edges
Alias to the :attr:`results.edges` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.edges` instead.
.. attribute:: results.rdf
:class:`list` of the site-specific :ref:`radial distribution
functions<equation-gab>` or :ref:`density
functions<equation-nab>` for the :attr:`bins`. The list contains
``len(ags)`` entries. Each entry for the ``i``-th pair ``[A, B]
= ags[i]`` in `ags` is a :class:`numpy.ndarray` with shape
``(len(A), len(B))``, i.e., a stack of RDFs. For example,
``results.rdf[i][0, 2]`` is the RDF between atoms ``A[0]``
and ``B[2]``.
.. versionadded:: 2.0.0
.. attribute:: rdf
Alias to the :attr:`results.rdf` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.rdf` instead.
.. attribute:: results.count
:class:`list` of the site-specific radial histograms, i.e., the
raw counts, for all :attr:`results.bins`. The data have the same
structure as :attr:`results.rdf` except that the arrays contain
the raw counts.
.. versionadded:: 2.0.0
.. attribute:: count
Alias to the :attr:`results.count` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.count` instead.
.. attribute:: results.cdf
:class:`list` of the site-specific :ref:`cumulative
counts<equation-countab>`, for all :attr:`results.bins`. The data
have the same structure as :attr:`results.rdf` except that the arrays
contain the cumulative counts.
This attribute only exists after :meth:`get_cdf` has been run.
.. versionadded:: 2.0.0
.. attribute:: cdf
Alias to the :attr:`results.cdf` attribute.
.. deprecated:: 2.0.0
This attribute will be removed in 3.0.0.
Use :attr:`results.cdf` instead.
.. Not Implemented yet:
.. - Structure factor?
.. - Coordination number
"""
import warnings
import numpy as np
from ..lib.util import blocks_of
from ..lib import distances
from .base import AnalysisBase
class InterRDF(AnalysisBase):
    r"""Intermolecular pair distribution function
    The :ref:`radial distribution function<equation-gab>` is calculated by
    histogramming distances between all particles in `g1` and `g2` while taking
    periodic boundary conditions into account via the minimum image
    convention.
    The `exclusion_block` keyword may be used to exclude a set of distances
    from the calculations.
    Results are available in the attributes :attr:`rdf` and :attr:`count`.
    Arguments
    ---------
    g1 : AtomGroup
        First AtomGroup
    g2 : AtomGroup
        Second AtomGroup
    nbins : int (optional)
        Number of bins in the histogram
    range : tuple or list (optional)
        The size of the RDF
    exclusion_block : tuple (optional)
        A tuple representing the tile to exclude from the distance
        array.
    verbose : bool (optional)
        Show detailed progress of the calculation if set to ``True``
    Example
    -------
    First create the :class:`InterRDF` object, by supplying two
    AtomGroups then use the :meth:`run` method ::
      rdf = InterRDF(ag1, ag2)
      rdf.run()
    Results are available through the :attr:`results.bins` and
    :attr:`results.rdf` attributes::
      plt.plot(rdf.results.bins, rdf.results.rdf)
    The `exclusion_block` keyword allows the masking of pairs from
    within the same molecule.  For example, if there are 7 of each
    atom in each molecule, the exclusion mask `(7, 7)` can be used.
    .. versionadded:: 0.13.0
    .. versionchanged:: 1.0.0
       Support for the ``start``, ``stop``, and ``step`` keywords has been
       removed. These should instead be passed to :meth:`InterRDF.run`.
    .. versionchanged:: 2.0.0
       Store results as attributes ``bins``, ``edges``, ``rdf`` and ``count``
       of the ``results`` attribute of
       :class:`~MDAnalysis.analysis.AnalysisBase`.
    """
    def __init__(self, g1, g2,
                 nbins=75, range=(0.0, 15.0), exclusion_block=None,
                 **kwargs):
        super(InterRDF, self).__init__(g1.universe.trajectory, **kwargs)
        self.g1 = g1
        self.g2 = g2
        self.u = g1.universe
        # Settings passed straight to np.histogram for every frame.
        self.rdf_settings = {'bins': nbins,
                             'range': range}
        self._exclusion_block = exclusion_block
    def _prepare(self):
        # Empty histogram to store the RDF; a dummy input of [-1] is used
        # only to obtain correctly shaped `count` and `edges` arrays.
        count, edges = np.histogram([-1], **self.rdf_settings)
        count = count.astype(np.float64)
        count *= 0.0
        self.results.count = count
        self.results.edges = edges
        # Bin centers (midpoints of the histogram edges).
        self.results.bins = 0.5 * (edges[:-1] + edges[1:])
        # Need to know average volume
        self.volume = 0.0
        # Set the max range to filter the search radius
        self._maxrange = self.rdf_settings['range'][1]
    def _single_frame(self):
        pairs, dist = distances.capped_distance(self.g1.positions,
                                                self.g2.positions,
                                                self._maxrange,
                                                box=self.u.dimensions)
        # Maybe exclude same molecule distances: pairs whose atoms fall in
        # the same exclusion tile are dropped from the histogram.
        if self._exclusion_block is not None:
            idxA = pairs[:, 0]//self._exclusion_block[0]
            idxB = pairs[:, 1]//self._exclusion_block[1]
            mask = np.where(idxA != idxB)[0]
            dist = dist[mask]
        count = np.histogram(dist, **self.rdf_settings)[0]
        self.results.count += count
        self.volume += self._ts.volume
    def _conclude(self):
        # Number of each selection
        nA = len(self.g1)
        nB = len(self.g2)
        N = nA * nB
        # If we had exclusions, take these into account
        if self._exclusion_block:
            xA, xB = self._exclusion_block
            # NOTE(review): true division — assumes nA is an exact multiple
            # of the exclusion tile size xA; confirm callers guarantee this.
            nblocks = nA / xA
            N -= xA * xB * nblocks
        # Volume in each radial shell
        vols = np.power(self.results.edges, 3)
        vol = 4/3 * np.pi * np.diff(vols)
        # Average number density over the trajectory
        box_vol = self.volume / self.n_frames
        density = N / box_vol
        rdf = self.results.count / (density * vol * self.n_frames)
        self.results.rdf = rdf
    @property
    def edges(self):
        # Deprecated alias; points users at results.edges (was wrongly
        # advertising `results.bins`).
        wmsg = ("The `edges` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.edges` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.edges
    @property
    def count(self):
        # Deprecated alias; points users at results.count (was wrongly
        # advertising `results.bins`).
        wmsg = ("The `count` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.count` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.count
    @property
    def bins(self):
        wmsg = ("The `bins` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.bins` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.bins
    @property
    def rdf(self):
        wmsg = ("The `rdf` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.rdf` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.rdf
class InterRDF_s(AnalysisBase):
    r"""Site-specific intermolecular pair distribution function
    Arguments
    ---------
    u : Universe
        a Universe that contains atoms in `ags`
    ags : list
        a list of pairs of :class:`~MDAnalysis.core.groups.AtomGroup`
        instances
    nbins : int (optional)
        Number of bins in the histogram
    range : tuple or list (optional)
        The size of the RDF
    density : bool (optional)
        ``False``: calculate :math:`g_{ab}(r)`; ``True``: calculate
        the true :ref:`single particle density<equation-nab>`
        :math:`n_{ab}(r)`.
        .. versionadded:: 1.0.1
           This keyword was available since 0.19.0 but was not
           documented. Furthermore, it had the opposite
           meaning. Since 1.0.1 it is officially supported as
           documented.
    Example
    -------
    First create the :class:`InterRDF_s` object, by supplying one Universe and
    one list of pairs of AtomGroups, then use the :meth:`~InterRDF_s.run`
    method::
      from MDAnalysisTests.datafiles import GRO_MEMPROT, XTC_MEMPROT
      u = mda.Universe(GRO_MEMPROT, XTC_MEMPROT)
      s1 = u.select_atoms('name ZND and resid 289')
      s2 = u.select_atoms('(name OD1 or name OD2) and resid 51 and sphzone 5.0 (resid 289)')
      s3 = u.select_atoms('name ZND and (resid 291 or resid 292)')
      s4 = u.select_atoms('(name OD1 or name OD2) and sphzone 5.0 (resid 291)')
      ags = [[s1, s2], [s3, s4]]
      rdf = InterRDF_s(u, ags)
      rdf.run()
    Results are available through the :attr:`results.bins`
    and :attr:`results.rdf` attributes::
      plt.plot(rdf.results.bins, rdf.results.rdf[0][0, 0])
    (Which plots the rdf between the first atom in ``s1`` and the first atom in
    ``s2``)
    To generate the *cumulative distribution function* (cdf) in the sense of
    "particles within radius :math:`r`", i.e., :math:`N_{ab}(r)`, use the
    :meth:`~InterRDF_s.get_cdf` method ::
      cdf = rdf.get_cdf()
    Results are available through the :attr:`results.cdf` attribute::
      plt.plot(rdf.results.bins, rdf.results.cdf[0][0, 0])
    (Which plots the cdf between the first atom in ``s1`` and the first atom in
    ``s2``)
    .. versionadded:: 0.19.0
    .. versionchanged:: 1.0.0
       Support for the ``start``, ``stop``, and ``step`` keywords has been
       removed. These should instead be passed to :meth:`InterRDF_s.run`.
    .. versionchanged:: 2.0.0
       Store results as attributes ``bins``, ``edges``, ``rdf``, ``count``
       and ``cdf`` of the ``results`` attribute
       of :class:`~MDAnalysis.analysis.AnalysisBase`.
    """
    def __init__(self, u, ags,
                 nbins=75, range=(0.0, 15.0), density=False, **kwargs):
        super(InterRDF_s, self).__init__(u.universe.trajectory, **kwargs)
        # List of pairs of AtomGroups
        self.ags = ags
        self.u = u
        self._density = density
        self.rdf_settings = {'bins': nbins,
                             'range': range}
    def _prepare(self):
        # Dummy histogram call only to obtain the bin edges/length.
        count, edges = np.histogram([-1], **self.rdf_settings)
        # One (n_atoms_A, n_atoms_B, nbins) counter per AtomGroup pair.
        self.results.count = [np.zeros((ag1.n_atoms, ag2.n_atoms, len(count)),
                                       dtype=np.float64) for ag1, ag2 in self.ags]
        self.results.edges = edges
        self.results.bins = 0.5 * (edges[:-1] + edges[1:])
        # Need to know average volume
        self.volume = 0.0
        self._maxrange = self.rdf_settings['range'][1]
    def _single_frame(self):
        for i, (ag1, ag2) in enumerate(self.ags):
            pairs, dist = distances.capped_distance(ag1.positions,
                                                    ag2.positions,
                                                    self._maxrange,
                                                    box=self.u.dimensions)
            # Accumulate each pair distance into the per-site histogram.
            for j, (idx1, idx2) in enumerate(pairs):
                self.results.count[i][idx1, idx2, :] += np.histogram(dist[j],
                                                                     **self.rdf_settings)[0]
        self.volume += self._ts.volume
    def _conclude(self):
        # Volume in each radial shell
        vols = np.power(self.results.edges, 3)
        vol = 4/3 * np.pi * np.diff(vols)
        # Average number density (loop-invariant; hoisted out of the loop)
        box_vol = self.volume / self.n_frames
        density = 1 / box_vol
        # Empty lists to restore indices, RDF
        indices = []
        rdf = []
        for i, (ag1, ag2) in enumerate(self.ags):
            # Atom indices of each selection in the pair
            indices.append([ag1.indices, ag2.indices])
            if self._density:
                rdf.append(self.results.count[i] / (vol * self.n_frames))
            else:
                rdf.append(
                    self.results.count[i] / (density * vol * self.n_frames))
        self.results.rdf = rdf
        self.results.indices = indices
    def get_cdf(self):
        r"""Calculate the cumulative counts for all sites.
        This is the :ref:`cumulative count<equation-countab>` within a given
        radius, i.e., :math:`N_{ab}(r)`.
        The result is returned and also stored in the attribute
        :attr:`results.cdf`.
        Returns
        -------
        cdf : list
              list of arrays with the same structure as :attr:`results.rdf`
        """
        # Calculate cumulative distribution function
        # Empty list to restore CDF
        cdf = []
        for count in self.results.count:
            cdf.append(np.cumsum(count, axis=2) / self.n_frames)
        # Results stored in self.results.cdf
        # self.results.cdf is a list of cdf between pairs of AtomGroups in ags
        self.results.cdf = cdf
        return cdf
    @property
    def edges(self):
        # Deprecated alias; points users at results.edges (was wrongly
        # advertising `results.bins`).
        wmsg = ("The `edges` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.edges` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.edges
    @property
    def count(self):
        # Deprecated alias; points users at results.count (was wrongly
        # advertising `results.bins`).
        wmsg = ("The `count` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.count` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.count
    @property
    def bins(self):
        wmsg = ("The `bins` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.bins` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.bins
    @property
    def rdf(self):
        wmsg = ("The `rdf` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.rdf` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.rdf
    @property
    def cdf(self):
        wmsg = ("The `cdf` attribute was deprecated in MDAnalysis 2.0.0 "
                "and will be removed in MDAnalysis 3.0.0. Please use "
                "`results.cdf` instead")
        warnings.warn(wmsg, DeprecationWarning)
        return self.results.cdf
| MDAnalysis/mdanalysis | package/MDAnalysis/analysis/rdf.py | Python | gpl-2.0 | 21,430 | [
"MDAnalysis"
] | 2b2290c0c9e2427a1fff4b682cdb7d1544114351eed65a0ab431514437982ec1 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """Split *iterator* into (matching, non-matching) lists, preserving order."""
    matched: List[Any] = []
    unmatched: List[Any] = []
    for item in iterator:
        (matched if predicate(item) else unmatched).append(item)
    return matched, unmatched
class assetCallTransformer(cst.CSTTransformer):
    """libcst transformer that rewrites flattened client method calls.

    Positional/keyword arguments of known asset-API methods are folded into
    a single ``request={...}`` dict argument; control parameters (retry,
    timeout, metadata) are kept as separate keyword arguments.
    """
    # Keyword args that are call-control options, not request fields.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Known API method names mapped to their (ordered) request parameters.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'analyze_iam_policy': ('analysis_query', 'options', ),
        'export_iam_policy_analysis': ('analysis_query', 'output_config', 'options', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a matching method call; return other calls unchanged."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate control keywords (retry/timeout/metadata) from request fields.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the request parameter count must be control
        # args passed positionally; rebind them by name.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        # Build the single request={...} dict from the remaining args.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=assetCallTransformer(),
    # NOTE: the default transformer instance is created once at import time
    # and shared across all calls with the default argument.
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # Lazily walk in_dir and yield every *.py path found.
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )
    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
    # CLI entry point: validate the two directory arguments, then run fix_files.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the asset client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
      parameters in client method calls to keyword based parameters.
      Cases where it WILL FAIL include
      A) * or ** expansion in a method call.
      B) Calls via function or method alias (includes free function calls)
      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
      These all constitute false negatives. The tool will also detect false
      positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Input must exist and be a directory.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # Output must exist and be a directory.
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # Refuse to write into a non-empty output directory.
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
| googleapis/python-asset | scripts/fixup_asset_v1p4beta1_keywords.py | Python | apache-2.0 | 6,046 | [
"VisIt"
] | b9bb8d971d65eecb20902b9113f3fc02371dfcd4a83849dca8faa9064cee71d8 |
import pathlib
import os
import weakref
import numpy as np
import pytest
import vtk
import pyvista
from pyvista import examples
from pyvista.plotting import system_supports_plotting
# Directory containing this test module; used to feed a non-mesh file to readers.
test_path = os.path.dirname(os.path.abspath(__file__))
# True when the installed VTK is version 9+ (cell-array layout changed in VTK 9).
VTK9 = vtk.vtkVersion().GetVTKMajorVersion() >= 9
# must be manually set until pytest adds parametrize with fixture feature
HEXBEAM_CELLS_BOOL = np.ones(40, np.bool_)  # matches hexbeam.n_cells == 40
STRUCTGRID_CELLS_BOOL = np.ones(729, np.bool_)  # struct_grid.n_cells == 729
def test_volume(hexbeam):
    """Mesh volume of the hexbeam fixture must be positive."""
    assert hexbeam.volume > 0.0
@pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting")
def test_struct_example():
    """Plot a structured grid and its curvature; both return a CameraPosition."""
    # create and plot structured grid
    grid = examples.load_structured()
    cpos = grid.plot(off_screen=True)  # basic plot
    assert isinstance(cpos, pyvista.CameraPosition)
    # Plot mean curvature
    cpos_curv = grid.plot_curvature(off_screen=True)
    assert isinstance(cpos_curv, pyvista.CameraPosition)
def test_init_from_structured(struct_grid):
    """UnstructuredGrid built from a StructuredGrid keeps points; cells are hexes (type 12)."""
    unstruct_grid = pyvista.UnstructuredGrid(struct_grid)
    assert unstruct_grid.points.shape[0] == struct_grid.x.size
    assert np.all(unstruct_grid.celltypes == 12)
def test_init_from_unstructured(hexbeam):
    """deep=True copy must not share point memory with the source grid."""
    grid = pyvista.UnstructuredGrid(hexbeam, deep=True)
    grid.points += 1
    assert not np.any(grid.points == hexbeam.points)
def test_init_from_numpy_arrays():
    """Construct a two-hexahedron grid from raw numpy arrays (VTK8 needs offsets)."""
    offset = np.array([0, 9])
    # VTK cell stream: [n_points, p0, ..., p7] per hexahedron.
    cells = [
        [8, 0, 1, 2, 3, 4, 5, 6, 7],
        [8, 8, 9, 10, 11, 12, 13, 14, 15]
    ]
    cells = np.array(cells).ravel()
    cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON])
    cell1 = np.array(
        [
            [0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0],
            [0, 0, 1],
            [1, 0, 1],
            [1, 1, 1],
            [0, 1, 1],
        ]
    )
    cell2 = np.array(
        [
            [0, 0, 2],
            [1, 0, 2],
            [1, 1, 2],
            [0, 1, 2],
            [0, 0, 3],
            [1, 0, 3],
            [1, 1, 3],
            [0, 1, 3],
        ]
    )
    points = np.vstack((cell1, cell2))
    # VTK 9 dropped the offset array from the constructor signature.
    if VTK9:
        grid = pyvista.UnstructuredGrid(cells, cell_type, points)
    else:
        grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
    assert grid.number_of_points == 16
    assert grid.number_of_cells == 2
def test_init_bad_input():
    """Invalid constructor arguments must raise TypeError."""
    with pytest.raises(TypeError):
        unstruct_grid = pyvista.UnstructuredGrid(np.array(1))
    with pytest.raises(TypeError):
        unstruct_grid = pyvista.UnstructuredGrid(np.array(1),
                                                 np.array(1),
                                                 np.array(1),
                                                 'woa')
def create_hex_example():
    """Return (offset, cells, cell_type, points) for two stacked unit hexahedra."""
    # VTK cell stream: [n_points, p0..p7] for each of the two hexes.
    connectivity = [list(range(0, 8)), list(range(8, 16))]
    cells = np.array([v for conn in connectivity for v in [8] + conn])
    cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)
    # Corners: for each hex (z base 0 then 2), bottom then top layer,
    # corners ordered (0,0) -> (1,0) -> (1,1) -> (0,1).
    corner_order = ((0, 0), (1, 0), (1, 1), (0, 1))
    corners = [(x, y, layer + base)
               for base in (0, 2)
               for layer in (0, 1)
               for x, y in corner_order]
    points = np.array(corners, dtype=np.int32)
    # Per-cell start offsets into `cells` (VTK8-style, including the count slot).
    offset = np.array([0, 9], np.int8)
    return offset, cells, cell_type, points
#Try both with and without an offset array
@pytest.mark.parametrize('specify_offset', [False, True])
def test_init_from_arrays(specify_offset):
    """Round-trip the hex example through the constructor, with/without offsets."""
    offset, cells, cell_type, points = create_hex_example()
    if VTK9:
        grid = pyvista.UnstructuredGrid(cells, cell_type, points, deep=False)
    else:
        if specify_offset:
            grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points, deep=False)
        else:
            grid = pyvista.UnstructuredGrid(cells, cell_type, points, deep=False)
        assert np.allclose(grid.offset, offset)
    assert grid.n_cells == 2
    assert np.allclose(cells, grid.cells)
    # cell_connectivity only exists in the VTK 9 cell layout.
    if VTK9:
        assert np.allclose(grid.cell_connectivity, np.arange(16))
    else:
        with pytest.raises(AttributeError):
            grid.cell_connectivity
@pytest.mark.parametrize('multiple_cell_types', [False, True])
@pytest.mark.parametrize('flat_cells', [False, True])
def test_init_from_dict(multiple_cell_types, flat_cells):
    """Construct from a {cell_type: connectivity} dict and validate errors."""
    #Try mixed construction
    vtk8_offsets, vtk_cell_format, cell_type, points = create_hex_example()
    vtk9_offsets = np.array([0, 8, 16])
    cells_hex = np.array([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]])
    input_cells_dict = {vtk.VTK_HEXAHEDRON: cells_hex}
    if multiple_cell_types:
        # Append one quad cell below the hexes.
        cells_quad = np.array([[16, 17, 18, 19]])
        cell3 = np.array([[0, 0, -1],
                          [1, 0, -1],
                          [1, 1, -1],
                          [0, 1, -1]])
        points = np.vstack((points, cell3))
        input_cells_dict[vtk.VTK_QUAD] = cells_quad
        #Update expected vtk cell arrays
        vtk_cell_format = np.concatenate([vtk_cell_format, [4], np.squeeze(cells_quad)])
        vtk8_offsets = np.concatenate([vtk8_offsets, [18]])
        vtk9_offsets = np.concatenate([vtk9_offsets, [20]])
        cell_type = np.concatenate([cell_type, [vtk.VTK_QUAD]])
    if flat_cells:
        # The dict also accepts flat (1-D) connectivity arrays.
        input_cells_dict = {k: v.reshape([-1]) for k, v in input_cells_dict.items()}
    grid = pyvista.UnstructuredGrid(input_cells_dict, points, deep=False)
    if VTK9:
        assert np.all(grid.offset == vtk9_offsets)
    else:
        assert np.all(grid.offset == vtk8_offsets)
    assert grid.n_cells == (3 if multiple_cell_types else 2)
    assert np.all(grid.cells == vtk_cell_format)
    if VTK9:
        assert np.allclose(grid.cell_connectivity, (np.arange(20) if multiple_cell_types else np.arange(16)))
    else:
        with pytest.raises(AttributeError):
            grid.cell_connectivity
    #Now fetch the arrays
    output_cells_dict = grid.cells_dict
    assert np.all(output_cells_dict[vtk.VTK_HEXAHEDRON].reshape([-1]) == input_cells_dict[vtk.VTK_HEXAHEDRON].reshape([-1]))
    if multiple_cell_types:
        assert np.all(output_cells_dict[vtk.VTK_QUAD].reshape([-1]) == input_cells_dict[vtk.VTK_QUAD].reshape([-1]))
    #Test for some errors
    #Invalid index (<0)
    input_cells_dict[vtk.VTK_HEXAHEDRON] -= 1
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid(input_cells_dict, points, deep=False)
    # Restore
    input_cells_dict[vtk.VTK_HEXAHEDRON] += 1
    # Invalid index (>= nr_points)
    input_cells_dict[vtk.VTK_HEXAHEDRON].flat[0] = points.shape[0]
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid(input_cells_dict, points, deep=False)
    # NOTE(review): this restore subtracts 1 from ALL entries rather than
    # restoring only flat[0]; harmless since the dict is not reused — confirm.
    input_cells_dict[vtk.VTK_HEXAHEDRON] -= 1
    # Incorrect size
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid({vtk.VTK_HEXAHEDRON: cells_hex.reshape([-1])[:-1]}, points, deep=False)
    # Unknown cell type
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid({255: cells_hex}, points, deep=False)
    # Dynamic sizes cell type
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid({vtk.VTK_POLYGON: cells_hex.reshape([-1])}, points, deep=False)
    # Non-integer arrays
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid({vtk.VTK_HEXAHEDRON: cells_hex.reshape([-1])[:-1].astype(np.float32)}, points)
    # Invalid point dimensions
    with pytest.raises(ValueError):
        pyvista.UnstructuredGrid(input_cells_dict, points[..., :-1])
def test_cells_dict_hexbeam_file():
    """cells_dict of the hexbeam file matches its raw cell connectivity."""
    grid = pyvista.UnstructuredGrid(examples.hexbeamfile)
    # Strip the leading per-cell point count (every 9th entry) from the stream.
    cells = np.delete(grid.cells, np.arange(0, grid.cells.size, 9)).reshape([-1, 8])
    assert np.all(grid.cells_dict[vtk.VTK_HEXAHEDRON] == cells)
def test_cells_dict_variable_length():
    """cells_dict raises for variable-length and unknown cell types."""
    cells_poly = np.concatenate([[5], np.arange(5)])
    cells_types = np.array([vtk.VTK_POLYGON])
    points = np.random.normal(size=(5, 3))
    grid = pyvista.UnstructuredGrid(cells_poly, cells_types, points)
    # Dynamic sizes cell types are currently unsupported
    with pytest.raises(ValueError):
        grid.cells_dict
    grid.celltypes[:] = 255
    # Unknown cell types
    with pytest.raises(ValueError):
        grid.cells_dict
def test_cells_dict_empty_grid():
    """An empty grid has no cells_dict."""
    grid = pyvista.UnstructuredGrid()
    assert grid.cells_dict is None
def test_cells_dict_alternating_cells():
    """cells_dict groups interleaved quad/triangle cells by type."""
    cells = np.concatenate([[4], [1, 2, 3, 4], [3], [0, 1, 2], [4], [0, 1, 5, 6]])
    cells_types = np.array([vtk.VTK_QUAD, vtk.VTK_TRIANGLE, vtk.VTK_QUAD])
    points = np.random.normal(size=(3+2*2, 3))
    grid = pyvista.UnstructuredGrid(cells, cells_types, points)
    cells_dict = grid.cells_dict
    # VTK 9 offsets index connectivity; VTK 8 offsets index the full stream.
    if VTK9:
        assert np.all(grid.offset == np.array([0, 4, 7, 11]))
    else:
        assert np.all(grid.offset == np.array([0, 5, 9]))
    assert np.all(cells_dict[vtk.VTK_QUAD] == np.array([cells[1:5], cells[-4:]]))
    assert np.all(cells_dict[vtk.VTK_TRIANGLE] == [0, 1, 2])
def test_destructor():
    """Deleting the last reference must actually free the grid (no ref cycle)."""
    ugrid = examples.load_hexbeam()
    ref = weakref.ref(ugrid)
    del ugrid
    assert ref() is None
def test_surface_indices(hexbeam):
    """surface_indices() matches vtkOriginalPointIds of the extracted surface."""
    surf = hexbeam.extract_surface()
    surf_ind = surf.point_arrays['vtkOriginalPointIds']
    assert np.allclose(surf_ind, hexbeam.surface_indices())
def test_extract_feature_edges(hexbeam):
    """A 90-degree threshold finds edges; 180 degrees finds none."""
    edges = hexbeam.extract_feature_edges(90)
    assert edges.n_points
    edges = hexbeam.extract_feature_edges(180)
    assert not edges.n_points
def test_triangulate_inplace(hexbeam):
    """In-place triangulation converts every cell to a tetrahedron."""
    hexbeam.triangulate(inplace=True)
    assert (hexbeam.celltypes == vtk.VTK_TETRA).all()
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', pyvista.pointset.UnstructuredGrid._WRITERS)
def test_save(extension, binary, tmpdir, hexbeam):
    """Save/reload round-trip preserves cell and point array shapes."""
    filename = str(tmpdir.mkdir("tmpdir").join(f'tmp.{extension}'))
    hexbeam.save(filename, binary)
    grid = pyvista.UnstructuredGrid(filename)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    grid = pyvista.read(filename)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    assert isinstance(grid, pyvista.UnstructuredGrid)
def test_pathlib_read_write(tmpdir, hexbeam):
    """Save/reload works with pathlib.Path arguments as well as strings."""
    path = pathlib.Path(str(tmpdir.mkdir("tmpdir").join('tmp.vtk')))
    assert not path.is_file()
    hexbeam.save(path)
    assert path.is_file()
    grid = pyvista.UnstructuredGrid(path)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    grid = pyvista.read(path)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    assert isinstance(grid, pyvista.UnstructuredGrid)
def test_init_bad_filename():
    """Wrong file type raises IOError; missing file raises FileNotFoundError."""
    filename = os.path.join(test_path, 'test_grid.py')
    with pytest.raises(IOError):
        grid = pyvista.UnstructuredGrid(filename)
    with pytest.raises(FileNotFoundError):
        grid = pyvista.UnstructuredGrid('not a file')
def test_save_bad_extension():
    """Loading an unknown-extension path raises."""
    # NOTE(review): despite the name, this constructs (not saves) from a
    # nonexistent path, so FileNotFoundError fires — confirm intent.
    with pytest.raises(FileNotFoundError):
        grid = pyvista.UnstructuredGrid('file.abc')
def test_linear_copy(hexbeam):
    """linear_copy() leaves only linear cell types (< 20)."""
    # need a grid with quadratic cells
    lgrid = hexbeam.linear_copy()
    assert np.all(lgrid.celltypes < 20)
def test_linear_copy_surf_elem():
    """linear_copy() of quadratic surface cells yields expected mesh quality."""
    # One quadratic quad (8 nodes) and one quadratic triangle (6 nodes).
    cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 6, 8, 9, 10, 11, 12, 13], np.int32)
    celltypes = np.array([vtk.VTK_QUADRATIC_QUAD, vtk.VTK_QUADRATIC_TRIANGLE],
                         np.uint8)
    cell0 = [[0.0, 0.0, 0.0],
             [1.0, 0.0, 0.0],
             [1.0, 1.0, 0.0],
             [0.0, 1.0, 0.0],
             [0.5, 0.1, 0.0],
             [1.1, 0.5, 0.0],
             [0.5, 0.9, 0.0],
             [0.1, 0.5, 0.0]]
    cell1 = [[0.0, 0.0, 1.0],
             [1.0, 0.0, 1.0],
             [0.5, 0.5, 1.0],
             [0.5, 0.0, 1.3],
             [0.7, 0.7, 1.3],
             [0.1, 0.1, 1.3]]
    points = np.vstack((cell0, cell1))
    if VTK9:
        grid = pyvista.UnstructuredGrid(cells, celltypes, points, deep=False)
    else:
        offset = np.array([0, 9])
        grid = pyvista.UnstructuredGrid(offset, cells, celltypes, points, deep=False)
    lgrid = grid.linear_copy()
    # Check the linearized cells via VTK's mesh quality filter.
    qfilter = vtk.vtkMeshQuality()
    qfilter.SetInputData(lgrid)
    qfilter.Update()
    qual = pyvista.wrap(qfilter.GetOutput())['Quality']
    assert np.allclose(qual, [1, 1.4], atol=0.01)
def test_extract_cells(hexbeam):
    """extract_cells accepts index lists and boolean masks equivalently."""
    ind = [1, 2, 3]
    part_beam = hexbeam.extract_cells(ind)
    assert part_beam.n_cells == len(ind)
    assert part_beam.n_points < hexbeam.n_points
    assert np.allclose(part_beam.cell_arrays['vtkOriginalCellIds'], ind)
    # Same extraction via a boolean mask.
    mask = np.zeros(hexbeam.n_cells, np.bool_)
    mask[ind] = True
    part_beam = hexbeam.extract_cells(mask)
    assert part_beam.n_cells == len(ind)
    assert part_beam.n_points < hexbeam.n_points
    assert np.allclose(part_beam.cell_arrays['vtkOriginalCellIds'], ind)
    # NOTE(review): no assertions on this last case — presumably it only
    # checks that a non-contiguous array view is accepted without raising.
    ind = np.vstack(([1, 2], [4, 5]))[:, 0]
    part_beam = hexbeam.extract_cells(ind)
def test_merge(hexbeam):
    """Merging with point merging yields fewer points than without."""
    grid = hexbeam.copy()
    grid.points[:, 0] += 1
    unmerged = grid.merge(hexbeam, inplace=False, merge_points=False)
    grid.merge(hexbeam, inplace=True, merge_points=True)
    assert grid.n_points > hexbeam.n_points
    assert grid.n_points < unmerged.n_points
def test_merge_not_main(hexbeam):
    """Merging with main_has_priority=False behaves like test_merge for counts."""
    grid = hexbeam.copy()
    grid.points[:, 0] += 1
    unmerged = grid.merge(hexbeam, inplace=False, merge_points=False,
                          main_has_priority=False)
    grid.merge(hexbeam, inplace=True, merge_points=True)
    assert grid.n_points > hexbeam.n_points
    assert grid.n_points < unmerged.n_points
def test_merge_list(hexbeam):
    """merge() accepts a list of grids."""
    grid_a = hexbeam.copy()
    grid_a.points[:, 0] += 1
    grid_b = hexbeam.copy()
    grid_b.points[:, 1] += 1
    grid_a.merge([hexbeam, grid_b], inplace=True, merge_points=True)
    assert grid_a.n_points > hexbeam.n_points
def test_merge_invalid(hexbeam, sphere):
    """Merging an UnstructuredGrid into a PolyData raises TypeError."""
    with pytest.raises(TypeError):
        sphere.merge([hexbeam], inplace=True)
def test_init_structured(struct_grid):
    """StructuredGrid built from meshgrid arrays matches the fixture."""
    xrng = np.arange(-10, 10, 2)
    yrng = np.arange(-10, 10, 2)
    zrng = np.arange(-10, 10, 2)
    x, y, z = np.meshgrid(xrng, yrng, zrng)
    grid = pyvista.StructuredGrid(x, y, z)
    assert np.allclose(struct_grid.x, x)
    assert np.allclose(struct_grid.y, y)
    assert np.allclose(struct_grid.z, z)
    # Copy-construction from another StructuredGrid preserves points.
    grid_a = pyvista.StructuredGrid(grid)
    assert np.allclose(grid_a.points, grid.points)
def test_slice_structured(struct_grid):
    """Index/full/range slicing of a StructuredGrid selects the right points."""
    sliced = struct_grid[1, :, 1:3]  # three different kinds of slices
    assert sliced.dimensions == [1, struct_grid.dimensions[1], 2]
    # check that points are in the right place
    assert struct_grid.x[1, :, 1:3].ravel() == pytest.approx(sliced.x.ravel())
    assert struct_grid.y[1, :, 1:3].ravel() == pytest.approx(sliced.y.ravel())
    assert struct_grid.z[1, :, 1:3].ravel() == pytest.approx(sliced.z.ravel())
    with pytest.raises(RuntimeError):
        # fancy indexing error
        struct_grid[[1, 2, 3], :, 1:3]
    with pytest.raises(RuntimeError):
        # incorrect number of dims error
        struct_grid[:, :]
def test_invalid_init_structured():
    """Mismatched x/y/z array shapes raise ValueError."""
    xrng = np.arange(-10, 10, 2)
    yrng = np.arange(-10, 10, 2)
    zrng = np.arange(-10, 10, 2)
    x, y, z = np.meshgrid(xrng, yrng, zrng)
    z = z[:, :, :2]  # deliberately truncate z so shapes no longer agree
    with pytest.raises(ValueError):
        grid = pyvista.StructuredGrid(x, y, z)
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', pyvista.pointset.StructuredGrid._WRITERS)
def test_save_structured(extension, binary, tmpdir, struct_grid):
    """Save/reload round-trip for every StructuredGrid writer format."""
    filename = str(tmpdir.mkdir("tmpdir").join(f'tmp.{extension}'))
    struct_grid.save(filename, binary)
    grid = pyvista.StructuredGrid(filename)
    assert grid.x.shape == struct_grid.y.shape
    assert grid.n_cells
    assert grid.points.shape == struct_grid.points.shape
    grid = pyvista.read(filename)
    assert grid.x.shape == struct_grid.y.shape
    assert grid.n_cells
    assert grid.points.shape == struct_grid.points.shape
    assert isinstance(grid, pyvista.StructuredGrid)
def test_load_structured_bad_filename():
    """Missing file raises FileNotFoundError; wrong file type raises IOError."""
    with pytest.raises(FileNotFoundError):
        pyvista.StructuredGrid('not a file')
    filename = os.path.join(test_path, 'test_grid.py')
    with pytest.raises(IOError):
        grid = pyvista.StructuredGrid(filename)
def test_instantiate_by_filename():
    """Each example file loads with its matching type; mismatched types raise."""
    ex = examples
    # actual mapping of example file to datatype
    correct_pairs = {
        ex.antfile: pyvista.PolyData,
        ex.planefile: pyvista.PolyData,
        ex.hexbeamfile: pyvista.UnstructuredGrid,
        ex.spherefile: pyvista.PolyData,
        ex.uniformfile: pyvista.UniformGrid,
        ex.rectfile: pyvista.RectilinearGrid,
    }
    # a few combinations of wrong type
    wrong_pairs = {
        ex.antfile: pyvista.UnstructuredGrid,    # actual data is PolyData
        ex.planefile: pyvista.StructuredGrid,    # actual data is PolyData
        ex.rectfile: pyvista.UnstructuredGrid,   # actual data is StructuredGrid
    }
    # load the files into the right types
    for path, mesh_type in correct_pairs.items():
        mesh = mesh_type(path)
        assert mesh.n_points > 0
    # loading into the wrong type must fail
    for path, mesh_type in wrong_pairs.items():
        with pytest.raises(ValueError):
            mesh_type(path)
def test_create_rectilinear_grid_from_specs():
    """RectilinearGrid from 1/2/3 coordinate arrays has expected counts/bounds."""
    # 3D example
    xrng = np.arange(-10, 10, 2)
    yrng = np.arange(-10, 10, 5)
    zrng = np.arange(-10, 10, 1)
    grid = pyvista.RectilinearGrid(xrng)
    assert grid.n_cells == 9
    assert grid.n_points == 10
    grid = pyvista.RectilinearGrid(xrng, yrng)
    assert grid.n_cells == 9*3
    assert grid.n_points == 10*4
    grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
    assert grid.n_cells == 9*3*19
    assert grid.n_points == 10*4*20
    assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0]
    # 2D example
    cell_spacings = np.array([1., 1., 2., 2., 5., 10.])
    x_coordinates = np.cumsum(cell_spacings)
    y_coordinates = np.cumsum(cell_spacings)
    grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates)
    assert grid.n_cells == 5*5
    assert grid.n_points == 6*6
    assert grid.bounds == [1.,21., 1.,21., 0.,0.]
def test_create_rectilinear_after_init():
    """Assigning x/y/z after construction grows dimensions one axis at a time."""
    x = np.array([0,1,2])
    y = np.array([0,5,8])
    z = np.array([3,2,1])
    grid = pyvista.RectilinearGrid()
    grid.x = x
    assert grid.dimensions == [3, 1, 1]
    grid.y = y
    assert grid.dimensions == [3, 3, 1]
    grid.z = z
    assert grid.dimensions == [3, 3, 3]
    assert np.allclose(grid.x, x)
    assert np.allclose(grid.y, y)
    assert np.allclose(grid.z, z)
def test_create_rectilinear_grid_from_file():
    """The bundled example rectilinear grid loads with the known sizes."""
    grid = examples.load_rectilinear()
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
    assert grid.n_arrays == 1
def test_read_rectilinear_grid_from_file():
    """The generic ``pyvista.read`` dispatcher loads the example .vtr file
    with the known cell/point counts, bounds and array count."""
    grid = pyvista.read(examples.rectfile)
    expected_bounds = [-350.0, 1350.0, -400.0, 1350.0, -850.0, 0.0]
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == expected_bounds
    assert grid.n_arrays == 1
def test_read_rectilinear_grid_from_pathlib():
    """RectilinearGrid accepts a pathlib.Path as well as a str filename."""
    grid = pyvista.RectilinearGrid(pathlib.Path(examples.rectfile))
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == [-350.0, 1350.0, -400.0, 1350.0, -850.0, 0.0]
    assert grid.n_arrays == 1
def test_cast_rectilinear_grid():
    """cast_to_structured_grid preserves geometry and every point/cell array."""
    grid = pyvista.read(examples.rectfile)
    structured = grid.cast_to_structured_grid()
    assert isinstance(structured, pyvista.StructuredGrid)
    assert structured.n_points == grid.n_points
    assert structured.n_cells == grid.n_cells
    assert np.allclose(structured.points, grid.points)
    # every data array must survive the cast unchanged
    for k, v in grid.point_arrays.items():
        assert np.allclose(structured.point_arrays[k], v)
    for k, v in grid.cell_arrays.items():
        assert np.allclose(structured.cell_arrays[k], v)
def test_create_uniform_grid_from_specs():
    """UniformGrid construction honors dims, then an optional spacing, then an
    optional origin; unspecified values fall back to the documented defaults."""
    # create UniformGrid
    dims = [10, 10, 10]
    grid = pyvista.UniformGrid(dims)  # Using default spacing and origin
    assert grid.dimensions == [10, 10, 10]
    assert grid.extent == [0, 9, 0, 9, 0, 9]
    assert grid.origin == [0.0, 0.0, 0.0]
    assert grid.spacing == [1.0, 1.0, 1.0]
    spacing = [2, 1, 5]
    grid = pyvista.UniformGrid(dims, spacing)  # Using default origin
    assert grid.dimensions == [10, 10, 10]
    assert grid.origin == [0.0, 0.0, 0.0]
    assert grid.spacing == [2.0, 1.0, 5.0]
    origin = [10, 35, 50]
    grid = pyvista.UniformGrid(dims, spacing, origin)  # Everything is specified
    assert grid.dimensions == [10, 10, 10]
    assert grid.origin == [10.0, 35.0, 50.0]
    assert grid.spacing == [2.0, 1.0, 5.0]
    # (a verbatim duplicate of the dimensions assertion used to follow here;
    # it added no coverage and was removed)
def test_uniform_setters():
    """The dimensions/spacing/origin setters forward to the underlying VTK
    object (checked through the raw Get* accessors)."""
    grid = pyvista.UniformGrid()
    grid.dimensions = [10, 10, 10]
    assert grid.GetDimensions() == (10, 10, 10)
    assert grid.dimensions == [10, 10, 10]
    grid.spacing = [5, 2, 1]
    assert grid.GetSpacing() == (5, 2, 1)
    assert grid.spacing == [5, 2, 1]
    grid.origin = [6, 27.7, 19.8]
    assert grid.GetOrigin() == (6, 27.7, 19.8)
    assert grid.origin == [6, 27.7, 19.8]
def test_create_uniform_grid_from_file():
    """The bundled example uniform grid loads with the known sizes."""
    grid = examples.load_uniform()
    assert grid.n_cells == 729
    assert grid.n_points == 1000
    assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
    assert grid.n_arrays == 2
    assert grid.dimensions == [10, 10, 10]
def test_read_uniform_grid_from_file():
    """``pyvista.read`` on the example .vti file yields the known grid."""
    grid = pyvista.read(examples.uniformfile)
    assert grid.n_cells == 729
    assert grid.n_points == 1000
    assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
    assert grid.n_arrays == 2
    assert grid.dimensions == [10, 10, 10]
def test_read_uniform_grid_from_pathlib():
    """UniformGrid accepts a pathlib.Path as well as a str filename."""
    grid = pyvista.UniformGrid(pathlib.Path(examples.uniformfile))
    assert grid.n_cells == 729
    assert grid.n_points == 1000
    assert grid.bounds == [0.0, 9.0, 0.0, 9.0, 0.0, 9.0]
    assert grid.n_arrays == 2
    assert grid.dimensions == [10, 10, 10]
def test_cast_uniform_to_structured():
    """Casting a UniformGrid to a StructuredGrid preserves the point count,
    the number of arrays and the bounds."""
    uniform = examples.load_uniform()
    structured = uniform.cast_to_structured_grid()
    for attr in ('n_points', 'n_arrays', 'bounds'):
        assert getattr(structured, attr) == getattr(uniform, attr)
def test_cast_uniform_to_rectilinear():
    """Casting a UniformGrid to a RectilinearGrid preserves the point count,
    the number of arrays and the bounds."""
    uniform = examples.load_uniform()
    rectilinear = uniform.cast_to_rectilinear_grid()
    for attr in ('n_points', 'n_arrays', 'bounds'):
        assert getattr(rectilinear, attr) == getattr(uniform, attr)
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['.vtk', '.vtr'])
def test_save_rectilinear(extension, binary, tmpdir):
    """Round-trip a RectilinearGrid through save()/read() for each format."""
    # `extension` already contains the leading dot; the previous
    # f'tmp.{extension}' pattern produced a double-dotted name ('tmp..vtk').
    filename = str(tmpdir.mkdir("tmpdir").join(f'tmp{extension}'))
    ogrid = examples.load_rectilinear()
    ogrid.save(filename, binary)
    grid = pyvista.RectilinearGrid(filename)
    assert grid.n_cells == ogrid.n_cells
    assert np.allclose(grid.x, ogrid.x)
    assert np.allclose(grid.y, ogrid.y)
    assert np.allclose(grid.z, ogrid.z)
    assert grid.dimensions == ogrid.dimensions
    # the generic reader must dispatch on the file suffix as well
    grid = pyvista.read(filename)
    assert isinstance(grid, pyvista.RectilinearGrid)
    assert grid.n_cells == ogrid.n_cells
    assert np.allclose(grid.x, ogrid.x)
    assert np.allclose(grid.y, ogrid.y)
    assert np.allclose(grid.z, ogrid.z)
    assert grid.dimensions == ogrid.dimensions
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['.vtk', '.vti'])
def test_save_uniform(extension, binary, tmpdir):
    """Round-trip a UniformGrid through save()/read() for each format."""
    # `extension` already contains the leading dot; the previous
    # f'tmp.{extension}' pattern produced a double-dotted name ('tmp..vtk').
    filename = str(tmpdir.mkdir("tmpdir").join(f'tmp{extension}'))
    ogrid = examples.load_uniform()
    ogrid.save(filename, binary)
    grid = pyvista.UniformGrid(filename)
    assert grid.n_cells == ogrid.n_cells
    assert grid.origin == ogrid.origin
    assert grid.spacing == ogrid.spacing
    assert grid.dimensions == ogrid.dimensions
    # the generic reader must dispatch on the file suffix as well
    grid = pyvista.read(filename)
    assert isinstance(grid, pyvista.UniformGrid)
    assert grid.n_cells == ogrid.n_cells
    assert grid.origin == ogrid.origin
    assert grid.spacing == ogrid.spacing
    assert grid.dimensions == ogrid.dimensions
def test_grid_points():
    """Test the points methods on UniformGrid and RectilinearGrid"""
    # test creation of 2d grids
    x = y = range(3)
    z = [0,]
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    points = np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')]
    grid = pyvista.UniformGrid()
    # points are derived from origin/dimensions/spacing, not assignable
    with pytest.raises(AttributeError):
        grid.points = points
    grid.origin = (0.0, 0.0, 0.0)
    grid.dimensions = (3, 3, 1)
    grid.spacing = (1, 1, 1)
    assert grid.n_points == 9
    assert grid.n_cells == 4
    assert np.allclose(grid.points, points)
    points = np.array([[0, 0, 0],
                       [1, 0, 0],
                       [1, 1, 0],
                       [0, 1, 0],
                       [0, 0, 1],
                       [1, 0, 1],
                       [1, 1, 1],
                       [0, 1, 1]])
    grid = pyvista.UniformGrid()
    grid.dimensions = [2, 2, 2]
    grid.spacing = [1, 1, 1]
    grid.origin = [0., 0., 0.]
    # point ordering may differ, so compare sorted unique rows
    assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
    opts = np.c_[grid.x, grid.y, grid.z]
    assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0))
    # Now test rectilinear grid
    grid = pyvista.RectilinearGrid()
    with pytest.raises(AttributeError):
        grid.points = points
    x, y, z = np.array([0, 1, 3]), np.array([0, 2.5, 5]), np.array([0, 1])
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    grid.x = x
    grid.y = y
    grid.z = z
    assert grid.dimensions == [3, 3, 2]
    assert np.allclose(grid.meshgrid, (xx, yy, zz))
    assert np.allclose(grid.points, np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')])
def test_grid_extract_selection_points(struct_grid):
    """extract_points keeps only the cells touching the selected point ids."""
    grid = pyvista.UnstructuredGrid(struct_grid)
    sub_grid = grid.extract_points([0])
    assert sub_grid.n_cells == 1
    sub_grid = grid.extract_points(range(100))
    assert sub_grid.n_cells > 1
def test_gaussian_smooth(hexbeam):
    """gaussian_smooth modifies the active scalars while keeping their name
    and shape (default and custom radius_factor/std_dev)."""
    # NOTE(review): the `hexbeam` fixture is never used in this test --
    # confirm whether it can be dropped from the signature.
    uniform = examples.load_uniform()
    active = uniform.active_scalars_name
    values = uniform.active_scalars
    uniform = uniform.gaussian_smooth(scalars=active)
    assert uniform.active_scalars_name == active
    assert uniform.active_scalars.shape == values.shape
    assert not np.all(uniform.active_scalars == values)
    values = uniform.active_scalars
    uniform = uniform.gaussian_smooth(radius_factor=5, std_dev=1.3)
    assert uniform.active_scalars_name == active
    assert uniform.active_scalars.shape == values.shape
    assert not np.all(uniform.active_scalars == values)
@pytest.mark.parametrize('ind', [range(10), np.arange(10),
                                 HEXBEAM_CELLS_BOOL])
def test_remove_cells(ind, hexbeam):
    """In-place remove_cells accepts a range, an int array and a bool mask."""
    grid_copy = hexbeam.copy()
    grid_copy.remove_cells(ind)
    assert grid_copy.n_cells < hexbeam.n_cells
@pytest.mark.parametrize('ind', [range(10), np.arange(10),
                                 HEXBEAM_CELLS_BOOL])
def test_remove_cells_not_inplace(ind, hexbeam):
    """remove_cells(inplace=False) returns a smaller grid while leaving the
    source grid untouched."""
    original_count = hexbeam.n_cells
    protected = hexbeam.copy()  # copy to protect the fixture
    reduced = protected.remove_cells(ind, inplace=False)
    assert reduced.n_cells < original_count
    assert protected.n_cells == original_count
def test_remove_cells_invalid(hexbeam):
    """A boolean mask whose length does not match n_cells raises ValueError."""
    grid_copy = hexbeam.copy()
    with pytest.raises(ValueError):
        grid_copy.remove_cells(np.ones(10, np.bool_))
@pytest.mark.parametrize('ind', [range(10), np.arange(10),
                                 STRUCTGRID_CELLS_BOOL])
def test_hide_cells(ind, struct_grid):
    """hide_cells blanks the given cells and rejects wrongly-sized bool masks."""
    sgrid_copy = struct_grid.copy()
    sgrid_copy.hide_cells(ind)
    assert sgrid_copy.HasAnyBlankCells()
    with pytest.raises(ValueError, match='Boolean array size must match'):
        # np.bool_ instead of the bare np.bool alias, which is deprecated
        # (NumPy >= 1.20) and removed in later releases; the rest of this
        # file already uses np.bool_.
        sgrid_copy.hide_cells(np.ones(10, dtype=np.bool_))
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_UnstructuredGrid_cast_to_explicit_structured_grid():
    """Round trip explicit structured -> unstructured -> explicit structured
    keeps sizes, BLOCK_I/J/K arrays and the ghost (hidden) cells."""
    grid = examples.load_explicit_structured()
    grid.hide_cells(range(80, 120))
    grid = grid.cast_to_unstructured_grid()
    grid = grid.cast_to_explicit_structured_grid()
    assert grid.n_cells == 120
    assert grid.n_points == 210
    assert grid.bounds == [0.0, 80.0, 0.0, 50.0, 0.0, 6.0]
    assert 'BLOCK_I' in grid.cell_arrays
    assert 'BLOCK_J' in grid.cell_arrays
    assert 'BLOCK_K' in grid.cell_arrays
    assert 'vtkGhostType' in grid.cell_arrays
    assert np.count_nonzero(grid.cell_arrays['vtkGhostType']) == 40
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_init():
    """The example explicit structured grid loads with known sizes, and its
    repr/str include the standard header fields."""
    grid = examples.load_explicit_structured()
    assert isinstance(grid, pyvista.ExplicitStructuredGrid)
    assert grid.n_cells == 120
    assert grid.n_points == 210
    assert grid.bounds == [0.0, 80.0, 0.0, 50.0, 0.0, 6.0]
    assert repr(grid) == str(grid)
    assert 'N Cells' in str(grid)
    assert 'N Points' in str(grid)
    assert 'N Arrays' in str(grid)
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_cast_to_unstructured_grid():
    """Casting attaches BLOCK_I/J/K cell arrays carrying each cell's (i,j,k)
    structured coordinates; whitespace in the literals is irrelevant because
    the strings are split() before parsing."""
    block_i = np.asarray('''
    0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0
    1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1
    2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2
    3 0 1 2 3 0 1 2 3
    '''.split(), dtype=int)
    block_j = np.asarray('''
    0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 4
    4 4 4 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 0 0 0 0 1 1 1 1 2 2 2 2 3 3
    3 3 4 4 4 4 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 0 0 0 0 1 1 1 1 2 2 2
    2 3 3 3 3 4 4 4 4
    '''.split(), dtype=int)
    block_k = np.asarray('''
    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
    1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3
    3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 5
    5 5 5 5 5 5 5 5 5
    '''.split(), dtype=int)
    grid = examples.load_explicit_structured()
    grid = grid.cast_to_unstructured_grid()
    assert isinstance(grid, pyvista.UnstructuredGrid)
    assert 'BLOCK_I' in grid.cell_arrays
    assert 'BLOCK_J' in grid.cell_arrays
    assert 'BLOCK_K' in grid.cell_arrays
    assert np.array_equal(grid.cell_arrays['BLOCK_I'], block_i)
    assert np.array_equal(grid.cell_arrays['BLOCK_J'], block_j)
    assert np.array_equal(grid.cell_arrays['BLOCK_K'], block_k)
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_save():
    """Save/reload round trip preserves sizes, bounds and ghost cells."""
    grid = examples.load_explicit_structured()
    grid.hide_cells(range(80, 120))
    # writes into the current working directory; removed again below
    grid.save('grid.vtu')
    grid = pyvista.ExplicitStructuredGrid('grid.vtu')
    assert grid.n_cells == 120
    assert grid.n_points == 210
    assert grid.bounds == [0.0, 80.0, 0.0, 50.0, 0.0, 6.0]
    assert np.count_nonzero(grid.cell_arrays['vtkGhostType']) == 40
    os.remove('grid.vtu')
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_hide_cells():
    """hide_cells sets vtkGhostType on the requested cells; inplace=False
    returns a copy and inplace=True returns the grid itself."""
    # whitespace in the literal is irrelevant: the string is split() first
    ghost = np.asarray('''
    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
    0 0 0 0 0 0 0 0 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
    32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
    '''.split(), dtype=np.uint8)
    grid = examples.load_explicit_structured()
    copy = grid.hide_cells(range(80, 120), inplace=False)
    assert isinstance(copy, pyvista.ExplicitStructuredGrid)
    assert 'vtkGhostType' in copy.cell_arrays
    assert 'vtkGhostType' not in grid.cell_arrays
    assert np.array_equal(copy.cell_arrays['vtkGhostType'], ghost)
    out = grid.hide_cells(range(80, 120), inplace=True)
    assert out is grid
    assert 'vtkGhostType' in grid.cell_arrays
    assert np.array_equal(grid.cell_arrays['vtkGhostType'], ghost)
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_show_cells():
    """show_cells clears all ghost flags; inplace=False leaves the source
    grid's flags untouched, inplace=True returns the grid itself."""
    grid = examples.load_explicit_structured()
    grid.hide_cells(range(80, 120), inplace=True)
    copy = grid.show_cells(inplace=False)
    assert isinstance(copy, pyvista.ExplicitStructuredGrid)
    assert 'vtkGhostType' in copy.cell_arrays
    assert np.count_nonzero(copy.cell_arrays['vtkGhostType']) == 0
    assert np.count_nonzero(grid.cell_arrays['vtkGhostType']) == 40
    out = grid.show_cells(inplace=True)
    assert out is grid
    assert np.count_nonzero(grid.cell_arrays['vtkGhostType']) == 0
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_dimensions():
    """dimensions is an integer ndarray of shape (3,)."""
    grid = examples.load_explicit_structured()
    assert isinstance(grid.dimensions, np.ndarray)
    assert np.issubdtype(grid.dimensions.dtype, np.integer)
    assert grid.dimensions.shape == (3,)
    assert np.array_equal(grid.dimensions, [5, 6, 7])
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_visible_bounds():
    """visible_bounds excludes hidden (ghost) cells from the extent."""
    grid = examples.load_explicit_structured()
    grid.hide_cells(range(80, 120))
    assert isinstance(grid.visible_bounds, list)
    assert all(isinstance(x, float) for x in grid.visible_bounds)
    assert len(grid.visible_bounds) == 6
    assert grid.visible_bounds == [0.0, 80.0, 0.0, 50.0, 0.0, 4.0]
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_cell_id():
    """cell_id maps (i, j, k) coordinates to flat cell indices, both for a
    single tuple and for a sequence of tuples."""
    grid = examples.load_explicit_structured()
    ind = grid.cell_id((3, 4, 0))
    assert np.issubdtype(ind, np.integer)
    assert ind == 19
    ind = grid.cell_id([(3, 4, 0), (3, 2, 1), (1, 0, 2), (2, 3, 2)])
    assert isinstance(ind, np.ndarray)
    assert np.issubdtype(ind.dtype, np.integer)
    assert np.array_equal(ind, [19, 31, 41, 54])
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_cell_coords():
    """cell_coords is the inverse of cell_id: flat index -> (i, j, k), both
    for a single index and for a sequence of indices."""
    grid = examples.load_explicit_structured()
    coords = grid.cell_coords(19)
    assert isinstance(coords, tuple)
    assert all(np.issubdtype(c, np.integer) for c in coords)
    assert coords == (3, 4, 0)
    coords = grid.cell_coords((19, 31, 41, 54))
    assert isinstance(coords, np.ndarray)
    assert np.issubdtype(coords.dtype, np.integer)
    assert np.array_equal(coords, [(3, 4, 0), (3, 2, 1), (1, 0, 2), (2, 3, 2)])
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_neighbors():
    """All three neighbor relations give the same answer for the corner cell
    of the example grid."""
    grid = examples.load_explicit_structured()
    indices = grid.neighbors(0, rel='topological')
    assert isinstance(indices, list)
    assert all(np.issubdtype(ind, np.integer) for ind in indices)
    assert indices == [1, 4, 20]
    indices = grid.neighbors(0, rel='connectivity')
    assert isinstance(indices, list)
    assert all(np.issubdtype(ind, np.integer) for ind in indices)
    assert indices == [1, 4, 20]
    indices = grid.neighbors(0, rel='geometric')
    assert isinstance(indices, list)
    assert all(np.issubdtype(ind, np.integer) for ind in indices)
    assert indices == [1, 4, 20]
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_compute_connectivity():
    """compute_connectivity adds a ConnectivityFlags cell array; inplace=False
    copies, inplace=True returns the grid itself."""
    # whitespace in the literal is irrelevant: the string is split() first
    connectivity = np.asarray('''
    42 43 43 41 46 47 47 45 46 47 47 45 46 47 47 45 38 39 39 37 58 59 59 57
    62 63 63 61 62 63 63 61 62 63 63 61 54 55 55 53 58 59 59 57 62 63 63 61
    62 63 63 61 62 63 63 61 54 55 55 53 58 59 59 57 62 63 63 61 62 63 63 61
    62 63 63 61 54 55 55 53 58 59 59 57 62 63 63 61 62 63 63 61 62 63 63 61
    54 55 55 53 26 27 27 25 30 31 31 29 30 31 31 29 30 31 31 29 22 23 23 21
    '''.split(), dtype=int)
    grid = examples.load_explicit_structured()
    assert 'ConnectivityFlags' not in grid.cell_arrays
    copy = grid.compute_connectivity(inplace=False)
    assert isinstance(copy, pyvista.ExplicitStructuredGrid)
    assert 'ConnectivityFlags' in copy.cell_arrays
    assert 'ConnectivityFlags' not in grid.cell_arrays
    assert np.array_equal(copy.cell_arrays['ConnectivityFlags'], connectivity)
    out = grid.compute_connectivity(inplace=True)
    assert out is grid
    assert 'ConnectivityFlags' in grid.cell_arrays
    assert np.array_equal(grid.cell_arrays['ConnectivityFlags'], connectivity)
@pytest.mark.skipif(not VTK9, reason='VTK 9 or higher is required')
def test_ExplicitStructuredGrid_compute_connections():
    """compute_connections adds a number_of_connections cell array;
    inplace=False copies, inplace=True returns the grid itself."""
    # whitespace in the literal is irrelevant: the string is split() first
    connections = np.asarray('''
    3 4 4 3 4 5 5 4 4 5 5 4 4 5 5 4 3 4 4 3 4 5 5 4 5 6 6 5 5 6 6 5 5 6 6 5 4
    5 5 4 4 5 5 4 5 6 6 5 5 6 6 5 5 6 6 5 4 5 5 4 4 5 5 4 5 6 6 5 5 6 6 5 5 6
    6 5 4 5 5 4 4 5 5 4 5 6 6 5 5 6 6 5 5 6 6 5 4 5 5 4 3 4 4 3 4 5 5 4 4 5 5
    4 4 5 5 4 3 4 4 3
    '''.split(), dtype=int)
    grid = examples.load_explicit_structured()
    assert 'number_of_connections' not in grid.cell_arrays
    copy = grid.compute_connections(inplace=False)
    assert isinstance(copy, pyvista.ExplicitStructuredGrid)
    assert 'number_of_connections' in copy.cell_arrays
    assert 'number_of_connections' not in grid.cell_arrays
    assert np.array_equal(copy.cell_arrays['number_of_connections'],
                          connections)
    out = grid.compute_connections(inplace=True)
    assert out is grid
    assert 'number_of_connections' in grid.cell_arrays
    assert np.array_equal(grid.cell_arrays['number_of_connections'],
                          connections)
| akaszynski/vtkInterface | tests/test_grid.py | Python | mit | 38,339 | [
"VTK"
] | 942873ab382b417bce8a49a1b9d3ce37db7212f88e3c35f59b28f5497696ec50 |
#!/usr/bin/env python3
from ..distributions import MultivariateNormal
from ..likelihoods import _GaussianLikelihoodBase
from .marginal_log_likelihood import MarginalLogLikelihood
class ExactMarginalLogLikelihood(MarginalLogLikelihood):
    """
    The exact marginal log likelihood (MLL) for an exact Gaussian process with a
    Gaussian likelihood.

    .. note::
        This module will not work with anything other than a :obj:`~gpytorch.likelihoods.GaussianLikelihood`
        and a :obj:`~gpytorch.models.ExactGP`. It also cannot be used in conjunction with
        stochastic optimization.

    :param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood for the model
    :param ~gpytorch.models.ExactGP model: The exact GP model

    Example:
        >>> # model is a gpytorch.models.ExactGP
        >>> # likelihood is a gpytorch.likelihoods.Likelihood
        >>> mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
        >>>
        >>> output = model(train_x)
        >>> loss = -mll(output, train_y)
        >>> loss.backward()
    """

    def __init__(self, likelihood, model):
        if not isinstance(likelihood, _GaussianLikelihoodBase):
            raise RuntimeError("Likelihood must be Gaussian for exact inference")
        # Python-3 zero-argument super(); the file targets py3 (see shebang).
        super().__init__(likelihood, model)

    def forward(self, function_dist, target, *params):
        r"""
        Computes the MLL given :math:`p(\mathbf f)` and :math:`\mathbf y`.

        :param ~gpytorch.distributions.MultivariateNormal function_dist: :math:`p(\mathbf f)`
            the outputs of the latent function (the :obj:`gpytorch.models.ExactGP`)
        :param torch.Tensor target: :math:`\mathbf y` The target values
        :rtype: torch.Tensor
        :return: Exact MLL. Output shape corresponds to batch shape of the model/input data.
        """
        if not isinstance(function_dist, MultivariateNormal):
            raise RuntimeError("ExactMarginalLogLikelihood can only operate on Gaussian random variables")

        # Get the log prob of the marginal distribution
        output = self.likelihood(function_dist, *params)
        res = output.log_prob(target)

        # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
        for added_loss_term in self.model.added_loss_terms():
            res = res.add(added_loss_term.loss(*params))

        # Add log probs of priors on the (functions of) parameters
        for _, prior, closure, _ in self.named_priors():
            res.add_(prior.log_prob(closure()).sum())

        # Scale by the amount of data we have
        num_data = target.size(-1)
        return res.div_(num_data)

    def pyro_factor(self, output, target, *params):
        """Register the MLL as a pyro factor named "gp_mll" (pyro integration)."""
        import pyro

        mll = self(output, target, *params)
        pyro.factor("gp_mll", mll)
        return mll
| jrg365/gpytorch | gpytorch/mlls/exact_marginal_log_likelihood.py | Python | mit | 2,895 | [
"Gaussian"
] | fdb9dcb3e7e377fc19bc8e57fa1c3cb9e09c24d44f67803c8c62dfdccd5b881c |
"""
Tests for analysis.
"""
import VMD
from pyvmd.analysis import hydrogen_bonds, HydrogenBond
from pyvmd.atoms import Atom, Selection
from .utils import data, PyvmdTestCase
class TestAnalysis(PyvmdTestCase):
    """
    Test analysis utilities.
    """
    def setUp(self):
        # Load the small water-box fixture; keep the molecule id for tests.
        molid = VMD.molecule.load('psf', data('water.psf'), 'pdb', data('water.pdb'))
        self.molid = molid

    def test_hydrogen_bonds(self):
        # Test `hydrogen_bonds` function
        sel = Selection('noh')
        result = [HydrogenBond(Atom(18), Atom(19), Atom(9)), HydrogenBond(Atom(9), Atom(11), Atom(6)),
                  HydrogenBond(Atom(6), Atom(7), Atom(15))]
        self.assertEqual(list(hydrogen_bonds(sel)), result)
        # passing the same selection twice must be equivalent to passing it once
        self.assertEqual(list(hydrogen_bonds(sel, sel)), result)
        # a looser angle cutoff admits one additional bond
        result = [HydrogenBond(Atom(18), Atom(19), Atom(9)), HydrogenBond(Atom(9), Atom(11), Atom(6)),
                  HydrogenBond(Atom(6), Atom(7), Atom(9)), HydrogenBond(Atom(6), Atom(7), Atom(15))]
        self.assertEqual(list(hydrogen_bonds(sel, angle=75)), result)
        # extreme angle/distance cutoffs yield no bonds at all
        self.assertEqual(list(hydrogen_bonds(sel, angle=180)), [])
        self.assertEqual(list(hydrogen_bonds(sel, distance=2)), [])
        # If the selections do not share same atoms, check the hydrogen bonds are returned only in correct direction
        sel1 = Selection('index 6')
        sel2 = Selection('index 9')
        self.assertEqual(list(hydrogen_bonds(sel1, sel2, angle=75)), [HydrogenBond(Atom(6), Atom(7), Atom(9))])
| ziima/pyvmd | pyvmd/tests/test_analysis.py | Python | gpl-3.0 | 1,500 | [
"VMD"
] | 696fe63df47ae9877be44b83df8494cd0cb89f91c6652b0a50ddc3356f38e28b |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Configuration
# Build the command-line/configuration definition for this script.
definition = ConfigurationDefinition()
# Galaxy name
definition.add_required("galaxy_name", "string", "the name of the galaxy")
# Flags (all three default to enabled)
definition.add_flag("iras", "include IRAS fluxes", True)
definition.add_flag("planck", "include Planck fluxes", True)
definition.add_flag("write", "write the results", True)
# -----------------------------------------------------------------
| SKIRT/PTS | dustpedia/config/get_sed.py | Python | agpl-3.0 | 899 | [
"Galaxy"
] | 7db53cf321b4f51bd627ddf5492702b7c60bef9d5f22a81a75d7d3287cd4fcba |
########################################################################
# $HeadURL$
########################################################################
""" X509Request is a class for managing X509 requests with their Pkeys
"""
__RCSID__ = "$Id$"
import GSI
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain
class X509Request:
  """Manage an X509 certificate request together with its private key.

  Thin wrapper over the GSI crypto bindings; methods return DIRAC
  S_OK / S_ERROR structures (see the NOTE on generateChainFromResponse).
  """

  def __init__( self, reqObj = None, pkeyObj = None ):
    # The object is only usable once both the request and the key are set,
    # either here or via generateProxyRequest/loadAllFromString.
    self.__valid = False
    self.__reqObj = reqObj
    self.__pkeyObj = pkeyObj
    if reqObj and pkeyObj:
      self.__valid = True

  # It is not used
  # def setParentCerts( self, certList ):
  #   self.__cerList = certList

  def generateProxyRequest( self, bitStrength = 1024, limited = False ):
    """Generate a fresh RSA key pair and a proxy certificate request.

    The request subject gets a "CN=proxy" (or "CN=limited proxy") entry and
    the request is signed with the new key using SHA256.
    """
    self.__pkeyObj = GSI.crypto.PKey()
    self.__pkeyObj.generate_key( GSI.crypto.TYPE_RSA, bitStrength )
    self.__reqObj = GSI.crypto.X509Req()
    self.__reqObj.set_pubkey( self.__pkeyObj )
    if limited:
      self.__reqObj.get_subject().insert_entry( "CN", "limited proxy" )
    else:
      self.__reqObj.get_subject().insert_entry( "CN", "proxy" )
    self.__reqObj.sign( self.__pkeyObj, "SHA256" )
    self.__valid = True

  def dumpRequest( self ):
    """
    Get the request as a string
    """
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    try:
      reqStr = GSI.crypto.dump_certificate_request( GSI.crypto.FILETYPE_PEM, self.__reqObj )
    except Exception, e:
      return S_ERROR( "Can't serialize request: %s" % str( e ) )
    return S_OK( reqStr )

  def getPKey( self ):
    """
    Get PKey Internal
    """
    return self.__pkeyObj

  def dumpPKey( self ):
    """
    Get the pkey as a string
    """
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    try:
      pkeyStr = GSI.crypto.dump_privatekey( GSI.crypto.FILETYPE_PEM, self.__pkeyObj )
    except Exception, e:
      return S_ERROR( "Can't serialize pkey: %s" % str( e ) )
    return S_OK( pkeyStr )

  def dumpAll( self ):
    """
    Dump the contents into a string
    """
    # PEM blocks for the request and the key are simply concatenated
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    try:
      reqStr = GSI.crypto.dump_certificate_request( GSI.crypto.FILETYPE_PEM, self.__reqObj )
    except Exception, e:
      return S_ERROR( "Can't serialize request: %s" % str( e ) )
    try:
      pkeyStr = GSI.crypto.dump_privatekey( GSI.crypto.FILETYPE_PEM, self.__pkeyObj )
    except Exception, e:
      return S_ERROR( "Can't serialize pkey: %s" % str( e ) )
    return S_OK( "%s%s" % ( reqStr, pkeyStr ) )

  def loadAllFromString( self, pemData ):
    """Load both the request and the private key from one PEM string."""
    try:
      self.__reqObj = GSI.crypto.load_certificate_request( GSI.crypto.FILETYPE_PEM, pemData )
    except Exception, e:
      return S_ERROR( "Can't load request: %s" % str( e ) )
    try:
      self.__pkeyObj = GSI.crypto.load_privatekey( GSI.crypto.FILETYPE_PEM, pemData )
    except Exception, e:
      return S_ERROR( "Can't load pkey: %s" % str( e ) )
    self.__valid = True
    return S_OK()

  def generateChainFromResponse( self, pemData ):
    """
    Generate a X509 Chain from the pkey and the pem data passed as the argument
    Return : S_OK( X509Chain ) / S_ERROR
    """
    # NOTE(review): despite the docstring, the success path returns the
    # X509Chain object itself, not S_OK(chain); confirm caller expectations
    # before changing this.
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    try:
      certList = GSI.crypto.load_certificate_chain( GSI.crypto.FILETYPE_PEM, pemData )
    except Exception, e:
      return S_ERROR( "Can't load pem data: %s" % str( e ) )
    chain = X509Chain()
    chain.setChain( certList )
    chain.setPKey( self.__pkeyObj )
    return chain

  def getSubjectDN( self ):
    """
    Get subject DN
    Return: S_OK( string )/S_ERROR
    """
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    return S_OK( self.__reqObj.get_subject().one_line() )

  def getIssuerDN( self ):
    """
    Get issuer DN
    Return: S_OK( string )/S_ERROR
    """
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    return S_OK( self.__reqObj.get_issuer().one_line() )

  def checkChain( self, chain ):
    """
    Check that the chain matches the request
    """
    if not self.__valid:
      return S_ERROR( "No request loaded" )
    retVal = chain.getCertInChain()
    if not retVal[ 'OK' ]:
      return retVal
    lastCert = retVal[ 'Value' ]
    # compare the public key of the last certificate with the request's key
    chainPubKey = GSI.crypto.dump_publickey( GSI.crypto.FILETYPE_PEM, lastCert.getPublicKey()[ 'Value' ] )
    reqPubKey = GSI.crypto.dump_publickey( GSI.crypto.FILETYPE_PEM, self.__pkeyObj )
    if not chainPubKey == reqPubKey:
      # mismatch is reported as a successful call with Value=False + Message
      retVal = S_OK( False )
      retVal[ 'Message' ] = "Public keys do not match"
      return retVal
    return S_OK( True )
| coberger/DIRAC | Core/Security/X509Request.py | Python | gpl-3.0 | 4,610 | [
"DIRAC"
] | 868968b2f648ebdb6b1f1f3aeb1cb6b4e49664f76e4f1e30104210e9d61c7522 |
import unittest
import os
from collections import defaultdict
from mpinterfaces.utils import *
from pymatgen import Structure
from pymatgen.io.vasp.inputs import Kpoints
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "ashtonmv@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "mat2d", "stability", "tests")
)
class UtilsTest(unittest.TestCase):
    """Tests for mpinterfaces.utils helpers.

    Most tests read POSCAR fixtures found under ROOT and chdir into it
    (a process-wide side effect shared between tests).
    """

    def test_is_converged(self):
        # ROOT itself holds no finished VASP run; the BiTeCl subfolder does.
        false_control = is_converged(ROOT)
        true_control = is_converged(os.path.join(ROOT, 'BiTeCl'))
        self.assertTrue(true_control)
        self.assertFalse(false_control)

    def test_ensure_vacuum_for_SiP(self):
        # Compound test for add_vacuum and get_spacing.
        os.chdir(ROOT)
        structure = Structure.from_file('POSCAR_SiP')
        structure = ensure_vacuum(structure, vacuum=15)
        self.assertAlmostEqual(get_spacing(structure), 15.0)

    def test_get_magmom_string_for_FeCl2(self):
        os.chdir(ROOT)
        structure = Structure.from_file('POSCAR_FeCl2')
        test_string = get_magmom_string(structure)
        self.assertEqual(test_string, u'1*6.0 2*0.5')

    def test_get_rotation_matrix(self):
        # a full 2*pi rotation about z must be the identity matrix
        test_matrix = get_rotation_matrix([0, 0, 1], 2*np.pi)
        control_matrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        for i in range(3):
            for j in range(3):
                self.assertAlmostEqual(test_matrix[i][j], control_matrix[i][j])

    def test_align_c_axis_for_non_aligned_structure(self):
        os.chdir(ROOT)
        structure = Structure.from_file('POSCAR_SF6')
        structure = align_axis(structure, 'c', (0, 0, 1))
        control_axis = [9.04099732e-13, -2.42627092e-13, 8.3829076073038635]
        for i in range(3):
            self.assertAlmostEqual(structure.lattice.matrix[2][i],
                                   control_axis[i])

    def test_align_c_axis_for_already_aligned_structure(self):
        # aligning an already aligned structure must be (nearly) a no-op
        os.chdir(ROOT)
        control_axis = [0, 0, 23.4186286267]
        structure = Structure.from_file('BiTeCl/POSCAR')
        structure = align_axis(structure, 'c', (0, 0, 1))
        for i in range(3):
            self.assertTrue(abs(
                structure.lattice.matrix[2][i] - control_axis[i]
            ) < 0.0001)

    def test_get_structure_type_for_conventional_material(self):
        os.chdir(ROOT)
        structure = Structure.from_file('POSCAR_Fe')
        test_type = get_structure_type(structure)
        self.assertEqual(test_type, 'conventional')

    def test_get_structure_type_for_layered_material(self):
        os.chdir(ROOT)
        structure = Structure.from_file('POSCAR_FeCl2')
        test_type = get_structure_type(structure)
        self.assertEqual(test_type, 'layered')

    def test_get_structure_type_for_1D_material(self):
        pass  # I still need to find one of these...

    def test_get_structure_type_for_0D_material(self):
        os.chdir(ROOT)
        structure = Structure.from_file('POSCAR_O2')
        test_type = get_structure_type(structure)
        self.assertEqual(test_type, 'molecular')

    def test_write_circle_mesh_kpoints(self):
        # compares the generated KPOINTS file against a checked-in control
        os.chdir(ROOT)
        write_circle_mesh_kpoints()
        test_file = open('KPOINTS')
        test_lines = test_file.readlines()
        control_file = open('circle_mesh_KPOINTS')
        control_lines = control_file.readlines()
        self.assertEqual(test_lines, control_lines)
        os.system('rm KPOINTS')
        test_file.close()
        control_file.close()

    def test_get_markovian_path(self):
        points = ((0, 0), (1, 1), (1, 0), (0, 1))
        control_points = ((0, 0), (1, 0), (1, 1), (0, 1))
        test_points = get_markovian_path(points)
        for i in range(len(control_points)):
            self.assertEqual(test_points[i], control_points[i])

    def test_remove_z_kpoints(self):
        os.chdir(os.path.join(ROOT, 'BiTeCl'))
        structure = Structure.from_file('POSCAR')
        kpath = HighSymmKpath(structure)
        Kpoints.automatic_linemode(20, kpath).write_file('KPOINTS')
        remove_z_kpoints()
        test_file = open('KPOINTS')
        test_lines = test_file.readlines()
        print (test_lines)
        control_file = open('../BiTeCl_control/KPOINTS')
        control_lines = control_file.readlines()
        print (control_lines)
        self.assertEqual(test_lines, control_lines)
        os.system('rm KPOINTS')
        test_file.close()
        control_file.close()

    def test_get_run_cmmnd(self):
        os.chdir(os.path.join(ROOT, '../../../../'))
        # NOTE(review): this local assignment cannot affect get_run_cmmnd();
        # it was presumably meant to patch a module-level setting -- confirm.
        QUEUE_SYSTEM='slurm'
        trial_output = get_run_cmmnd()
        correct_output = (defaultdict(None, {'account': None, 'mem': None,
                                             'walltime': '10:00:00', 'nodes': 1,
                                             'pre_rocket': None, 'job_name': None,
                                             'ntasks': 16, 'email': None,
                                             'rocket_launch': None}), None)
        self.assertEqual(trial_output, correct_output)
if __name__ == '__main__':
unittest.main()
| henniggroup/MPInterfaces | mpinterfaces/tests/test_utils.py | Python | mit | 5,130 | [
"VASP",
"pymatgen"
] | f3cd9d0a786fafcf6e4d458a68b2025cb64482dbbece08a3c9fbcb951b8b1794 |
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
    """Return the docstring registered under ``name`` (None when unknown)."""
    return docdict.get(name, None)
def add_newdoc(place, name, doc):
    """Register *doc* in the module-level docdict under the key "<place>.<name>"."""
    key = '{}.{}'.format(place, name)
    docdict[key] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
# Registers the docstring for scipy.special.airy. The AMOS reference URL
# was garbled ("http://netlib.org/amos/.org/amos/"); corrected below.
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# Registers the docstring for scipy.special.bdtrik. Fixed the typos
# "seach" -> "search" and "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
# Registers the docstring for scipy.special.bdtrin. Fixed the typos
# "seach" -> "search" and "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
# Registers the docstring for scipy.special.btdtria. Fixed the typos
# "seach" -> "search" and "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
# Registers the docstring for scipy.special.btdtrib. The signature line
# wrongly read "btdtria(a, p, x)" (copy-paste from btdtria); corrected.
# Also fixed the typos "seach" -> "search", "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "btdtrib",
r"""
btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
# Registers the docstring for scipy.special.chdtriv. The signature line
# wrongly read "chdtri(p, x)"; corrected to match the registered name.
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# Registers the docstring for scipy.special.ellipj. The amplitude relation
# referred to `ellipk`, which takes only `m`; the incomplete integral
# `ellipkinc(phi, m)` is the function actually meant, so both occurrences
# are corrected.
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt(p)
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer `n` and non-negative `x` and
`n`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function.
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a, x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a, x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
""")
add_newdoc("scipy.special", "_gammaln",
"""
Internal function, use ``gammaln`` instead.
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the `p`-th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`f''(z) + vf'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
See also
--------
jv : Bessel function of real order and complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
See also
--------
jv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
See also
--------
jv
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
# scipy.special.jve: exponentially scaled Bessel function of the first kind.
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
# scipy.special.k0: modified Bessel function of the second kind, order 0.
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.k0e: exponentially scaled variant of k0.
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.k1: modified Bessel function of the second kind, order 1.
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.k1e: exponentially scaled variant of k1.
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.kei: Kelvin function kei.
# Fix: the summary line previously read "Kelvin function ker" — a
# copy-paste error from the `ker` entry below; this entry documents kei.
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
# scipy.special.keip: derivative of the Kelvin function kei.
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
# scipy.special.kelvin: all four Kelvin functions and derivatives at once.
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
# scipy.special.ker: Kelvin function ker.
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
# scipy.special.kerp: derivative of the Kelvin function ker.
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
# scipy.special.kl_div: elementwise Kullback-Leibler divergence term.
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
""")
# scipy.special.kn: modified Bessel function of the second kind, integer order.
# Fix: the signature is kn(n, x), but the intro and Parameters section
# documented the argument as `z` — renamed to `x` for consistency.
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `x`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
...     plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([  44.23241585,  360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
# scipy.special.lpmv: associated Legendre function of integer order.
# Fix: "legendre" capitalized — Legendre is a proper noun.
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : float
Degree.
x : float
Argument. Must be ``|x| <= 1``.
Returns
-------
res : float
The value of the function.
See Also
--------
lpmn : Similar, but computes values for all orders 0..m and degrees 0..n.
clpmn : Similar to `lpmn` but allows a complex argument.
Notes
-----
It is possible to extend the domain of this function to all
complex m, v, x, but this is not yet implemented.
""")
# scipy.special.mathieu_a: characteristic value for even Mathieu solutions.
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
# scipy.special.mathieu_b: characteristic value for odd Mathieu solutions.
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
# scipy.special.mathieu_cem: even Mathieu function ce_m and its derivative.
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# scipy.special.mathieu_modcem1: even modified Mathieu function, first kind.
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# scipy.special.mathieu_modcem2: even modified Mathieu function, second kind.
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# scipy.special.mathieu_modsem1: odd modified Mathieu function, first kind.
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# scipy.special.mathieu_modsem2: odd modified Mathieu function, second kind.
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# scipy.special.mathieu_sem: odd Mathieu function se_m and its derivative.
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# scipy.special.modfresnelm: modified Fresnel negative integrals.
# Fix: the K_-(x) formula previously ended in ``*fp`` — a copy-paste
# remnant from modfresnelp (below, which returns fp/kp); this function
# returns (fm, km), so the scaling applies to ``fm``.
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
# scipy.special.modfresnelp: modified Fresnel positive integrals.
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
# scipy.special.modstruve: modified Struve function L_v(x).
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
# scipy.special.nbdtr: negative binomial CDF.
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.nbdtrc: negative binomial survival function.
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.nbdtri: inverse of nbdtr with respect to p.
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# scipy.special.nbdtrik: inverse of nbdtr with respect to k.
# Fix: typos "seach" -> "search" and "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
# scipy.special.nbdtrin: inverse of nbdtr with respect to n.
# Fix: typos "seach" -> "search" and "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
# scipy.special.ncfdtr: CDF of the non-central F distribution.
# Fix: the See Also entries were misspelled "ncdfdtri*"; the actual
# functions (documented immediately below in this file) are ncfdtri,
# ncfdtridfd, ncfdtridfn, and ncfdtrinc.
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
# scipy.special.ncfdtri: inverse CDF of the non-central F distribution.
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
# scipy.special.ncfdtridfd: solve ncfdtr for the denominator dof.
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
# scipy.special.ncfdtridfn: solve ncfdtr for the numerator dof.
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
# scipy.special.ncfdtrinc: solve ncfdtr for the noncentrality parameter.
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
# scipy.special.nctdtr: CDF of the non-central t distribution.
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
# scipy.special.nctdtridf: solve nctdtr for the degrees of freedom.
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
# scipy.special.nctdtrinc: solve nctdtr for the noncentrality parameter.
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
# scipy.special.nctdtrit: inverse CDF of the non-central t distribution.
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
# scipy.special.ndtr: standard normal CDF.
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
# scipy.special.nrdtrimn: recover the normal mean from p, x, and std.
# Fix: the See Also section listed this function itself ("nrdtrimn");
# the intended cross-reference is its sibling nrdtrisd (defined next in
# this file), which solves for the standard deviation instead.
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
# scipy.special.nrdtrisd: recover the normal standard deviation from p, x, mn.
# Fix: the See Also section referenced a nonexistent "nrdtristd"; the
# intended cross-reference is the sibling nrdtrimn (defined just above in
# this file), which solves for the mean instead.
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
# scipy.special.log_ndtr: log of the standard normal CDF.
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
# scipy.special.ndtri: inverse of the standard normal CDF (quantile function).
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
# scipy.special.obl_ang1: oblate spheroidal angular function, first kind.
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# scipy.special.obl_ang1_cv: obl_ang1 with a precomputed characteristic value.
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# scipy.special.obl_cv: characteristic value for oblate spheroidal functions.
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
# scipy.special.obl_rad1: oblate spheroidal radial function, first kind.
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# scipy.special.obl_rad1_cv: obl_rad1 with a precomputed characteristic value.
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# scipy.special.obl_rad2: oblate spheroidal radial function, second kind.
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# scipy.special.obl_rad2_cv: obl_rad2 with a precomputed characteristic value.
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# scipy.special.pbdv: parabolic cylinder function D and its derivative.
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
# scipy.special.pbvv: parabolic cylinder function V and its derivative.
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
# scipy.special.pbwa: parabolic cylinder function W and its derivative.
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
# scipy.special.pdtr: Poisson CDF.
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
# scipy.special.pdtrc: Poisson survival function.
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
# scipy.special.pdtri: inverse of pdtr with respect to m.
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
# scipy.special.pdtrik: inverse of pdtr with respect to k.
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(x)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z)
Spence's function, also known as the dilogarithm. It is defined to
be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Note that there is a different convention which defines Spence's
function by the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function. It is
defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
implementation here is based on [hare1997]_.
The function has a single branch cut on the negative real axis and
is taken to be continuous when approaching the axis from
above. Note that it is not generally true that
:math:`\log\Gamma(z) = \log(\Gamma(z))`, though the real parts of
the functions do agree. The benefit of not defining ``loggamma``
as :math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggamma`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
| Newman101/scipy | scipy/special/add_newdocs.py | Python | bsd-3-clause | 137,537 | [
"Gaussian"
] | 7f350a55004c900411fd76ce837d318c80e8383d6b651cf2b087ade763709284 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Opera browser history parsers."""
import unittest
from plaso.parsers import opera
from tests.parsers import test_lib
class OperaTypedParserTest(test_lib.ParserTestCase):
  """Tests for the Opera Typed History parser."""

  def testParse(self):
    """Tests the Parse function."""
    parser = opera.OperaTypedHistoryParser()
    storage_writer = self._ParseFile(['typed_history.xml'], parser)

    # The typed_history.xml fixture yields 4 events and no parser warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 4)

    events = list(storage_writer.GetEvents())

    # First event: URL selected from the autocomplete list.
    expected_event_values = {
        'data_type': 'opera:history:typed_entry',
        'entry_selection': 'Filled from autocomplete.',
        'timestamp': '2013-11-11 23:45:27.000000',
        'url': 'plaso.kiddaland.net'}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)

    # Last event: URL typed manually by the user.
    expected_event_values = {
        'data_type': 'opera:history:typed_entry',
        'entry_selection': 'Manually typed.',
        'timestamp': '2013-11-11 22:46:07.000000',
        'url': 'theonion.com'}

    self.CheckEventValues(storage_writer, events[3], expected_event_values)
class OperaGlobalParserTest(test_lib.ParserTestCase):
  """Tests for the Opera Global History parser."""

  def testParseFile(self):
    """Read a history file and run a few tests."""
    parser = opera.OperaGlobalHistoryParser()
    storage_writer = self._ParseFile(['global_history.dat'], parser)

    # The global_history.dat fixture yields 37 events and no parser warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 37)

    events = list(storage_writer.GetEvents())

    # Spot-check an entry with a non-ASCII (Icelandic) title.
    expected_event_values = {
        'data_type': 'opera:history:entry',
        'description': 'First and Only Visit',
        'timestamp': '2013-11-11 22:45:46.000000',
        'title': 'Karl Bretaprins fær ellilífeyri - mbl.is',
        'url': (
            'http://www.mbl.is/frettir/erlent/2013/11/11/'
            'karl_bretaprins_faer_ellilifeyri/')}

    self.CheckEventValues(storage_writer, events[4], expected_event_values)

    # Spot-check only the timestamp of a later entry.
    expected_event_values = {
        'data_type': 'opera:history:entry',
        'timestamp': '2013-11-11 22:45:55.000000'}

    self.CheckEventValues(storage_writer, events[10], expected_event_values)

    # Spot-check an entry whose title contains an escaped quote.
    expected_event_values = {
        'data_type': 'opera:history:entry',
        'timestamp': '2013-11-11 22:46:16.000000',
        'title': (
            '10 Celebrities You Never Knew Were Abducted And Murdered '
            'By Andie MacDowell | The Onion - America\'s Finest News Source')}

    self.CheckEventValues(storage_writer, events[16], expected_event_values)
if __name__ == '__main__':
unittest.main()
| Onager/plaso | tests/parsers/opera.py | Python | apache-2.0 | 2,745 | [
"VisIt"
] | 4151e7ca3beb4f97ec1881484dc540c065a9ab511f329ce54fc7bc621dd2ae6a |
from __future__ import print_function
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
from orphics import maps
from pixell import enmap, utils
from scipy.special import factorial
try:
from pixell import lensing as enlensing
except:
print("WARNING: Couldn't load pixell lensing. Some features will be unavailable.")
from scipy.integrate import simps
from scipy.interpolate import splrep,splev
from scipy.fftpack import fftshift,ifftshift,fftfreq
from scipy.interpolate import interp1d
from pixell.fft import fft,ifft
from orphics.stats import bin2D
import time
from six.moves import cPickle as pickle
from orphics import stats
import os,sys
from pyfisher import get_lensing_nl as get_nl
def validate_geometry(shape, wcs, verbose=False):
    """Sanity-check an enmap pixelization, printing warnings for implausible values.

    Warns when the map area exceeds the full sky or is below one square
    arcminute, and when the pixel width exceeds 30 degrees or is below one
    arcsecond. Optionally prints the computed area and pixel width.

    Parameters
    ----------
    shape, wcs : enmap geometry (shape tuple and astropy-style WCS).
    verbose : bool, print the computed area (sq. deg.) and pixel width (arcmin).
    """
    # Map area in square degrees (enmap.area returns steradians).
    sr_to_deg2 = (180. / np.pi) ** 2.
    area = enmap.area(shape, wcs) * sr_to_deg2
    if verbose:
        print("Geometry area : ", area, " sq.deg.")
    # Full sky is ~41253 sq. deg.; (1/60/60) sq. deg. is one arcmin^2.
    if area > 41252.:
        print("WARNING: Geometry has area larger than full-sky.")
        print(shape, wcs)
    if area < (1. / 60. / 60.):
        print("WARNING: Geometry has area less than 1 arcmin^2.")
        print(shape, wcs)
    # Pixel width in degrees.
    res = np.rad2deg(maps.resolution(shape, wcs))
    if verbose:
        print("Geometry pixel width : ", res * 60., " arcmin.")
    if res > 30.0:
        print("WARNING: Geometry has pixel larger than 30 degrees.")
        print(shape, wcs)
    if res < (1. / 60. / 60.):
        print("WARNING: Geometry has pixel smaller than 1 arcsecond.")
        print(shape, wcs)
def binned_nfw(mass,z,conc,cc,shape,wcs,bin_edges_arcmin,lmax=None,lmin=None,overdensity=200.,
               critical=False,at_cluster_z=True,kmask=None,
               sigma_mis=None,improved=True,hm=None,exclude_2h=False):
    """Return the binned radial profile of a Fourier-filtered NFW convergence map.

    Parameters
    ----------
    mass : float, cluster mass in Msun/h.
    z : float, cluster redshift.
    conc : float, NFW concentration.
    cc : Cosmology object (used only on the non-improved nfw_kappa path).
    shape, wcs : enmap geometry on which the convergence map is evaluated.
    bin_edges_arcmin : array, radial bin edges in arcminutes.
    lmax, lmin : ints, band limits used to build the default Fourier mask.
    overdensity : float, halo overdensity definition (e.g. 200).
    critical : bool, overdensity w.r.t. critical (True) or mean (False) density.
    at_cluster_z : bool, evaluate the reference density at the cluster redshift.
    kmask : 2D array, optional Fourier-space mask; built from lmin/lmax if None.
    sigma_mis : float, optional miscentering width in arcmin (improved path only).
    improved : bool, use halo-model 1-halo + 2-halo profiles instead of plain NFW.
    hm : halo-model object (e.g. hmvec.HaloModel); required when improved=True.
    exclude_2h : bool, drop the 2-halo term on the improved path.

    Returns
    -------
    cents : bin centers, in radians (edges are converted from arcmin below).
    k1d : binned 1D convergence profile.
    """
    # mass in msolar/h
    # cc Cosmology object
    modrmap = enmap.modrmap(shape,wcs)
    # Radial binner operates in radians; convert the arcmin bin edges.
    binner = bin2D(modrmap,bin_edges_arcmin*np.pi/180./60.)
    if improved:
        thetas = np.linspace(bin_edges_arcmin.min(),bin_edges_arcmin.max(),101) * utils.arcmin
        Ms = [mass]
        concs = [conc]
        # CMB lensing: source plane at the last-scattering surface.
        zsource = 1100
        sig = sigma_mis*utils.arcmin if sigma_mis is not None else None
        k1h = hm.kappa_1h_profiles(thetas,Ms,concs,zsource,sig_theta=sig,delta=overdensity,rho='critical' if critical else 'mean',rho_at_z=at_cluster_z)
        k2h = hm.kappa_2h_profiles(thetas,Ms,zsource,delta=overdensity,rho='critical' if critical else 'mean',rho_at_z=at_cluster_z,lmin=2,lmax=10000) if not(exclude_2h) else np.asarray(k1h).T*0
        # Zero out non-finite 1-halo values (e.g. at theta -> 0).
        k1h[~np.isfinite(k1h)] = 0
        k1h = np.asarray(k1h[0])
        k2h = k2h[:,0]
        # Interpolate the total (1h + 2h) profile onto the 2D radius map.
        k = enmap.enmap(maps.interp(thetas,k1h+k2h)(modrmap),wcs)
    else:
        k = nfw_kappa(mass,modrmap,cc,zL=z,concentration=conc,overdensity=overdensity,critical=critical,atClusterZ=at_cluster_z)
    if kmask is None: kmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)
    # Apply the same Fourier filtering as the data before radial binning.
    kf = maps.filter_map(k,kmask)
    cents,k1d = binner.bin(kf)
    return cents,k1d
def fit_nfw_profile(profile_data,profile_cov,masses,z,conc,cc,shape,wcs,bin_edges_arcmin,lmax,lmin=None,
                    overdensity=200.,critical=False,at_cluster_z=True,
                    mass_guess=2e14,sigma_guess=2e13,kmask=None,sigma_mis=None,improved=True):
    """
    Fit a cluster mass to a measured binned kappa profile by evaluating a
    Gaussian likelihood on a grid of trial masses and fitting a Gaussian to
    the resulting likelihood curve.

    Returns
    lnlikes - actual lnlike as function of masses
    like_fit - gaussian fit as function of masses
    fit_mass - fit mass
    mass_err - fit mass err
    fprofiles - theory profile for each trial mass
    fit_profile - theory profile evaluated at the best-fit mass
    """
    from orphics.stats import fit_gauss
    cinv = np.linalg.inv(profile_cov)
    lnlikes = []
    fprofiles = []
    if improved:
        import hmvec
        # Halo-model setup consumed by binned_nfw's improved 1h+2h path.
        ks = np.geomspace(1e-2,10,200)
        ms = np.geomspace(1e8,4e15,102)
        zs = [z]
        hm = hmvec.HaloModel(zs,ks,ms=ms,params={},mass_function="tinker",
                             halofit=None,mdef='mean',nfw_numeric=False,skip_nfw=True)
    else:
        hm = None
    for mass in masses:
        cents,profile_theory = binned_nfw(mass,z,conc,cc,shape,wcs,bin_edges_arcmin,lmax,lmin,
                                          overdensity,critical,at_cluster_z,kmask=kmask,
                                          sigma_mis=sigma_mis,improved=improved,hm=hm)
        diff = profile_data - profile_theory
        fprofiles.append(profile_theory)
        # Gaussian likelihood: -0.5 * diff^T Cinv diff
        lnlike = -0.5 * np.dot(np.dot(diff,cinv),diff)
        lnlikes.append(lnlike)
    # Fit a Gaussian to exp(lnlike) over the mass grid to get the peak and width.
    fit_mass,mass_err,_,_ = fit_gauss(masses,np.exp(lnlikes),mu_guess=mass_guess,sigma_guess=sigma_guess)
    gaussian = lambda t,mu,sigma: np.exp(-(t-mu)**2./2./sigma**2.)/np.sqrt(2.*np.pi*sigma**2.)
    like_fit = gaussian(masses,fit_mass,mass_err)
    # Re-evaluate the theory profile at the best-fit mass.
    cents,fit_profile = binned_nfw(fit_mass,z,conc,cc,shape,wcs,bin_edges_arcmin,lmax,lmin,
                                   overdensity,critical,at_cluster_z,kmask=kmask,
                                   sigma_mis=sigma_mis,improved=improved,hm=hm)
    return np.array(lnlikes),np.array(like_fit),fit_mass,mass_err,fprofiles,fit_profile
def mass_estimate(kappa_recon,kappa_noise_2d,mass_guess,concentration,z):
    """Given a cutout kappa map centered on a cluster and a redshift,
    returns a mass estimate and variance of the estimate by applying a matched filter.
    Imagine that reliable richness estimates and redshifts exist for each cluster.
    We split the sample into n richness bins.
    We go through each bin. This bin has a mean richness and a mean redshift. We convert this to a fiducial mean mass and mean concentration.
    We choose a template with this mean mass and mean concentration.
    We do a reconstruction on each cluster for each array. We now have a 2D kappa measurement. We apply MF to this with the template.
    We get a relative amplitude for each cluster, which we convert to a mass for each cluster.
    We want a mean mass versus mean richness relationship.
    Step 1
    Loop through each cluster.
    Cut out patches from each array split-coadd and split.
    Estimate noise spectrum from splits for each array.
    Use noise spectra to get optimal coadd of all arrays and splits of coadds.
    Use coadd for reconstruction, with coadd noise from splits in weights.
    For gradient, use Planck.
    Save reconstructions and 2d kappa noise to disk.
    Repeat above for 100x random locations and save only mean to disk.
    Postprocess by loading each reconstruction, subtract meanfield, crop out region where taper**2 < 1 with some threshold.
    Verify above by performing on simulations to check that cluster profile is recovered.
    Step 2
    For each richness bin, use fiducial mass and concentration and mean redshift to choose template.
    For each cluster in each richness bin, apply MF and get masses. Find mean mass in bin. Iterate above on mass until converged.
    This provides a mean mass, concentration for each richness bin.
    """
    shape,wcs = kappa_recon.shape,kappa_recon.wcs
    # NOTE(review): `template` and `kmask` are not defined in this scope or in
    # the argument list, so as written these lines raise NameError at runtime,
    # and no value is returned despite the docstring's promise of a mass
    # estimate. This function appears to be an unfinished sketch -- confirm
    # intent before use.
    mf = maps.MatchedFilter(shape,wcs,template,kappa_noise_2d)
    mf.apply(kappa_recon,kmask=kmask)
def flat_taylens(phi,imap,taylor_order = 5):
    """
    Lens a map imap with lensing potential phi
    using the Taylens algorithm up to taylor_order.
    The original routine is from Thibaut Louis.
    It has been modified here to work with pixell.
    """
    # Physically normalized FFT / inverse FFT pair.
    f = lambda x: enmap.fft(x,normalize='phys')
    invf = lambda x: enmap.ifft(x,normalize='phys')
    def binomial(n,k):
        "Compute the binomial coefficient C(n, k) by a direct multiplicative method"
        if k > n-k: k = n-k # Use symmetry of Pascal's triangle
        accum = 1
        for i in range(1,k+1):
            accum *= (n - (k - i))
            accum /= i
        return accum
    kmap = f(phi)
    Ny,Nx = phi.shape
    ly,lx = enmap.laxes(phi.shape,phi.wcs)
    ly_array,lx_array = phi.lmap()
    # Deflection field alpha = grad(phi), computed via Fourier derivatives.
    alphaX=np.real(invf(1j*lx_array*kmap))
    alphaY=np.real(invf(1j*ly_array*kmap))
    iy,ix = np.mgrid[0:Ny,0:Nx]
    py,px = enmap.pixshape(phi.shape,phi.wcs)
    # Split each deflection into a whole-pixel shift plus a sub-pixel residual.
    alphaX0 = np.array(np.round(alphaX/ px),dtype='int64')
    alphaY0 = np.array(np.round(alphaY/ py),dtype='int64')
    delta_alphaX=alphaX-alphaX0*px
    delta_alphaY=alphaY-alphaY0*py
    lensed_T_Map = imap.copy()
    cont = imap.copy()
    # Zeroth order: remap by the whole-pixel deflection with periodic wrapping.
    lensed_T_Map = imap[(iy+alphaY0)%Ny, (ix+alphaX0)%Nx]
    kmap=f(imap)
    # Taylor-expand the map around the shifted positions in the sub-pixel
    # residuals, accumulating terms up to (taylor_order - 1).
    for n in range(1,taylor_order):
        cont[:]=0
        for k in range(n+1):
            # n-th derivative term of the 2D Taylor expansion (binomial weights).
            fac=1j**n*binomial(n,k)*lx_array**(n-k)*ly_array**k/(factorial(n))
            T_add=np.real(invf(fac*kmap))[(iy+alphaY0)%Ny, (ix+alphaX0)%Nx]*delta_alphaX**(n-k)*delta_alphaY**k
            lensed_T_Map[:] += T_add
            cont[:] += T_add
    return lensed_T_Map
def alpha_from_kappa(kappa=None,posmap=None,phi=None):
    """Return deflected pixel coordinates for lensing by a given field.

    Either a convergence map `kappa` or a lensing potential `phi` must be
    supplied; when only `kappa` is given it is first converted to the
    potential. The returned array holds the pixel coordinates of the
    deflected positions (posmap + grad(phi)).
    """
    if phi is None:
        # Only kappa was supplied: convert it to the lensing potential.
        phi, _ = kappa_to_phi(kappa, kappa.modlmap(), return_fphi=True)
    shape, wcs = phi.shape, phi.wcs
    deflection = enmap.grad(phi)
    if posmap is None:
        posmap = enmap.posmap(shape, wcs)
    deflected = posmap + deflection
    return enmap.sky2pix(shape, wcs, deflected, safe=False)
class FlatLensingSims(object):
def __init__(self,shape,wcs,theory,beam_arcmin,noise_uk_arcmin,noise_e_uk_arcmin=None,noise_b_uk_arcmin=None,pol=False,fixed_lens_kappa=None):
    """Set up generators for unlensed CMB, lensing kappa and white noise maps.

    Parameters
    ----------
    shape, wcs : flat-sky geometry; a leading (3,) is prepended when pol=True.
    theory : theory spectra object; spectra are assumed to be in uK^2.
    beam_arcmin : float, Gaussian beam width in arcmin.
    noise_uk_arcmin : float, temperature white-noise level in uK-arcmin.
    noise_e_uk_arcmin, noise_b_uk_arcmin : floats, optional polarization noise
        levels; E defaults to sqrt(2) x temperature and B defaults to E.
    pol : bool, include polarization (T,Q,U) components.
    fixed_lens_kappa : optional convergence map; if given, all sims are lensed
        by this fixed kappa instead of a random realization per sim.
    """
    # assumes theory in uK^2
    from orphics import cosmology
    if len(shape)<3 and pol: shape = (3,)+shape
    if noise_e_uk_arcmin is None: noise_e_uk_arcmin = np.sqrt(2.)*noise_uk_arcmin
    if noise_b_uk_arcmin is None: noise_b_uk_arcmin = noise_e_uk_arcmin
    self.modlmap = enmap.modlmap(shape,wcs)
    Ny,Nx = shape[-2:]
    lmax = self.modlmap.max()
    ells = np.arange(0,lmax,1)
    # Unlensed CMB power spectra and the corresponding map generator.
    ps_cmb = cosmology.power_from_theory(ells,theory,lensed=False,pol=pol)
    self.mgen = maps.MapGen(shape,wcs,ps_cmb)
    if fixed_lens_kappa is not None:
        # Fixed-lens mode: precompute deflections once from the supplied kappa.
        self._fixed = True
        self.kappa = fixed_lens_kappa
        self.alpha = alpha_from_kappa(self.kappa)
    else:
        # Random-lens mode: a generator drawing kappa from the kk spectrum.
        self._fixed = False
        ps_kk = theory.gCl('kk',self.modlmap).reshape((1,1,Ny,Nx))
        self.kgen = maps.MapGen(shape[-2:],wcs,ps_kk)
        self.posmap = enmap.posmap(shape[-2:],wcs)
        self.ps_kk = ps_kk
    self.kbeam = maps.gauss_beam(self.modlmap,beam_arcmin)
    ncomp = 3 if pol else 1
    # White-noise power spectra: uK-arcmin levels converted to uK^2-steradian.
    ps_noise = np.zeros((ncomp,ncomp,Ny,Nx))
    ps_noise[0,0] = (noise_uk_arcmin*np.pi/180./60.)**2.
    if pol:
        ps_noise[1,1] = (noise_e_uk_arcmin*np.pi/180./60.)**2.
        ps_noise[2,2] = (noise_b_uk_arcmin*np.pi/180./60.)**2.
    self.ngen = maps.MapGen(shape,wcs,ps_noise)
    self.ps_noise = ps_noise
def update_kappa(self,kappa):
self.kappa = kappa
self.alpha = alpha_from_kappa(self.kappa)
def get_unlensed(self,seed=None):
return self.mgen.get_map(seed=seed)
def get_kappa(self,seed=None):
return self.kgen.get_map(seed=seed)
def get_sim(self,seed_cmb=None,seed_kappa=None,seed_noise=None,lens_order=5,return_intermediate=False,skip_lensing=False,cfrac=None):
unlensed = self.get_unlensed(seed_cmb)
if skip_lensing:
lensed = unlensed
kappa = enmap.samewcs(lensed.copy()[0]*0,lensed)
else:
if not(self._fixed):
kappa = self.get_kappa(seed_kappa)
self.kappa = kappa
self.alpha = alpha_from_kappa(kappa,posmap=self.posmap)
else:
kappa = None
assert seed_kappa is None
lensed = enlensing.displace_map(unlensed, self.alpha, order=lens_order)
beamed = maps.filter_map(lensed,self.kbeam)
noise_map = self.ngen.get_map(seed=seed_noise)
observed = beamed + noise_map
if return_intermediate:
return [ maps.get_central(x,cfrac) for x in [unlensed,kappa,lensed,beamed,noise_map,observed] ]
else:
return maps.get_central(observed,cfrac)
def lens_cov_pol(shape,wcs,iucov,alpha_pix,lens_order=5,kbeam=None,npixout=None,comm=None):
    """Given the pix-pix covariance matrix for the unlensed CMB,
    returns the lensed covmat for a given pixel displacement model.

    iucov -- (ncomp,ncomp,Npix,Npix) array where Npix = Ny*Nx
    alpha_pix -- (2,Ny,Nx) array of lensing displacements in pixel units
    kbeam -- (Ny,Nx) array of 2d beam wavenumbers
    """
    from pixell import lensing as enlensing
    assert iucov.ndim==4
    ncomp = iucov.shape[0]
    assert ncomp==iucov.shape[1]
    assert 1 <= ncomp <= 3
    if len(shape)==2: shape = (1,)+shape
    # Only square maps are supported.
    n = shape[-2]
    assert n==shape[-1]
    ucov = iucov.copy()
    # Interleave components and pixels:
    # (ncomp,ncomp,Npix,Npix) -> (ncomp*Npix, ncomp*Npix).
    ucov = np.transpose(ucov,(0,2,1,3))
    ucov = ucov.reshape((ncomp*n**2,ncomp*n**2))
    npix = ncomp*n**2
    if comm is None:
        from orphics import mpi
        comm = mpi.MPI.COMM_WORLD
    def efunc(vec):
        # Lens (and optionally beam-filter) one row/column of the covariance,
        # treating it as a map.
        unlensed = enmap.enmap(vec.reshape(shape),wcs)
        lensed = enlensing.displace_map(unlensed, alpha_pix, order=lens_order)
        if kbeam is not None: lensed = maps.filter_map(lensed,kbeam) # TODO: replace with convolution
        # because for ~(60x60) arrays, it is probably much faster. >1 threads means worse performance
        # with FFTs for these array sizes.
        return np.asarray(lensed).reshape(-1)
    # Apply the lensing operator from the left (rows), work striped across
    # MPI ranks and combined with an allreduce ...
    Scov = np.zeros(ucov.shape,dtype=ucov.dtype)
    for i in range(comm.rank, npix, comm.size):
        Scov[i,:] = efunc(ucov[i,:])
    Scov2 = utils.allreduce(Scov, comm)
    # ... then from the right (columns): together this applies L C L^T.
    Scov = np.zeros(ucov.shape,dtype=ucov.dtype)
    for i in range(comm.rank, npix, comm.size):
        Scov[:,i] = efunc(Scov2[:,i])
    Scov = utils.allreduce(Scov, comm)
    Scov = Scov.reshape((ncomp,n*n,ncomp,n*n))
    if (npixout is not None) and (npixout!=n):
        # Crop the covariance to the central npixout x npixout patch.
        Scov = Scov.reshape((ncomp,n,n,ncomp,n,n))
        s = n//2-npixout//2
        e = s + npixout
        Scov = Scov[:,s:e,s:e,:,s:e,s:e].reshape((ncomp,npixout**2,ncomp,npixout**2))
    # Undo the component/pixel interleaving before returning.
    Scov = np.transpose(Scov,(0,2,1,3))
    return Scov
def lensing_noise(ells,ntt,nee,nbb,
                  ellmin_t,ellmin_e,ellmin_b,
                  ellmax_t,ellmax_e,ellmax_b,
                  bin_edges,
                  camb_theory_file_root=None,
                  estimators = ['TT'],
                  delens = False,
                  theory=None,
                  dimensionless=False,
                  unlensed_equals_lensed=True,
                  grad_cut=None,
                  ellmin_k = None,
                  ellmax_k = None,
                  y_ells=None,y_ntt=None,y_nee=None,y_nbb=None,
                  y_ellmin_t=None,y_ellmin_e=None,y_ellmin_b=None,
                  y_ellmax_t=None,y_ellmax_e=None,y_ellmax_b=None,
                  lxcut_t=None,lycut_t=None,y_lxcut_t=None,y_lycut_t=None,
                  lxcut_e=None,lycut_e=None,y_lxcut_e=None,y_lycut_e=None,
                  lxcut_b=None,lycut_b=None,y_lxcut_b=None,y_lycut_b=None,
                  width_deg=5.,px_res_arcmin=1.0,shape=None,wcs=None,bigell=9000):
    """Compute binned lensing-reconstruction noise curves N_L^kappa.

    For each requested estimator (e.g. 'TT', 'EB', ...) the 2D noise is
    computed on a flat-sky geometry and binned in annuli; a minimum-variance
    ('mv') combination is also returned. The X and Y legs of each estimator
    can be given independent noise curves and multipole cuts via the y_*
    arguments (which default to the X-leg values).

    Returns (ls, nlkks, theory, qest): bin centers, a dict of binned noise
    curves keyed by estimator name plus 'mv', the theory object used, and
    the constructed Estimator.
    """
    from orphics import cosmology, stats
    if theory is None: theory = cosmology.loadTheorySpectraFromCAMB(camb_theory_file_root,unlensedEqualsLensed=False,
                                                                    useTotal=False,TCMB = 2.7255e6,lpad=9000,get_dimensionless=False)
    # Y-leg parameters default to the X-leg values.
    if y_ells is None: y_ells=ells
    if y_ntt is None: y_ntt=ntt
    if y_nee is None: y_nee=nee
    if y_nbb is None: y_nbb=nbb
    if y_ellmin_t is None: y_ellmin_t=ellmin_t
    if y_ellmin_e is None: y_ellmin_e=ellmin_e
    if y_ellmin_b is None: y_ellmin_b=ellmin_b
    if y_ellmax_t is None: y_ellmax_t=ellmax_t
    if y_ellmax_e is None: y_ellmax_e=ellmax_e
    if y_ellmax_b is None: y_ellmax_b=ellmax_b
    if ellmin_k is None: ellmin_k = bin_edges.min() #min(ellmin_t,ellmin_e,ellmin_b,y_ellmin_t,y_ellmin_e,y_ellmin_b)
    if ellmax_k is None: ellmax_k = bin_edges.max() #max(ellmax_t,ellmax_e,ellmax_b,y_ellmax_t,y_ellmax_e,y_ellmax_b)
    pol = False if estimators==['TT'] else True
    if ells.ndim==2:
        # The noise curves were passed directly as 2D arrays on a modlmap grid.
        assert shape is None
        assert wcs is None
        modlmap = ells
        shape = modlmap.shape
        wcs = modlmap.wcs
        validate_geometry(shape,wcs,verbose=True)
        nTX = ntt
        nTY = y_ntt
        nEX = nee
        nEY = y_nee
        nBX = nbb
        nBY = y_nbb
    else:
        # 1D noise curves: build a geometry and interpolate onto its modlmap.
        if (shape is None) or (wcs is None):
            shape,wcs = maps.rect_geometry(width_deg=width_deg,px_res_arcmin=px_res_arcmin)
        modlmap = enmap.modlmap(shape,wcs)
        nTX = maps.interp(ells,ntt)(modlmap)
        nTY = maps.interp(ells,y_ntt)(modlmap)
        nEX = maps.interp(ells,nee)(modlmap)
        nEY = maps.interp(ells,y_nee)(modlmap)
        nBX = maps.interp(ells,nbb)(modlmap)
        nBY = maps.interp(ells,y_nbb)(modlmap)
    # Fourier-space masks implementing the multipole and lx/ly cuts per leg.
    kmask_TX = maps.mask_kspace(shape,wcs,lmin=ellmin_t,lmax=ellmax_t,lxcut=lxcut_t,lycut=lycut_t)
    kmask_TY = maps.mask_kspace(shape,wcs,lmin=y_ellmin_t,lmax=y_ellmax_t,lxcut=y_lxcut_t,lycut=y_lycut_t)
    kmask_EX = maps.mask_kspace(shape,wcs,lmin=ellmin_e,lmax=ellmax_e,lxcut=lxcut_e,lycut=lycut_e)
    kmask_EY = maps.mask_kspace(shape,wcs,lmin=y_ellmin_e,lmax=y_ellmax_e,lxcut=y_lxcut_e,lycut=y_lycut_e)
    kmask_BX = maps.mask_kspace(shape,wcs,lmin=ellmin_b,lmax=ellmax_b,lxcut=lxcut_b,lycut=lycut_b)
    kmask_BY = maps.mask_kspace(shape,wcs,lmin=y_ellmin_b,lmax=y_ellmax_b,lxcut=y_lxcut_b,lycut=y_lycut_b)
    kmask_K = maps.mask_kspace(shape,wcs,lmin=ellmin_k,lmax=ellmax_k)
    # NOTE(review): this local `qest` shadows the module-level qest() helper.
    qest = Estimator(shape,wcs,
                     theory,
                     theorySpectraForNorm=theory,
                     noiseX2dTEB=[nTX,nEX,nBX],
                     noiseY2dTEB=[nTY,nEY,nBY],
                     noiseX_is_total = False,
                     noiseY_is_total = False,
                     fmaskX2dTEB=[kmask_TX,kmask_EX,kmask_BX],
                     fmaskY2dTEB=[kmask_TY,kmask_EY,kmask_BY],
                     fmaskKappa=kmask_K,
                     kBeamX = None,
                     kBeamY = None,
                     doCurl=False,
                     TOnly=not(pol),
                     halo=True,
                     gradCut=grad_cut,
                     verbose=False,
                     loadPickledNormAndFilters=None,
                     savePickledNormAndFilters=None,
                     uEqualsL=unlensed_equals_lensed,
                     bigell=bigell,
                     mpi_comm=None,
                     lEqualsU=False)
    # Bin each estimator's 2D noise and accumulate inverse-variance weights
    # for the minimum-variance combination.
    nlkks = {}
    nsum = 0.
    # NOTE(review): `ls` is only defined inside this loop; an empty
    # `estimators` list would raise NameError at the return below.
    for est in estimators:
        nlkk2d = qest.N.Nlkk[est]
        ls,nlkk = stats.bin_in_annuli(nlkk2d, modlmap, bin_edges)
        nlkks[est] = nlkk.copy()
        nsum += np.nan_to_num(kmask_K/nlkk2d)
    nmv = np.nan_to_num(kmask_K/nsum)
    nlkks['mv'] = stats.bin_in_annuli(nmv, modlmap, bin_edges)[1]
    return ls,nlkks,theory,qest
def lens_cov(shape,wcs,ucov,alpha_pix,lens_order=5,kbeam=None,bshape=None):
    """Given the pix-pix covariance matrix for the unlensed CMB,
    returns the lensed covmat for a given pixel displacement model.

    ucov -- (Npix,Npix) array where Npix = Ny*Nx
    alpha_pix -- (2,Ny,Nx) array of lensing displacements in pixel units
    kbeam -- (Ny,Nx) array of 2d beam wavenumbers
    """
    from pixell import lensing as enlensing
    Scov = ucov.copy()
    # Lens each row of the covariance as if it were a map ...
    for i in range(ucov.shape[0]):
        unlensed = enmap.enmap(Scov[i,:].copy().reshape(shape),wcs)
        lensed = enlensing.displace_map(unlensed, alpha_pix, order=lens_order)
        if kbeam is not None: lensed = maps.filter_map(lensed,kbeam)
        Scov[i,:] = lensed.ravel()
    # ... then each column; together this applies L C L^T (and the beam twice
    # when kbeam is given).
    for j in range(ucov.shape[1]):
        unlensed = enmap.enmap(Scov[:,j].copy().reshape(shape),wcs)
        lensed = enlensing.displace_map(unlensed, alpha_pix, order=lens_order)
        if kbeam is not None: lensed = maps.filter_map(lensed,kbeam)
        Scov[:,j] = lensed.ravel()
    if (bshape is not None) and (bshape!=shape):
        # Crop the covariance to the central bshape patch.
        ny,nx = shape
        Scov = Scov.reshape((ny,nx,ny,nx))
        bny,bnx = bshape
        sy = ny//2-bny//2
        ey = sy + bny
        sx = nx//2-bnx//2
        ex = sx + bnx
        Scov = Scov[sy:ey,sx:ex,sy:ey,sx:ex].reshape((np.prod(bshape),np.prod(bshape)))
    return Scov
def beam_cov(ucov,kbeam):
    """Given the pix-pix covariance matrix for the lensed CMB,
    returns the beamed covmat. The beam can be a ratio of beams to
    readjust the beam in a given matrix.

    ucov -- (Npix,Npix) array where Npix = Ny*Nx
    kbeam -- (Ny,Nx) array of 2d beam wavenumbers
    """
    Scov = ucov.copy()
    wcs = ucov.wcs
    shape = kbeam.shape[-2:]
    def _beamed(vec):
        # Reshape a flat pixel vector into a map, apply the 2D beam filter,
        # and flatten it back.
        m = enmap.enmap(vec.copy().reshape(shape), wcs)
        return maps.filter_map(m, kbeam).ravel()
    # Filter every row, then every column, of the covariance matrix.
    for idx in range(Scov.shape[0]):
        Scov[idx,:] = _beamed(Scov[idx,:])
    for idx in range(Scov.shape[1]):
        Scov[:,idx] = _beamed(Scov[:,idx])
    return Scov
def qest(shape,wcs,theory,noise2d=None,beam2d=None,kmask=None,noise2d_P=None,kmask_P=None,kmask_K=None,pol=False,grad_cut=None,unlensed_equals_lensed=False,bigell=9000,noise2d_B=None,noiseX_is_total=False,noiseY_is_total=False):
    """Convenience constructor for a quadratic lensing Estimator.

    If beam2d is None, the input maps are assumed beam deconvolved and
    noise2d beam deconvolved; otherwise the Estimator beam-deconvolves
    itself using beam2d.
    """
    # Fill in defaults: zero T noise, P noise = 2x T noise, B noise = E noise,
    # and a unit beam (i.e. maps already deconvolved).
    if noise2d is None: noise2d = np.zeros(shape[-2:])
    if noise2d_P is None: noise2d_P = 2.*noise2d
    if noise2d_B is None: noise2d_B = noise2d_P
    if beam2d is None: beam2d = np.ones(shape[-2:])
    # Identical noise/mask configuration for the X and Y legs.
    noiseX_teb = [noise2d, noise2d_P, noise2d_B]
    noiseY_teb = [noise2d, noise2d_P, noise2d_B]
    fmaskX_teb = [kmask, kmask_P, kmask_P]
    fmaskY_teb = [kmask, kmask_P, kmask_P]
    return Estimator(shape, wcs,
                     theory,
                     theorySpectraForNorm=theory,
                     noiseX2dTEB=noiseX_teb,
                     noiseY2dTEB=noiseY_teb,
                     noiseX_is_total=noiseX_is_total,
                     noiseY_is_total=noiseY_is_total,
                     fmaskX2dTEB=fmaskX_teb,
                     fmaskY2dTEB=fmaskY_teb,
                     fmaskKappa=kmask_K,
                     kBeamX=beam2d,
                     kBeamY=beam2d,
                     doCurl=False,
                     TOnly=not(pol),
                     halo=True,
                     gradCut=grad_cut,
                     verbose=False,
                     loadPickledNormAndFilters=None,
                     savePickledNormAndFilters=None,
                     uEqualsL=unlensed_equals_lensed,
                     bigell=bigell,
                     mpi_comm=None,
                     lEqualsU=False)
def kappa_to_phi(kappa,modlmap,return_fphi=False):
    """Convert a convergence map kappa to a lensing potential map phi.

    Returns phi, or (phi, fphi) when return_fphi is True, where fphi is the
    Fourier-space potential.
    """
    # Fourier-space potential, carrying the geometry of the input map.
    fphi = enmap.samewcs(kappa_to_fphi(kappa,modlmap),kappa)
    # Real-space potential from the inverse FFT.
    phi = enmap.samewcs(ifft(fphi,axes=[-2,-1],normalize=True).real, kappa)
    return (phi, fphi) if return_fphi else phi
def kappa_to_fphi(kappa,modlmap):
    """Fourier transform a convergence map and convert it to the
    Fourier-space lensing potential."""
    fkappa = fft(kappa,axes=[-2,-1])
    return fkappa_to_fphi(fkappa,modlmap)
def fkappa_to_fphi(fkappa,modlmap):
    """Convert Fourier-space convergence to the Fourier-space lensing
    potential via phi(l) = 2 kappa(l) / (l (l+1)), zeroing the unphysical
    l < 2 modes (which also removes the l = 0 division singularity)."""
    fphi = np.nan_to_num(2.*fkappa/modlmap/(modlmap+1.))
    low_ell = modlmap < 2.
    fphi[low_ell] = 0.
    return fphi
def fillLowEll(ells,cls,ellmin):
    """Extend a spectrum down to ell = 2, padding every multipole below the
    first entry above ellmin with that entry's value.

    Returns the padded (ells, cls) pair.
    """
    # First entry strictly above the cut; everything below it is replaced.
    pivot = np.where(ells > ellmin)[0][0]
    pivot_ell = ells[pivot]
    pivot_val = cls[pivot]
    pad_ells = np.arange(2, pivot_ell, 1)
    pad_cls = np.array([pivot_val] * len(pad_ells))
    new_ells = np.append(pad_ells, ells[pivot:])
    new_cls = np.append(pad_cls, cls[pivot:])
    return new_ells, new_cls
def sanitizePower(Nlbinned):
    """Clip negative bandpowers and fill the resulting gaps by linear
    interpolation over the surviving (finite) entries.

    Mutates the input array in place and returns it. End gaps are filled
    with the nearest finite value (numpy.interp's flat extrapolation).
    """
    Nlbinned[Nlbinned < 0.] = np.nan
    # Compute the nan mask once; the original recomputed np.isnan three
    # times (and left an unused `ok` variable) on identical data.
    bad = np.isnan(Nlbinned)
    xp = (~bad).ravel().nonzero()[0]
    fp = Nlbinned[~bad]
    x = bad.ravel().nonzero()[0]
    Nlbinned[bad] = np.interp(x, xp, fp)
    return Nlbinned
def getMax(polComb,tellmax,pellmax):
    """Return the relevant maximum multipole for a given estimator pair:
    the T limit for TT, the P limit for pure-polarization estimators, and
    the larger of the two otherwise."""
    if polComb == 'TT':
        return tellmax
    if polComb in ('EE', 'EB'):
        return pellmax
    return max(tellmax, pellmax)
class QuadNorm(object):
    """Computes normalizations (A_L) and reconstruction noise (N_L^kappa) for
    flat-sky quadratic lensing estimators (TT, TE, ET, EE, EB, TB, BE).

    2D spectra and noise are registered with the add* methods before calling
    getNlkk2d; WXY/WY build the Wiener-like filters for the two estimator legs.
    """
    def __init__(self,shape,wcs,gradCut=None,verbose=False,bigell=9000,kBeamX=None,kBeamY=None,fmask=None):
        self.shape = shape
        self.wcs = wcs
        self.verbose = verbose
        self.Ny,self.Nx = shape[-2:]
        self.lxMap,self.lyMap,self.modLMap,thetaMap,lx,ly = maps.get_ft_attributes(shape,wcs)
        # Unit vectors along l, with the l=0 singularity zeroed by nan_to_num.
        self.lxHatMap = self.lxMap*np.nan_to_num(1. / self.modLMap)
        self.lyHatMap = self.lyMap*np.nan_to_num(1. / self.modLMap)
        self.fmask = fmask
        # Default to unit (no) beams when none are supplied.
        if kBeamX is not None:
            self.kBeamX = kBeamX
        else:
            self.kBeamX = 1.
        if kBeamY is not None:
            self.kBeamY = kBeamY
        else:
            self.kBeamY = 1.
        # Registries for 2D spectra, filled by the add* methods below.
        self.uClNow2d = {}
        self.uClFid2d = {}
        self.lClFid2d = {}
        self.noiseXX2d = {}
        self.noiseYY2d = {}
        self.fMaskXX = {}
        self.fMaskYY = {}
        self.lmax_T=bigell
        self.lmax_P=bigell
        self.defaultMaskT = maps.mask_kspace(self.shape,self.wcs,lmin=2,lmax=self.lmax_T)
        self.defaultMaskP = maps.mask_kspace(self.shape,self.wcs,lmin=2,lmax=self.lmax_P)
        #del lx
        #del ly
        self.thetaMap = thetaMap
        self.lx = lx
        self.ly = ly
        self.bigell=bigell #9000.
        # Gradient-leg multipole cut (Hu-DeDeo-Vale style); defaults to no cut.
        if gradCut is not None:
            self.gradCut = gradCut
        else:
            self.gradCut = bigell
        self.Nlkk = {}
        self.pixScaleY,self.pixScaleX = enmap.pixshape(shape,wcs)
        self.noiseX_is_total = False
        self.noiseY_is_total = False
    def fmask_func(self,arr,mask):
        """Zero `arr` wherever the Fourier mask is (effectively) zero."""
        arr[mask<1.e-3] = 0.
        return arr
    def addUnlensedFilter2DPower(self,XY,power2dData):
        '''
        XY = TT, TE, EE, EB or TB
        power2d is a flipper power2d object
        These Cls belong in the Wiener filters, and will not
        be perturbed if/when calculating derivatives.
        '''
        self.uClFid2d[XY] = power2dData.copy()+0.j
    def addUnlensedNorm2DPower(self,XY,power2dData):
        '''
        XY = TT, TE, EE, EB or TB
        power2d is a flipper power2d object
        These Cls belong in the CMB normalization, and will
        be perturbed if/when calculating derivatives.
        '''
        self.uClNow2d[XY] = power2dData.copy()+0.j
    def addLensedFilter2DPower(self,XY,power2dData):
        '''
        XY = TT, TE, EE, EB or TB
        power2d is a flipper power2d object
        These Cls belong in the Wiener filters, and will not
        be perturbed if/when calculating derivatives.
        '''
        self.lClFid2d[XY] = power2dData.copy()+0.j
    def addNoise2DPowerXX(self,XX,power2dData,fourierMask=None,is_total=False):
        '''
        Noise power for the X leg of the quadratic estimator
        XX = TT, EE, BB
        power2d is a flipper power2d object
        fourierMask is an int array that is 0 where noise is
        infinite and 1 where not. It should be in the same
        fftshift state as power2d.powerMap
        '''
        # check if fourier mask is int!
        self.noiseX_is_total = is_total
        self.noiseXX2d[XX] = power2dData.copy()+0.j
        if fourierMask is not None:
            # Masked modes get infinite noise so the filters zero them.
            self.noiseXX2d[XX][fourierMask==0] = np.inf
            self.fMaskXX[XX] = fourierMask
        else:
            if XX=='TT':
                self.noiseXX2d[XX][self.defaultMaskT==0] = np.inf
            else:
                self.noiseXX2d[XX][self.defaultMaskP==0] = np.inf
    def addNoise2DPowerYY(self,YY,power2dData,fourierMask=None,is_total=False):
        '''
        Noise power for the Y leg of the quadratic estimator
        XX = TT, EE, BB
        power2d is a flipper power2d object
        fourierMask is an int array that is 0 where noise is
        infinite and 1 where not. It should be in the same
        fftshift state as power2d.powerMap
        '''
        # check if fourier mask is int!
        self.noiseY_is_total = is_total
        self.noiseYY2d[YY] = power2dData.copy()+0.j
        if fourierMask is not None:
            # Masked modes get infinite noise so the filters zero them.
            self.noiseYY2d[YY][fourierMask==0] = np.inf
            self.fMaskYY[YY] = fourierMask
        else:
            if YY=='TT':
                self.noiseYY2d[YY][self.defaultMaskT==0] = np.inf
            else:
                self.noiseYY2d[YY][self.defaultMaskP==0] = np.inf
    def addClkk2DPower(self,power2dData):
        '''
        Fiducial Clkk power
        Used if delensing
        power2d is a flipper power2d object
        '''
        self.clkk2d = power2dData.copy()+0.j
        # Convert Clkk to Clphiphi: phi = 2 kappa / (l(l+1)).
        self.clpp2d = 0.j+np.nan_to_num(self.clkk2d.copy()*4./(self.modLMap**2.)/((self.modLMap+1.)**2.))
    def WXY(self,XY):
        """Wiener-like filter for the gradient (X) leg of estimator XY:
        uCl / (lCl * beam^2 + noise), with the gradient cut and lmax applied."""
        X,Y = XY
        if Y=='B': Y='E'
        gradClXY = X+Y
        if XY=='ET': gradClXY = 'TE'
        if XY=='BE': gradClXY = 'EE'
        totnoise = self.noiseXX2d[X+X].copy() if self.noiseX_is_total else (self.lClFid2d[X+X].copy()*self.kBeamX**2.+self.noiseXX2d[X+X].copy())
        W = self.fmask_func(np.nan_to_num(self.uClFid2d[gradClXY].copy()/totnoise)*self.kBeamX,self.fMaskXX[X+X])
        W[self.modLMap>self.gradCut]=0.
        if X=='T':
            W[np.where(self.modLMap >= self.lmax_T)] = 0.
        else:
            W[np.where(self.modLMap >= self.lmax_P)] = 0.
        # debug_edges = np.arange(400,6000,50)
        # import orphics.tools.stats as stats
        # import orphics.tools.io as io
        # binner = stats.bin2D(self.modLMap,debug_edges)
        # cents,ws = binner.bin(W)
        # pl = io.Plotter()
        # pl.add(cents,ws)
        # pl.done("ws.png")
        # sys.exit()
        return W
    def WY(self,YY):
        """Inverse-variance filter for the second (Y) leg:
        1 / (lCl * beam^2 + noise), with lmax applied."""
        assert YY[0]==YY[1]
        totnoise = self.noiseYY2d[YY].copy() if self.noiseY_is_total else (self.lClFid2d[YY].copy()*self.kBeamY**2.+self.noiseYY2d[YY].copy())
        W = self.fmask_func(np.nan_to_num(1./totnoise)*self.kBeamY,self.fMaskYY[YY]) #* self.modLMap # !!!!!
        # NOTE(review): this T-limit is applied unconditionally before the
        # branch below, so lmax_T also masks polarization legs. It looks like
        # a leftover (harmless when lmax_T == lmax_P == bigell) — confirm.
        W[np.where(self.modLMap >= self.lmax_T)] = 0.
        if YY[0]=='T':
            W[np.where(self.modLMap >= self.lmax_T)] = 0.
        else:
            W[np.where(self.modLMap >= self.lmax_P)] = 0.
        # debug_edges = np.arange(400,6000,50)
        # import orphics.tools.stats as stats
        # import orphics.tools.io as io
        # io.quickPlot2d(np.fft.fftshift(W.real),"wy2d.png")
        # binner = stats.bin2D(self.modLMap,debug_edges)
        # cents,ws = binner.bin(W.real)
        # print cents
        # print ws
        # pl = io.Plotter()#scaleY='log')
        # pl.add(cents,ws)
        # pl._ax.set_xlim(2,6000)
        # pl.done("wy.png")
        # sys.exit()
        return W
    def getCurlNlkk2d(self,XY,halo=False):
        """Curl-estimator noise: not implemented."""
        raise NotImplementedError
    def super_dumb_N0_TTTT(self,data_power_2d_TT):
        """Realization-dependent N0 approximation for TT, built by replacing
        the filter legs with the data power and rescaling the fiducial Nlkk."""
        ratio = np.nan_to_num(data_power_2d_TT*self.WY("TT")/self.kBeamY)
        lmap = self.modLMap
        replaced = np.nan_to_num(self.getNlkk2d("TT",halo=True,l1Scale=self.fmask_func(ratio,self.fMaskXX["TT"]),l2Scale=self.fmask_func(ratio,self.fMaskYY["TT"]),setNl=False) / (2. * np.nan_to_num(1. / lmap/(lmap+1.))))
        unreplaced = self.Nlkk["TT"].copy()
        return np.nan_to_num(unreplaced**2./replaced)
    def super_dumb_N0_EEEE(self,data_power_2d_EE):
        """Realization-dependent N0 approximation for EE; see super_dumb_N0_TTTT."""
        ratio = np.nan_to_num(data_power_2d_EE*self.WY("EE")/self.kBeamY)
        lmap = self.modLMap
        replaced = np.nan_to_num(self.getNlkk2d("EE",halo=True,l1Scale=self.fmask_func(ratio,self.fMaskXX["EE"]),l2Scale=self.fmask_func(ratio,self.fMaskYY["EE"]),setNl=False) / (2. * np.nan_to_num(1. / lmap/(lmap+1.))))
        unreplaced = self.Nlkk["EE"].copy()
        return np.nan_to_num(unreplaced**2./replaced)
    def getNlkk2d(self,XY,halo=True,l1Scale=1.,l2Scale=1.,setNl=True):
        """Compute the 2D convergence reconstruction noise N_L^kappa for
        estimator XY. The separable normalization integrals are evaluated
        as products of FFTs. When setNl is True the result is cached in
        self.Nlkk[XY]; the return value is N_L^phiphi-scaled (see the final
        line)."""
        if not(halo): raise NotImplementedError
        lx,ly = self.lxMap,self.lyMap
        lmap = self.modLMap
        X,Y = XY
        XX = X+X
        YY = Y+Y
        if self.verbose:
            print(("Calculating norm for ", XY))
        # NOTE(review): `h` is assigned but never used.
        h=0.
        allTerms = []
        if XY == 'TT':
            clunlenTTArrNow = self.uClNow2d['TT'].copy()
            if halo:
                WXY = self.WXY('TT')*self.kBeamX*l1Scale
                WY = self.WY('TT')*self.kBeamY*l2Scale
                preG = WY
                # rfact splits the cross term lx*ly symmetrically.
                rfact = 2.**0.25
                for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
                    preF = ell1*ell2*clunlenTTArrNow*WXY
                    preFX = ell1*WXY
                    preGX = ell2*clunlenTTArrNow*WY
                    calc = ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True)+ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])
                    allTerms += [calc]
            else:
                clunlenTTArr = self.uClFid2d['TT'].copy()
                preG = self.WY('TT') #np.nan_to_num(1./cltotTTArrY)
                cltotTTArrX = np.nan_to_num(clunlenTTArr/self.WXY('TT'))
                cltotTTArrY = np.nan_to_num(1./self.WY('TT'))
                rfact = 2.**0.25
                for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
                    preF = ell1*ell2*clunlenTTArrNow*clunlenTTArr*np.nan_to_num(1./cltotTTArrX)/2.
                    preFX = ell1*clunlenTTArrNow*np.nan_to_num(1./cltotTTArrX)
                    preGX = ell2*clunlenTTArr*np.nan_to_num(1./cltotTTArrY)
                    calc = 2.*ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True)+ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True)/2.,axes=[-2,-1])
                    allTerms += [calc]
        elif XY == 'EE':
            clunlenEEArrNow = self.uClNow2d['EE'].copy()
            # Spin-2 rotation factors for E-mode decomposition.
            sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
            cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
            lx = self.lxMap
            ly = self.lyMap
            lxhat = self.lxHatMap
            lyhat = self.lyHatMap
            sinf = sin2phi(lxhat,lyhat)
            sinsqf = sinf**2.
            cosf = cos2phi(lxhat,lyhat)
            cossqf = cosf**2.
            if halo:
                WXY = self.WXY('EE')*self.kBeamX
                WY = self.WY('EE')*self.kBeamY
                rfact = 2.**0.25
                for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
                    for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
                        preF = trigfact*ell1*ell2*clunlenEEArrNow*WXY
                        preG = trigfact*WY
                        allTerms += [ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True),axes=[-2,-1])]
                        #allTerms += [ell1*ell2*fft2(ifft2(preF)*ifft2(preG))]
                        preFX = trigfact*ell1*clunlenEEArrNow*WY
                        preGX = trigfact*ell2*WXY
                        allTerms += [ell1*ell2*fft(ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])]
                        #allTerms += [ell1*ell2*fft2(ifft2(preFX)*ifft2(preGX))]
            # else:
            #     rfact = 2.**0.25
            #     for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
            #         for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
            #             preF = trigfact*ell1*ell2*clunlenEEArrNow*clunlenEEArr*np.nan_to_num(1./cltotEEArr)/2.
            #             preG = trigfact*np.nan_to_num(1./cltotEEArr)
            #             preFX = trigfact*ell1*clunlenEEArrNow*np.nan_to_num(1./cltotEEArr)
            #             preGX = trigfact*ell2*clunlenEEArr*np.nan_to_num(1./cltotEEArr)
            #             allTerms += [2.*ell1*ell2*fft2(ifft2(preF)*ifft2(preG)+ifft2(preFX)*ifft2(preGX)/2.)]
        elif XY == 'EB':
            clunlenEEArrNow = self.uClNow2d['EE'].copy()
            clunlenBBArrNow = self.uClNow2d['BB'].copy()
            sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
            cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
            lx = self.lxMap
            ly = self.lyMap
            # Paired trig weights for the E and B legs of the EB response.
            termsF = []
            termsF.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
            termsF.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
            termsF.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
            termsG = []
            termsG.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
            termsG.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
            termsG.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
            lxhat = self.lxHatMap
            lyhat = self.lyHatMap
            WXY = self.WXY('EB')*self.kBeamX
            WY = self.WY('BB')*self.kBeamY
            for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
                preF = ellsq*clunlenEEArrNow*WXY
                preG = WY
                for termF,termG in zip(termsF,termsG):
                    allTerms += [ellsq*fft(ifft(termF(preF,lxhat,lyhat),axes=[-2,-1],normalize=True)*ifft(termG(preG,lxhat,lyhat),axes=[-2,-1],normalize=True),axes=[-2,-1])]
        elif XY == 'BE':
            clunlenEEArrNow = self.uClNow2d['EE'].copy()
            clunlenBBArrNow = self.uClNow2d['BB'].copy()
            sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
            cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
            lx = self.lxMap
            ly = self.lyMap
            termsF = []
            termsF.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
            termsF.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
            termsF.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
            termsG = []
            termsG.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
            termsG.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
            termsG.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
            lxhat = self.lxHatMap
            lyhat = self.lyHatMap
            WXY = self.WXY('BE')*self.kBeamX
            WY = self.WY('EE')*self.kBeamY
            for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
                # For BE the Cl factor sits on the E (second) leg.
                preF = WXY
                preG = ellsq*clunlenEEArrNow*WY
                for termF,termG in zip(termsF,termsG):
                    allTerms += [ellsq*fft(ifft(termF(preF,lxhat,lyhat),axes=[-2,-1],normalize=True)*ifft(termG(preG,lxhat,lyhat),axes=[-2,-1],normalize=True),axes=[-2,-1])]
        elif XY=='ET':
            clunlenTEArrNow = self.uClNow2d['TE'].copy()
            if halo:
                sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
                cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
                lx = self.lxMap
                ly = self.lyMap
                lxhat = self.lxHatMap
                lyhat = self.lyHatMap
                sinf = sin2phi(lxhat,lyhat)
                sinsqf = sinf**2.
                cosf = cos2phi(lxhat,lyhat)
                cossqf = cosf**2.
                WXY = self.WXY('ET')*self.kBeamX
                WY = self.WY('TT')*self.kBeamY
                rfact = 2.**0.25
                for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
                    preF = ell1*ell2*clunlenTEArrNow*WXY
                    preG = WY
                    allTerms += [ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True),axes=[-2,-1])]
                    for trigfact in [cosf,sinf]:
                        preFX = trigfact*ell1*clunlenTEArrNow*WY
                        preGX = trigfact*ell2*WXY
                        allTerms += [ell1*ell2*fft(ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])]
            # else:
            #     sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
            #     cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
            #     lx = self.lxMap
            #     ly = self.lyMap
            #     lxhat = self.lxHatMap
            #     lyhat = self.lyHatMap
            #     sinf = sin2phi(lxhat,lyhat)
            #     sinsqf = sinf**2.
            #     cosf = cos2phi(lxhat,lyhat)
            #     cossqf = cosf**2.
            #     rfact = 2.**0.25
            #     for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
            #         preF = ell1*ell2*clunlenTEArrNow*clunlenTEArr*np.nan_to_num(1./cltotEEArr)
            #         preG = np.nan_to_num(1./cltotTTArr)
            #         allTerms += [ell1*ell2*fft2(ifft2(preF)*ifft2(preG))]
            #         for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
            #             preF = np.nan_to_num(1./cltotEEArr)
            #             preG = trigfact*ell1*ell2*clunlenTEArrNow*clunlenTEArr*np.nan_to_num(1./cltotTTArr)
            #             allTerms += [ell1*ell2*fft2(ifft2(preF)*ifft2(preG))]
            #         for trigfact in [cosf,sinf]:
            #             preFX = trigfact*ell1*clunlenTEArrNow*np.nan_to_num(1./cltotEEArr)
            #             preGX = trigfact*ell2*clunlenTEArr*np.nan_to_num(1./cltotTTArr)
            #             allTerms += [2.*ell1*ell2*fft2(ifft2(preFX)*ifft2(preGX))]
        elif XY=='TE':
            clunlenTEArrNow = self.uClNow2d['TE'].copy()
            if halo:
                sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
                cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
                lx = self.lxMap
                ly = self.lyMap
                lxhat = self.lxHatMap
                lyhat = self.lyHatMap
                sinf = sin2phi(lxhat,lyhat)
                sinsqf = sinf**2.
                cosf = cos2phi(lxhat,lyhat)
                cossqf = cosf**2.
                WXY = self.WXY('TE')*self.kBeamX
                WY = self.WY('EE')*self.kBeamY
                rfact = 2.**0.25
                for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
                    for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
                        preF = trigfact*ell1*ell2*clunlenTEArrNow*WXY
                        preG = trigfact*WY
                        allTerms += [ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True),axes=[-2,-1])]
                    for trigfact in [cosf,sinf]:
                        preFX = trigfact*ell1*clunlenTEArrNow*WY
                        preGX = trigfact*ell2*WXY
                        allTerms += [ell1*ell2*fft(ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])]
            # else:
            #     sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
            #     cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
            #     lx = self.lxMap
            #     ly = self.lyMap
            #     lxhat = self.lxHatMap
            #     lyhat = self.lyHatMap
            #     sinf = sin2phi(lxhat,lyhat)
            #     sinsqf = sinf**2.
            #     cosf = cos2phi(lxhat,lyhat)
            #     cossqf = cosf**2.
            #     rfact = 2.**0.25
            #     for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
            #         for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
            #             preF = trigfact*ell1*ell2*clunlenTEArrNow* self.WXY('TE')#clunlenTEArr*np.nan_to_num(1./cltotTTArr)
            #             preG = trigfact*self.WY('EE')#np.nan_to_num(1./cltotEEArr)
            #             allTerms += [ell1*ell2*fft2(ifft2(preF)*ifft2(preG))]
            #         preF = self.WY('TT')#np.nan_to_num(1./cltotTTArr)
            #         preG = ell1*ell2*clunlenTEArrNow* self.WXY('ET') #*clunlenTEArr*np.nan_to_num(1./cltotEEArr)
            #         allTerms += [ell1*ell2*fft2(ifft2(preF)*ifft2(preG))]
            #         for trigfact in [cosf,sinf]:
            #             preFX = trigfact*ell1*clunlenTEArrNow*self.WY('TT')#np.nan_to_num(1./cltotTTArr)
            #             preGX = trigfact*ell2* self.WXY('ET')#*clunlenTEArr*np.nan_to_num(1./cltotEEArr)
            #             allTerms += [2.*ell1*ell2*fft2(ifft2(preFX)*ifft2(preGX))]
        elif XY == 'TB':
            clunlenTEArrNow = self.uClNow2d['TE'].copy()
            sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
            cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
            lx = self.lxMap
            ly = self.lyMap
            termsF = []
            termsF.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
            termsF.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
            termsF.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
            termsG = []
            termsG.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
            termsG.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
            termsG.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
            lxhat = self.lxHatMap
            lyhat = self.lyHatMap
            WXY = self.WXY('TB')*self.kBeamX
            WY = self.WY('BB')*self.kBeamY
            for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
                preF = ellsq*clunlenTEArrNow*WXY
                preG = WY
                for termF,termG in zip(termsF,termsG):
                    allTerms += [ellsq*fft(ifft(termF(preF,lxhat,lyhat),axes=[-2,-1],normalize=True)*ifft(termG(preG,lxhat,lyhat),axes=[-2,-1],normalize=True),axes=[-2,-1])]
        else:
            print("ERROR: Unrecognized polComb")
            sys.exit(1)
        # Inverse of the normalization A_L: real part of the summed terms.
        ALinv = np.real(np.sum( allTerms, axis = 0))
        alval = np.nan_to_num(1. / ALinv)
        if self.fmask is not None: alval = self.fmask_func(alval,self.fmask)
        # Convert the phi-normalization to convergence: N^kappa = l^4 A_L / 4.
        l4 = (lmap**2.) * ((lmap + 1.)**2.)
        NL = l4 *alval/ 4.
        NL[np.where(np.logical_or(lmap >= self.bigell, lmap <2.))] = 0.
        retval = np.nan_to_num(NL.real * self.pixScaleX*self.pixScaleY )
        if setNl:
            self.Nlkk[XY] = retval.copy()
            #print "SETTING NL"
        # debug_edges = np.arange(400,6000,50)
        # import orphics.tools.stats as stats
        # import orphics.tools.io as io
        # io.quickPlot2d((np.fft.fftshift(retval)),"nl2d.png")
        # binner = stats.bin2D(self.modLMap,debug_edges)
        # cents,ws = binner.bin(retval.real)
        # pl = io.Plotter()#scaleY='log')
        # pl.add(cents,ws)
        # pl._ax.set_xlim(2,6000)
        # pl.done("nl.png")
        # sys.exit()
        return retval * 2. * np.nan_to_num(1. / lmap/(lmap+1.))
    def delensClBB(self,Nlkk,fmask=None,halo=True):
        """
        Delens ClBB with input Nlkk curve
        """
        # Set the phi noise = Clpp + Nlpp
        Nlppnow = Nlkk*4./(self.modLMap**2.)/((self.modLMap+1.)**2.)
        clPPArr = self.clpp2d
        cltotPPArr = clPPArr + Nlppnow
        cltotPPArr[np.isnan(cltotPPArr)] = np.inf
        # Get uClEE
        clunlenEEArr = self.uClFid2d['EE'].copy()
        # Get lClEE + NEE
        clunlentotEEArr = (self.lClFid2d['EE'].copy()+self.noiseYY2d['EE'])
        # Mask
        clunlentotEEArr[self.fMaskYY['EE']==0] = np.inf
        if fmask is None:
            fmask = self.fMaskYY['EE']
        cltotPPArr[fmask==0] = np.inf
        # Trig required for responses
        sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
        cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
        lx = self.lxMap
        ly = self.lyMap
        lxhat = self.lxHatMap
        lyhat = self.lyHatMap
        sinf = sin2phi(lxhat,lyhat)
        sinsqf = sinf**2.
        cosf = cos2phi(lxhat,lyhat)
        cossqf = cosf**2.
        # Use ffts to calculate each term instead of convolving
        allTerms = []
        for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
            for trigfactOut,trigfactIn in zip([sinsqf,cossqf,1.j*np.sqrt(2.)*sinf*cosf],[cossqf,sinsqf,1.j*np.sqrt(2.)*sinf*cosf]):
                preF1 = trigfactIn*ellsq*clunlenEEArr
                preG1 = ellsq*clPPArr
                # Wiener-filtered pieces subtract the part of B that the
                # reconstruction can remove.
                preF2 = trigfactIn*ellsq*clunlenEEArr**2.*np.nan_to_num(1./clunlentotEEArr) * self.fMaskYY['EE']
                preG2 = ellsq*clPPArr**2.*np.nan_to_num(1./cltotPPArr) * fmask
                t1 = ifft(preF1,axes=[-2,-1],normalize=True)*ifft(preG1,axes=[-2,-1],normalize=True) # Orig B
                t2 = ifft(preF2,axes=[-2,-1],normalize=True)*ifft(preG2,axes=[-2,-1],normalize=True) # Delensed part
                allTerms += [trigfactOut*(fft(t1 - t2,axes=[-2,-1]))]
        # Sum all terms
        ClBBres = np.real(np.sum( allTerms, axis = 0))
        # Pixel factors
        ClBBres[np.where(np.logical_or(self.modLMap >= self.bigell, self.modLMap == 0.))] = 0.
        ClBBres *= self.Nx * self.Ny
        area =self.Nx*self.Ny*self.pixScaleX*self.pixScaleY
        bbNoise2D = ((np.sqrt(ClBBres)/self.pixScaleX/self.pixScaleY)**2.)*(area/(self.Nx*self.Ny*1.0)**2)
        # Set lensed BB to delensed level
        self.lClFid2d['BB'] = bbNoise2D.copy()
        return bbNoise2D
class NlGenerator(object):
def __init__(self,shape,wcs,theorySpectra,bin_edges=None,gradCut=None,TCMB=2.7255e6,bigell=9000,lensedEqualsUnlensed=False,unlensedEqualsLensed=True):
self.shape = shape
self.wcs = wcs
self.N = QuadNorm(shape,wcs,gradCut=gradCut,bigell=bigell)
self.TCMB = TCMB
cmbList = ['TT','TE','EE','BB']
self.theory = theorySpectra
for cmb in cmbList:
uClFilt = theorySpectra.uCl(cmb,self.N.modLMap)
uClNorm = uClFilt
lClFilt = theorySpectra.lCl(cmb,self.N.modLMap)
if unlensedEqualsLensed:
self.N.addUnlensedNorm2DPower(cmb,lClFilt.copy())
self.N.addUnlensedFilter2DPower(cmb,lClFilt.copy())
else:
self.N.addUnlensedNorm2DPower(cmb,uClNorm.copy())
self.N.addUnlensedFilter2DPower(cmb,uClFilt.copy())
if lensedEqualsUnlensed:
self.N.addLensedFilter2DPower(cmb,uClFilt.copy())
else:
self.N.addLensedFilter2DPower(cmb,lClFilt.copy())
Clkk2d = theorySpectra.gCl("kk",self.N.modLMap)
self.N.addClkk2DPower(Clkk2d)
self.N.bigell = bigell
if bin_edges is not None:
self.bin_edges = bin_edges
self.binner = bin2D(self.N.modLMap, bin_edges)
def updateBins(self,bin_edges):
self.N.bigell = bin_edges[len(bin_edges)-1]
self.binner = bin2D(self.N.modLMap, bin_edges)
self.bin_edges = bin_edges
    def updateNoiseAdvanced(self,beamTX,noiseTX,beamPX,noisePX,tellminX,tellmaxX,pellminX,pellmaxX,beamTY,noiseTY,beamPY,noisePY,tellminY,tellmaxY,pellminY,pellmaxY,lkneesX=[0,0],alphasX=[1,1],lkneesY=[0,0],alphasY=[1,1],lxcutTX=None,lxcutTY=None,lycutTX=None,lycutTY=None,lxcutPX=None,lxcutPY=None,lycutPX=None,lycutPY=None,fgFuncX=None,fgFuncY=None,beamFileTX=None,beamFilePX=None,beamFileTY=None,beamFilePY=None,noiseFuncTX=None,noiseFuncTY=None,noiseFuncPX=None,noiseFuncPY=None):
        """Set fully independent temperature/polarization beams, noise and
        Fourier masks for the X and Y estimator legs.

        lknees*/alphas* are [T,P] pairs for the atmospheric (1/f) noise model;
        fgFuncX/fgFuncY optionally add a 2D foreground power to the temperature
        noise; beamFile*/noiseFunc* override the Gaussian-beam / white-noise
        defaults. Returns the 2D noise maps (nTX,nPX,nTY,nPY).
        """
        # Use the full calculation range for both T and P.
        self.N.lmax_T = self.N.bigell
        self.N.lmax_P = self.N.bigell
        # Unpack the [T,P] atmospheric-noise parameters per leg.
        lkneeTX, lkneePX = lkneesX
        lkneeTY, lkneePY = lkneesY
        alphaTX, alphaPX = alphasX
        alphaTY, alphaPY = alphasY
        nTX = maps.whiteNoise2D([noiseTX],beamTX,self.N.modLMap, \
                                TCMB=self.TCMB,lknees=[lkneeTX],alphas=[alphaTX],\
                                beamFile=beamFileTX, \
                                noiseFuncs = [noiseFuncTX])[0]
        nTY = maps.whiteNoise2D([noiseTY],beamTY,self.N.modLMap, \
                                TCMB=self.TCMB,lknees=[lkneeTY],alphas=[alphaTY], \
                                beamFile=beamFileTY, \
                                noiseFuncs=[noiseFuncTY])[0]
        nPX = maps.whiteNoise2D([noisePX],beamPX,self.N.modLMap, \
                                TCMB=self.TCMB,lknees=[lkneePX],alphas=[alphaPX],\
                                beamFile=beamFilePX, \
                                noiseFuncs = [noiseFuncPX])[0]
        nPY = maps.whiteNoise2D([noisePY],beamPY,self.N.modLMap, \
                                TCMB=self.TCMB,lknees=[lkneePY],alphas=[alphaPY], \
                                beamFile=beamFilePY, \
                                noiseFuncs=[noiseFuncPY])[0]
        # Annulus masks (with optional lx/ly strips cut) for each leg/spectrum.
        fMaskTX = maps.mask_kspace(self.shape,self.wcs,lmin=tellminX,lmax=tellmaxX,lxcut=lxcutTX,lycut=lycutTX)
        fMaskTY = maps.mask_kspace(self.shape,self.wcs,lmin=tellminY,lmax=tellmaxY,lxcut=lxcutTY,lycut=lycutTY)
        fMaskPX = maps.mask_kspace(self.shape,self.wcs,lmin=pellminX,lmax=pellmaxX,lxcut=lxcutPX,lycut=lycutPX)
        fMaskPY = maps.mask_kspace(self.shape,self.wcs,lmin=pellminY,lmax=pellmaxY,lxcut=lxcutPY,lycut=lycutPY)
        if fgFuncX is not None:
            fg2d = fgFuncX(self.N.modLMap) #/ self.TCMB**2.
            nTX += fg2d
        if fgFuncY is not None:
            fg2d = fgFuncY(self.N.modLMap) #/ self.TCMB**2.
            nTY += fg2d
        # Register TT/EE/BB noise and masks; polarization noise is shared by EE and BB.
        nList = ['TT','EE','BB']
        nListX = [nTX,nPX,nPX]
        nListY = [nTY,nPY,nPY]
        fListX = [fMaskTX,fMaskPX,fMaskPX]
        fListY = [fMaskTY,fMaskPY,fMaskPY]
        for i,noise in enumerate(nList):
            self.N.addNoise2DPowerXX(noise,nListX[i],fListX[i])
            self.N.addNoise2DPowerYY(noise,nListY[i],fListY[i])
        return nTX,nPX,nTY,nPY
    def updateNoise(self,beamX,noiseTX,noisePX,tellminX,tellmaxX,pellminX,pellmaxX,beamY=None,noiseTY=None,noisePY=None,tellminY=None,tellmaxY=None,pellminY=None,pellmaxY=None,lkneesX=[0.,0.],alphasX=[1.,1.],lkneesY=[0.,0.],alphasY=[1.,1.],lxcutTX=0,lxcutTY=0,lycutTX=0,lycutTY=0,lxcutPX=0,lxcutPY=0,lycutPX=0,lycutPY=0,fgFuncX=None,beamFileX=None,fgFuncY=None,beamFileY=None,noiseFuncTX=None,noiseFuncTY=None,noiseFuncPX=None,noiseFuncPY=None,bellminY=None,bellmaxY=None):
        """Set beams, noise and Fourier masks for both estimator legs.

        Any Y-leg argument left as None defaults to its X-leg counterpart;
        the Y-leg B-mode mask bounds default to the Y polarization bounds.
        Returns the 2D noise maps (nTX,nPX,nTY,nPY).
        """
        def setDefault(A,B):
            # Fall back to B when A was not provided.
            if A is None:
                return B
            else:
                return A
        beamY = setDefault(beamY,beamX)
        noiseTY = setDefault(noiseTY,noiseTX)
        noisePY = setDefault(noisePY,noisePX)
        tellminY = setDefault(tellminY,tellminX)
        pellminY = setDefault(pellminY,pellminX)
        tellmaxY = setDefault(tellmaxY,tellmaxX)
        pellmaxY = setDefault(pellmaxY,pellmaxX)
        bellminY = setDefault(bellminY,pellminY)
        bellmaxY = setDefault(bellmaxY,pellmaxY)
        self.N.lmax_T = self.N.bigell
        self.N.lmax_P = self.N.bigell
        nTX,nPX = maps.whiteNoise2D([noiseTX,noisePX],beamX,self.N.modLMap, \
                                    TCMB=self.TCMB,lknees=lkneesX,alphas=alphasX,beamFile=beamFileX, \
                                    noiseFuncs = [noiseFuncTX,noiseFuncPX])
        nTY,nPY = maps.whiteNoise2D([noiseTY,noisePY],beamY,self.N.modLMap, \
                                    TCMB=self.TCMB,lknees=lkneesY,alphas=alphasY,beamFile=beamFileY, \
                                    noiseFuncs=[noiseFuncTY,noiseFuncPY])
        ### DEBUG
        # beam = 1.5
        # noise = 5.
        # from orphics import cosmology,io
        # import sys
        # nTX = cosmology.white_noise_with_atm_func(self.N.modLMap,noise,0,1,dimensionless=False,TCMB=2.7255e6)/maps.gauss_beam(self.N.modLMap,beam)**2.
        # nTY = nTX.copy()
        # nPX = nTX.copy()
        # nPY = nTX.copy()
        # # ells = np.arange(2,6000)
        # # nTX = cosmology.white_noise_with_atm_func(ells,noise,0,1,dimensionless=False,TCMB=2.7255e6)/maps.gauss_beam(ells,beam)**2.
        # # pl = io.Plotter(yscale='log')
        # # pl.add(ells,ells**2.*self.theory.lCl('TT',ells))
        # # pl.add(ells,nTX*ells**2.)
        # # pl.done()
        # # sys.exit()
        # print(tellminX,tellmaxX,tellminY,tellmaxY)
        ####
        fMaskTX = maps.mask_kspace(self.shape,self.wcs,lmin=tellminX,lmax=tellmaxX,lxcut=lxcutTX,lycut=lycutTX)
        fMaskTY = maps.mask_kspace(self.shape,self.wcs,lmin=tellminY,lmax=tellmaxY,lxcut=lxcutTY,lycut=lycutTY)
        fMaskPX = maps.mask_kspace(self.shape,self.wcs,lmin=pellminX,lmax=pellmaxX,lxcut=lxcutPX,lycut=lycutPX)
        fMaskPY = maps.mask_kspace(self.shape,self.wcs,lmin=pellminY,lmax=pellmaxY,lxcut=lxcutPY,lycut=lycutPY)
        # NOTE(review): fMaskBX reuses the X polarization bounds -- there is no
        # bellminX/bellmaxX parameter; only the Y-leg B mask is configurable.
        # Confirm this asymmetry is intended.
        fMaskBX = maps.mask_kspace(self.shape,self.wcs,lmin=pellminX,lmax=pellmaxX,lxcut=lxcutPX,lycut=lycutPX)
        fMaskBY = maps.mask_kspace(self.shape,self.wcs,lmin=bellminY,lmax=bellmaxY,lxcut=lxcutPY,lycut=lycutPY)
        if fgFuncX is not None:
            fg2d = fgFuncX(self.N.modLMap) #/ self.TCMB**2.
            nTX += fg2d
        if fgFuncY is not None:
            fg2d = fgFuncY(self.N.modLMap) #/ self.TCMB**2.
            nTY += fg2d
        # Register TT/EE/BB noise; polarization noise is shared by EE and BB,
        # but BB gets its own (possibly different) mask.
        nList = ['TT','EE','BB']
        nListX = [nTX,nPX,nPX]
        nListY = [nTY,nPY,nPY]
        fListX = [fMaskTX,fMaskPX,fMaskBX]
        fListY = [fMaskTY,fMaskPY,fMaskBY]
        for i,noise in enumerate(nList):
            self.N.addNoise2DPowerXX(noise,nListX[i],fListX[i])
            self.N.addNoise2DPowerYY(noise,nListY[i],fListY[i])
        return nTX,nPX,nTY,nPY
def updateNoiseSimple(self,ells,nltt,nlee,lmin,lmax):
nTX = interp1d(ells,nltt,bounds_error=False,fill_value=0.)(self.N.modLMap)
nPX = interp1d(ells,nltt,bounds_error=False,fill_value=0.)(self.N.modLMap)
nTY = nTX
nPY = nPX
fMaskTX = maps.mask_kspace(self.N.shape,self.N.wcs,lmin=lmin,lmax=lmax)
fMaskTY = maps.mask_kspace(self.N.shape,self.N.wcs,lmin=lmin,lmax=lmax)
fMaskPX = maps.mask_kspace(self.N.shape,self.N.wcs,lmin=lmin,lmax=lmax)
fMaskPY = maps.mask_kspace(self.N.shape,self.N.wcs,lmin=lmin,lmax=lmax)
nList = ['TT','EE','BB']
nListX = [nTX,nPX,nPX]
nListY = [nTY,nPY,nPY]
fListX = [fMaskTX,fMaskPX,fMaskPX]
fListY = [fMaskTY,fMaskPY,fMaskPY]
for i,noise in enumerate(nList):
self.N.addNoise2DPowerXX(noise,nListX[i],fListX[i])
self.N.addNoise2DPowerYY(noise,nListY[i],fListY[i])
return nTX,nPX,nTY,nPY
def getNl(self,polComb='TT',halo=True):
AL = self.N.getNlkk2d(polComb,halo=halo)
data2d = self.N.Nlkk[polComb]
centers, Nlbinned = self.binner.bin(data2d)
Nlbinned = sanitizePower(Nlbinned)
return centers, Nlbinned
    def getNlIterative(self,polCombs,pellmin,pellmax,dell=20,halo=True,dTolPercentage=1.,verbose=True,plot=False,max_iterations=np.inf,eff_at=60,kappa_min=0,kappa_max=np.inf):
        """Iteratively delensed N_L^kappa for a combination of estimators.

        If neither EB nor TB is requested, returns the simple minimum-variance
        combination with no iteration. Otherwise alternates between (a) the
        inverse-variance combined EB/TB noise and (b) the delensed BB power,
        until the binned BB changes by less than dTolPercentage percent.
        Returns (k_ells, Nlkk, bb_ells, ClBB_delensed, efficiency);
        the simple branch returns (bin_edges, Nl, None, None, None).

        NOTE(review): with the default kappa_max=np.inf, kmax is infinite and
        np.arange below cannot build bin_edges -- callers appear expected to
        pass a finite kappa_max; confirm.
        """
        kmax = max(pellmax,kappa_max)
        kmin = 2
        fmask = maps.mask_kspace(self.shape,self.wcs,lmin=kappa_min,lmax=kappa_max)
        Nleach = {}
        bin_edges = np.arange(2,kmax+dell/2.,dell)
        # First pass: individual Nl curves for every requested estimator.
        for polComb in polCombs:
            self.updateBins(bin_edges)
            AL = self.N.getNlkk2d(polComb,halo=halo)
            data2d = self.N.Nlkk[polComb]
            ls, Nls = self.binner.bin(data2d)
            Nls = sanitizePower(Nls)
            Nleach[polComb] = (ls,Nls)
        # No B-mode estimators: nothing to iterate, return the plain MV combination.
        if ('EB' not in polCombs) and ('TB' not in polCombs):
            Nlret = Nlmv(Nleach,polCombs,None,None,bin_edges)
            return bin_edges,sanitizePower(Nlret),None,None,None
        # Save the fiducial lensed BB so we can restore it after iterating
        # (delensClBB overwrites self.N.lClFid2d['BB'] each call).
        origBB = self.N.lClFid2d['BB'].copy()
        delensBinner = bin2D(self.N.modLMap, bin_edges)
        ellsOrig, oclbb = delensBinner.bin(origBB.real)
        oclbb = sanitizePower(oclbb)
        origclbb = oclbb.copy()
        if plot:
            from orphics.tools.io import Plotter
            pl = Plotter(scaleY='log',scaleX='log')
            pl.add(ellsOrig,oclbb*ellsOrig**2.,color='black',lw=2)
        ctol = np.inf
        inum = 0
        while ctol>dTolPercentage:
            if inum >= max_iterations: break
            bNlsinv = 0.
            polPass = list(polCombs)
            if verbose: print("Performing iteration ", inum+1)
            # Recompute the B-mode estimators against the current (delensed) BB
            # and inverse-variance combine them.
            for pol in ['EB','TB']:
                if not(pol in polCombs): continue
                Al2d = self.N.getNlkk2d(pol,halo)
                centers, nlkkeach = delensBinner.bin(self.N.Nlkk[pol])
                nlkkeach = sanitizePower(nlkkeach)
                bNlsinv += 1./nlkkeach
                polPass.remove(pol)
            nlkk = 1./bNlsinv
            # Combine with the non-B estimators and delens BB with the result.
            Nldelens = Nlmv(Nleach,polPass,centers,nlkk,bin_edges)
            Nldelens2d = interp1d(bin_edges,Nldelens,fill_value=0.,bounds_error=False)(self.N.modLMap)
            bbNoise2D = self.N.delensClBB(Nldelens2d,fmask=fmask,halo=halo)
            ells, dclbb = delensBinner.bin(bbNoise2D)
            dclbb = sanitizePower(dclbb)
            if inum>0:
                # Convergence criterion: percentage change of the mean binned BB.
                newLens = np.nanmean(nlkk)
                oldLens = np.nanmean(oldNl)
                new = np.nanmean(dclbb)
                old = np.nanmean(oclbb)
                ctol = np.abs(old-new)*100./new
                ctolLens = np.abs(oldLens-newLens)*100./newLens
                if verbose: print("Percentage difference between iterations is ",ctol, " compared to requested tolerance of ", dTolPercentage,". Diff of Nlkks is ",ctolLens)
            oldNl = nlkk.copy()
            oclbb = dclbb.copy()
            inum += 1
            if plot:
                pl.add(ells,dclbb*ells**2.,ls="--",alpha=0.5,color="black")
        if plot:
            import os
            pl.done(os.environ['WWW']+'delens.png')
        # Restore the fiducial lensed BB mutated by delensClBB.
        self.N.lClFid2d['BB'] = origBB.copy()
        def find_nearest(array,value):
            # Index of the array element closest to value.
            idx = (np.abs(array-value)).argmin()
            return idx
        new_ells,new_bb = ells,dclbb
        new_k_ells,new_nlkk = fillLowEll(bin_edges,sanitizePower(Nldelens),kmin)
        if eff_at is None:
            # Peak delensing efficiency over all bins, in percent.
            efficiency = ((origclbb-dclbb)*100./origclbb).max()
        else:
            # Efficiency evaluated at the bin nearest to ell = eff_at.
            id_ellO = find_nearest(ellsOrig,eff_at)
            id_ellD = find_nearest(new_ells,eff_at)
            efficiency = ((origclbb[id_ellO]-new_bb[id_ellD])*100./origclbb[id_ellO])
        return new_k_ells,new_nlkk,new_ells,new_bb,efficiency
    def iterativeDelens(self,xy,dTolPercentage=1.0,halo=True,verbose=True):
        """Iteratively delens BB with a single B-mode estimator ('EB' or 'TB').

        Alternates computing N_L^kappa for the estimator and delensing BB with
        it until the mean binned Nl changes by less than dTolPercentage
        percent. Returns (bin centers, converged Nlkk, delensing efficiency).

        NOTE(review): delensClBB is called positionally as
        delensClBB(Nlkk, halo), whereas getNlIterative calls it with
        fmask=..., halo=... keywords -- if the second positional parameter is
        fmask, `halo` lands in the wrong slot here; confirm the signature.
        """
        assert xy=='EB' or xy=='TB'
        # Save the fiducial lensed BB; delensClBB overwrites it each iteration.
        origBB = self.N.lClFid2d['BB'].copy()
        bin_edges = self.bin_edges #np.arange(100.,3000.,20.)
        delensBinner = bin2D(self.N.modLMap, bin_edges)
        ells, oclbb = delensBinner.bin(origBB)
        oclbb = sanitizePower(oclbb)
        ctol = np.inf
        inum = 0
        #from orphics.tools.output import Plotter
        #pl = Plotter(scaleY='log',scaleX='log')
        #pl = Plotter(scaleY='log')
        while ctol>dTolPercentage:
            if verbose: print("Performing iteration ", inum+1)
            Al2d = self.N.getNlkk2d(xy,halo)
            centers, nlkk = delensBinner.bin(self.N.Nlkk[xy])
            nlkk = sanitizePower(nlkk)
            bbNoise2D = self.N.delensClBB(self.N.Nlkk[xy],halo)
            ells, dclbb = delensBinner.bin(bbNoise2D)
            dclbb = sanitizePower(dclbb)
            if inum>0:
                # Convergence: percentage change of the mean binned Nlkk.
                new = np.nanmean(nlkk)
                old = np.nanmean(oldNl)
                ctol = np.abs(old-new)*100./new
                if verbose: print("Percentage difference between iterations is ",ctol, " compared to requested tolerance of ", dTolPercentage)
            oldNl = nlkk.copy()
            inum += 1
            #pl.add(centers,nlkk)
            #pl.add(ells,dclbb*ells**2.)
            #pl.done('output/delens'+xy+'.png')
        # Restore the fiducial lensed BB mutated during iteration.
        self.N.lClFid2d['BB'] = origBB.copy()
        efficiency = (np.max(oclbb)-np.max(dclbb))*100./np.max(oclbb)
        return centers,nlkk,efficiency
class Estimator(object):
    '''
    Flat-sky lensing and Omega quadratic estimators
    Functionality includes:
    - small-scale lens estimation with gradient cutoff
    - combine maps from two different experiments
    NOTE: The TE estimator is not identical between large
    and small-scale estimators. Need to test this.
    '''
    def __init__(self,shape,wcs,
                 theorySpectraForFilters,
                 theorySpectraForNorm=None,
                 noiseX2dTEB=[None,None,None],
                 noiseY2dTEB=[None,None,None],
                 noiseX_is_total = False,
                 noiseY_is_total = False,
                 fmaskX2dTEB=[None,None,None],
                 fmaskY2dTEB=[None,None,None],
                 fmaskKappa=None,
                 kBeamX = None,
                 kBeamY = None,
                 doCurl=False,
                 TOnly=False,
                 halo=True,
                 gradCut=None,
                 verbose=False,
                 loadPickledNormAndFilters=None,
                 savePickledNormAndFilters=None,
                 uEqualsL=False,
                 bigell=9000,
                 mpi_comm=None,
                 lEqualsU=False):
        '''
        All the 2d fourier objects below are pre-fftshifting. They must be of the same dimension.
        shape,wcs: enmap geometry
        theorySpectraForFilters: an orphics.tools.cmb.TheorySpectra object with CMB Cls loaded
        theorySpectraForNorm=None: same as above but if you want to use a different cosmology in the expected value of the 2-pt
        noiseX2dTEB=[None,None,None]: a list of 2d arrays that corresponds to the noise power in T, E, B (same units as Cls above)
        noiseY2dTEB=[None,None,None]: the same as above but if you want to use a different experiment for the Y maps
        fmaskX2dTEB=[None,None,None]: a list of 2d integer arrays where 1 corresponds to modes included and 0 to those not included
        fmaskY2dTEB=[None,None,None]: same as above but for Y maps
        fmaskKappa=None: same as above but for output kappa map
        doCurl=False: return curl Omega estimates too? If yes, output of getKappa will be (kappa,curl)
        TOnly=False: do only TT? If yes, others will not be initialized and you'll get errors if you try to getKappa(XY) for XY!=TT
        halo=False: use the halo lensing estimators?
        gradCut=None: if using halo lensing estimators, specify an integer up to what L the X map will be retained
        verbose=False: print some occasional output?
        '''
        self.verbose = verbose
        # initialize norm and filters
        self.doCurl = doCurl
        # Short-circuit: restore a previously pickled (norm, filters) state.
        if loadPickledNormAndFilters is not None:
            if verbose: print("Unpickling...")
            with open(loadPickledNormAndFilters,'rb') as fin:
                self.N,self.AL,self.OmAL,self.fmaskK,self.phaseY = pickle.load(fin)
            return
        self.halo = halo
        self.AL = {}
        if doCurl: self.OmAL = {}
        # Fourier-space beams for the X and Y legs (1. == already deconvolved).
        if kBeamX is not None:
            self.kBeamX = kBeamX
        else:
            self.kBeamX = 1.
        if kBeamY is not None:
            self.kBeamY = kBeamY
        else:
            self.kBeamY = 1.
        self.doCurl = doCurl
        self.halo = halo
        if fmaskKappa is None:
            ellMinK = 80
            ellMaxK = 3000
            print("WARNING: using default kappa mask of 80 < L < 3000")
            # NOTE(review): self.N is only created further below (and only on
            # rank 0), so this default branch reads self.N before it exists
            # and would raise AttributeError -- confirm callers always pass
            # fmaskKappa explicitly.
            self.fmaskK = fmaps.fourierMask(self.N.lx,self.N.ly,self.N.modLMap,lmin=ellMinK,lmax=ellMaxK)
        else:
            self.fmaskK = fmaskKappa
        self.fmaskX2dTEB = fmaskX2dTEB
        self.fmaskY2dTEB = fmaskY2dTEB
        # Get MPI comm
        comm = mpi_comm
        if comm is not None:
            rank = comm.Get_rank()
            numcores = comm.Get_size()
        else:
            rank = 0
            numcores = 1
        self.wcs = wcs
        # Only the root rank builds the (expensive) normalization machinery.
        if rank==0:
            self.N = QuadNorm(shape,wcs,gradCut=gradCut,verbose=verbose,kBeamX=self.kBeamX,kBeamY=self.kBeamY,bigell=bigell,fmask=self.fmaskK)
            if TOnly:
                nList = ['TT']
                cmbList = ['TT']
                estList = ['TT']
                self.phaseY = 1.
            else:
                # Spin-2 phase factor exp(2 i theta) used to rotate E/B maps.
                self.phaseY = np.cos(2.*self.N.thetaMap)+1.j*np.sin(2.*self.N.thetaMap)
                nList = ['TT','EE','BB']
                cmbList = ['TT','TE','EE','BB']
                #estList = ['TT','TE','ET','EB','EE','TB']
                estList = ['TT','TE','ET','EB','EE','TB','BE']
            self.nList = nList
            if self.verbose: print("Initializing filters and normalization for quadratic estimators...")
            # uEqualsL and lEqualsU are mutually exclusive spectrum swaps.
            assert not(uEqualsL and lEqualsU)
            for cmb in cmbList:
                if uEqualsL:
                    uClFilt = theorySpectraForFilters.lCl(cmb,self.N.modLMap)
                else:
                    uClFilt = theorySpectraForFilters.uCl(cmb,self.N.modLMap)
                if theorySpectraForNorm is not None:
                    if uEqualsL:
                        uClNorm = theorySpectraForFilters.lCl(cmb,self.N.modLMap)
                    else:
                        uClNorm = theorySpectraForNorm.uCl(cmb,self.N.modLMap)
                else:
                    uClNorm = uClFilt
                if lEqualsU:
                    lClFilt = theorySpectraForFilters.uCl(cmb,self.N.modLMap)
                else:
                    lClFilt = theorySpectraForFilters.lCl(cmb,self.N.modLMap)
                #lClFilt = theorySpectraForFilters.lCl(cmb,self.N.modLMap)
                self.N.addUnlensedFilter2DPower(cmb,uClFilt)
                self.N.addLensedFilter2DPower(cmb,lClFilt)
                self.N.addUnlensedNorm2DPower(cmb,uClNorm)
            for i,noise in enumerate(nList):
                self.N.addNoise2DPowerXX(noise,noiseX2dTEB[i],fmaskX2dTEB[i],is_total=noiseX_is_total)
                self.N.addNoise2DPowerYY(noise,noiseY2dTEB[i],fmaskY2dTEB[i],is_total=noiseY_is_total)
            try:
                self.N.addClkk2DPower(theorySpectraForFilters.gCl("kk",self.N.modLMap))
            except:
                print("Couldn't add Clkk2d power")
            self.estList = estList
            # NOTE(review): this unconditionally overwrites the self.OmAL = {}
            # created above when doCurl is True -- confirm curl support is
            # intentionally disabled here.
            self.OmAL = None
            for est in estList:
                self.AL[est] = self.N.getNlkk2d(est,halo=halo)
                #if doCurl: self.OmAL[est] = self.N.getCurlNlkk2d(est,halo=halo)
                # send_dat = np.array(self.vectors[label]).astype(np.float64)
                # self.comm.Send(send_dat, dest=0, tag=self.tag_start+k)
        else:
            # Non-root ranks leave the estimator uninitialized.
            pass
def updateNoise(self,nTX,nEX,nBX,nTY,nEY,nBY,noiseX_is_total=False,noiseY_is_total=False):
noiseX2dTEB = [nTX,nEX,nBX]
noiseY2dTEB = [nTY,nEY,nBY]
for i,noise in enumerate(self.nList):
self.N.addNoise2DPowerXX(noise,noiseX2dTEB[i],self.fmaskX2dTEB[i],is_total=noiseX_is_total)
self.N.addNoise2DPowerYY(noise,noiseY2dTEB[i],self.fmaskY2dTEB[i],is_total=noiseY_is_total)
for est in self.estList:
self.AL[est] = self.N.getNlkk2d(est,halo=self.halo)
if self.doCurl: self.OmAL[est] = self.N.getCurlNlkk2d(est,halo=self.halo)
def updateTEB_X(self,T2DData,E2DData=None,B2DData=None,alreadyFTed=False):
'''
Masking and windowing and apodizing and beam deconvolution has to be done beforehand!
Maps must have units corresponding to those of theory Cls and noise power
'''
self._hasX = True
self.kGradx = {}
self.kGrady = {}
lx = self.N.lxMap
ly = self.N.lyMap
if alreadyFTed:
self.kT = T2DData
else:
self.kT = fft(T2DData,axes=[-2,-1])
self.kGradx['T'] = lx*self.kT.copy()*1j
self.kGrady['T'] = ly*self.kT.copy()*1j
if E2DData is not None:
if alreadyFTed:
self.kE = E2DData
else:
self.kE = fft(E2DData,axes=[-2,-1])
self.kGradx['E'] = 1.j*lx*self.kE.copy()
self.kGrady['E'] = 1.j*ly*self.kE.copy()
if B2DData is not None:
if alreadyFTed:
self.kB = B2DData
else:
self.kB = fft(B2DData,axes=[-2,-1])
self.kGradx['B'] = 1.j*lx*self.kB.copy()
self.kGrady['B'] = 1.j*ly*self.kB.copy()
def updateTEB_Y(self,T2DData=None,E2DData=None,B2DData=None,alreadyFTed=False):
assert self._hasX, "Need to initialize gradient first."
self._hasY = True
self.kHigh = {}
if T2DData is not None:
if alreadyFTed:
self.kHigh['T']=T2DData
else:
self.kHigh['T']=fft(T2DData,axes=[-2,-1])
else:
self.kHigh['T']=self.kT.copy()
if E2DData is not None:
if alreadyFTed:
self.kHigh['E']=E2DData
else:
self.kHigh['E']=fft(E2DData,axes=[-2,-1])
else:
try:
self.kHigh['E']=self.kE.copy()
except:
pass
if B2DData is not None:
if alreadyFTed:
self.kHigh['B']=B2DData
else:
self.kHigh['B']=fft(B2DData,axes=[-2,-1])
else:
try:
self.kHigh['B']=self.kB.copy()
except:
pass
def kappa_from_map(self,XY,T2DData,E2DData=None,B2DData=None,T2DDataY=None,E2DDataY=None,B2DDataY=None,alreadyFTed=False,returnFt=False):
self.updateTEB_X(T2DData,E2DData,B2DData,alreadyFTed)
self.updateTEB_Y(T2DDataY,E2DDataY,B2DDataY,alreadyFTed)
return self.get_kappa(XY,returnFt=returnFt)
def fmask_func(self,arr):
fMask = self.fmaskK
arr[fMask<1.e-3] = 0.
return arr
def coadd_nlkk(self,ests):
ninvtot = 0.
for est in ests:
ninvtot += self.fmask_func(np.nan_to_num(1./self.N.Nlkk[est]))
return self.fmask_func(np.nan_to_num(1./ninvtot))
def coadd_kappa(self,ests,returnFt=False):
ktot = 0.
for est in ests:
rkappa = self.get_kappa(est,returnFt=True)
ktot += self.fmask_func(np.nan_to_num(rkappa/self.N.Nlkk[est]))
kft = ktot*self.coadd_nlkk(ests)
if returnFt: return kft
return ifft(kft,axes=[-2,-1],normalize=True).real
    def get_kappa(self,XY,returnFt=False):
        """Quadratic-estimator kappa reconstruction for estimator pair XY.

        Requires updateTEB_X and updateTEB_Y to have been called. Returns the
        Fourier-space kappa if returnFt, else the normalized real-space kappa
        map (an enmap). On NaNs in the result, dumps diagnostic plots and exits.
        """
        assert self._hasX and self._hasY
        assert XY in ['TT','TE','ET','EB','TB','EE','BE']
        X,Y = XY
        # Wiener filters for the gradient (X) and high-resolution (Y) legs.
        WXY = self.N.WXY(XY)
        WY = self.N.WY(Y+Y)
        lx = self.N.lxMap
        ly = self.N.lyMap
        # Spin-2 phase for polarization legs; unity for temperature.
        if Y in ['E','B']:
            phaseY = self.phaseY
        else:
            phaseY = 1.
        # Extra factor of i when the Y leg is a B map.
        phaseB = (int(Y=='B')*1.j)+(int(Y!='B'))
        fMask = self.fmaskK
        if self.verbose: startTime = time.time()
        # Real-space product of the filtered legs, then divergence of the
        # resulting vector field gives the unnormalized kappa.
        HighMapStar = ifft((self.kHigh[Y]*WY*phaseY*phaseB),axes=[-2,-1],normalize=True).conjugate()
        kPx = fft(ifft(self.kGradx[X]*WXY*phaseY,axes=[-2,-1],normalize=True)*HighMapStar,axes=[-2,-1])
        kPy = fft(ifft(self.kGrady[X]*WXY*phaseY,axes=[-2,-1],normalize=True)*HighMapStar,axes=[-2,-1])
        rawKappa = ifft((1.j*lx*kPx) + (1.j*ly*kPy),axes=[-2,-1],normalize=True).real
        AL = np.nan_to_num(self.AL[XY])
        assert not(np.any(np.isnan(rawKappa)))
        lmap = self.N.modLMap
        # Normalize and apply the output kappa mask.
        kappaft = -self.fmask_func(AL*fft(rawKappa,axes=[-2,-1]))
        if returnFt:
            return kappaft
        self.kappa = enmap.enmap(ifft(kappaft,axes=[-2,-1],normalize=True).real,self.wcs)
        try:
            assert not(np.any(np.isnan(self.kappa)))
        except:
            # Diagnostic dump: plot the offending maps and noise curves, then abort.
            import orphics.tools.io as io
            import orphics.tools.stats as stats
            io.quickPlot2d(np.fft.fftshift(np.abs(kappaft)),"ftkappa.png")
            io.quickPlot2d(np.fft.fftshift(fMask),"fmask.png")
            io.quickPlot2d(self.kappa.real,"nankappa.png")
            debug_edges = np.arange(20,20000,100)
            dbinner = stats.bin2D(self.N.modLMap,debug_edges)
            cents, bclkk = dbinner.bin(self.N.clkk2d)
            cents, nlkktt = dbinner.bin(self.N.Nlkk['TT'])
            cents, alkktt = dbinner.bin(AL/2.*lmap*(lmap+1.))
            try:
                cents, nlkkeb = dbinner.bin(self.N.Nlkk['EB'])
            except:
                pass
            pl = io.Plotter(scaleY='log',scaleX='log')
            pl.add(cents,bclkk)
            pl.add(cents,nlkktt,label="TT")
            pl.add(cents,alkktt,label="TTnorm",ls="--")
            try:
                pl.add(cents,nlkkeb,label="EB")
            except:
                pass
            pl.legendOn()
            pl._ax.set_ylim(1.e-9,1.e-5)
            pl.done("clkk.png")
            sys.exit()
        # from orphics.tools.io import Plotter
        # pl = Plotter()
        # #pl.plot2d(np.nan_to_num(self.kappa))
        # pl.plot2d((self.kappa.real))
        # pl.done("output/nankappa.png")
        # sys.exit(0)
        # try:
        #     assert not(np.any(np.isnan(self.kappa)))
        # except:
        #     from orphics.tools.io import Plotter
        #     pl = Plotter()
        #     pl.plot2d(np.nan_to_num(self.kappa))
        #     pl.done("output/nankappa.png")
        #     sys.exit(0)
        # if self.verbose:
        #     elapTime = time.time() - startTime
        #     print(("Time for core kappa was ", elapTime ," seconds."))
        # if self.doCurl:
        #     OmAL = self.OmAL[XY]*fMask
        #     rawCurl = ifft(1.j*lx*kPy - 1.j*ly*kPx,axes=[-2,-1],normalize=True).real
        #     self.curl = -ifft(OmAL*fft(rawCurl,axes=[-2,-1]),axes=[-2,-1],normalize=True)
        #     return self.kappa, self.curl
        return self.kappa
def Nlmv(Nleach,pols,centers,nlkk,bin_edges):
# Nleach: dict of (ls,Nls) for each polComb
# pols: list of polCombs to include
# centers,nlkk: additonal Nl to add
Nlmvinv = 0.
for polComb in pols:
ls,Nls = Nleach[polComb]
nlfunc = interp1d(ls,Nls,bounds_error=False,fill_value=np.inf)
Nleval = nlfunc(bin_edges)
Nlmvinv += np.nan_to_num(1./Nleval)
if nlkk is not None:
nlfunc = interp1d(centers,nlkk,bounds_error=False,fill_value=np.inf)
Nleval = nlfunc(bin_edges)
Nlmvinv += np.nan_to_num(1./Nleval)
return np.nan_to_num(1./Nlmvinv)
## HALOS
# g(x) = g(theta/thetaS) HuDeDeoVale 2007
# Dimensionless projected NFW profile; the three piecewise branches are the
# analytic continuations for x>1 and x<1, plus the x==1 limiting value 1/3.
gnfw = lambda x: np.piecewise(x, [x>1., x<1., x==1.], \
    [lambda y: (1./(y*y - 1.)) * \
     ( 1. - ( (2./np.sqrt(y*y - 1.)) * np.arctan(np.sqrt((y-1.)/(y+1.))) ) ), \
     lambda y: (1./(y*y - 1.)) * \
     ( 1. - ( (2./np.sqrt(-(y*y - 1.))) * np.arctanh(np.sqrt(-((y-1.)/(y+1.)))) ) ), \
     lambda y: (1./3.)])
# NFW mass-normalization factor: f(c) = ln(1+c) - c/(1+c).
f_c = lambda c: np.log(1.+c) - (c/(1.+c))
def nfw_kappa(massOverh,modrmap_radians,cc,zL=0.7,concentration=3.2,overdensity=180.,critical=False,atClusterZ=False):
    """NFW convergence profile for a CMB-source lens; the sign follows the sign of massOverh."""
    sign = 1. if massOverh > 0. else -1.
    comS = cc.results.comoving_radial_distance(cc.cmbZ)*cc.h
    comL = cc.results.comoving_radial_distance(zL)*cc.h
    winAtLens = (comS-comL)/comS
    theta_arcmin = modrmap_radians* 180.*60./np.pi
    kappa,r500 = NFWkappa(cc,np.abs(massOverh),concentration,zL,theta_arcmin,winAtLens,
                          overdensity=overdensity,critical=critical,atClusterZ=atClusterZ)
    return sign*kappa
def NFWkappa(cc,massOverh,concentration,zL,thetaArc,winAtLens,overdensity=500.,critical=True,atClusterZ=True):
    """NFW convergence kappa(theta) for a halo of mass massOverh (Msun/h).

    cc: cluster cosmology object providing results and rdel_c/rdel_m.
    thetaArc: angular radii in arcminutes (2D map expected for the
    central-pixel patch below).
    winAtLens: lensing window at the lens redshift.
    Returns (kappa, r500) with r500 in Mpc/h.
    """
    comL = (cc.results.comoving_radial_distance(zL) )*cc.h
    c = concentration
    M = massOverh
    # Overdensity defined at z=0 unless atClusterZ requests the cluster redshift.
    zdensity = 0.
    if atClusterZ: zdensity = zL
    if critical:
        r500 = cc.rdel_c(M,zdensity,overdensity).flatten()[0] # R500 in Mpc/h
    else:
        r500 = cc.rdel_m(M,zdensity,overdensity) # R500 in Mpc/h
    conv=np.pi/(180.*60.)
    theta = thetaArc*conv # theta in radians
    rS = r500/c
    thetaS = rS/ comL
    const12 = 9.571e-20 # 2G/c^2 in Mpc / solar mass
    fc = np.log(1.+c) - (c/(1.+c))
    #const3 = comL * comLS * (1.+zL) / comS # Mpc
    const3 = comL * (1.+zL) *winAtLens # Mpc
    const4 = M / (rS*rS) #solar mass / MPc^2
    const5 = 1./fc
    kappaU = gnfw(theta/thetaS)+theta*0. # added for compatibility with enmap
    consts = const12 * const3 * const4 * const5
    kappa = consts * kappaU
    # Patch the central pixel of odd-sized maps, where theta=0 would make the
    # profile singular, using the neighboring pixel's value.
    if thetaArc.shape[0]%2==1 and thetaArc.shape[1]%2==1:
        Ny,Nx = thetaArc.shape
        cx = int(Nx/2.)
        cy = int(Ny/2.)
        kappa[cy,cx] = kappa[cy-1,cx]
    assert np.all(np.isfinite(kappa))
    return kappa, r500
def NFWMatchedFilterSN(clusterCosmology,log10Moverh,c,z,ells,Nls,kellmax,overdensity=500.,critical=True,atClusterZ=True,arcStamp=100.,pxStamp=0.05,saveId=None,verbose=False,rayleighSigmaArcmin=None,returnKappa=False,winAtLens=None):
    """Matched-filter signal-to-noise for an NFW cluster kappa profile.

    Builds an NFW kappa template on a postage stamp (arcStamp arcmin wide,
    pxStamp arcmin pixels), truncates it at 5*theta500, normalizes by its
    integral, optionally convolves with a Rayleigh miscentering kernel, and
    evaluates the matched-filter variance against the noise curve (ells, Nls)
    limited to kellmax. Returns (sn, k500, std), or
    (sn, kappa_template_map) if returnKappa. If winAtLens is None the source
    is taken to be the CMB.
    """
    if rayleighSigmaArcmin is not None: assert rayleighSigmaArcmin>=pxStamp
    M = 10.**log10Moverh
    shape,wcs = maps.rect_geometry(width_deg=arcStamp/60.,px_res_arcmin=pxStamp)
    # Smallest multipole supported by the stamp size.
    kellmin = 2.*np.pi/arcStamp*np.pi/60./180.
    modLMap = enmap.modlmap(shape,wcs)
    xMap,yMap,modRMap,xx,yy = maps.get_real_attributes(shape,wcs)
    cc = clusterCosmology
    cmb = False
    if winAtLens is None:
        # Default source plane: the CMB.
        cmb = True
        comS = cc.results.comoving_radial_distance(cc.cmbZ)*cc.h
        comL = cc.results.comoving_radial_distance(z)*cc.h
        winAtLens = (comS-comL)/comS
    kappaReal, r500 = NFWkappa(cc,M,c,z,modRMap*180.*60./np.pi,winAtLens,overdensity=overdensity,critical=critical,atClusterZ=atClusterZ)
    dAz = cc.results.angular_diameter_distance(z) * cc.h
    # print ("daz " , dAz , " mpc")
    # print ("r500 " , r500 , " mpc")
    th500 = r500/dAz
    #fiveth500 = 10.*np.pi/180./60. #5.*th500
    fiveth500 = 5.*th500
    # print ("5theta500 " , fiveth500*180.*60./np.pi , " arcminutes")
    # print ("maximum theta " , modRMap.max()*180.*60./np.pi, " arcminutes")
    kInt = kappaReal.copy()
    kInt[modRMap>fiveth500] = 0.
    # print "mean kappa inside theta500 " , kInt[modRMap<fiveth500].mean()
    # print "area of th500 disc " , np.pi*fiveth500**2.*(180.*60./np.pi)**2.
    # print "estimated integral " , kInt[modRMap<fiveth500].mean()*np.pi*fiveth500**2.
    # Normalize the template by its integral within the truncation radius.
    k500 = simps(simps(kInt, yy), xx)
    if verbose: print(("integral of kappa inside disc ",k500))
    kappaReal[modRMap>fiveth500] = 0. #### !!!!!!!!! Might not be necessary!
    # if cmb: print z,fiveth500*180.*60./np.pi
    Ukappa = kappaReal/k500
    # pl = Plotter()
    # pl.plot2d(Ukappa)
    # pl.done("output/kappa.png")
    ellmax = kellmax
    ellmin = kellmin
    Uft = fft(Ukappa,axes=[-2,-1])
    if rayleighSigmaArcmin is not None:
        # Convolve the template with a Rayleigh miscentering distribution,
        # normalized to unity at the zero mode.
        Prayleigh = rayleigh(modRMap*180.*60./np.pi,rayleighSigmaArcmin)
        outDir = "/gpfs01/astro/www/msyriac/plots/"
        # io.quickPlot2d(Prayleigh,outDir+"rayleigh.png")
        rayK = fft(ifftshift(Prayleigh),axes=[-2,-1])
        rayK /= rayK[modLMap<1.e-3]
        Uft = Uft.copy()*rayK
    Upower = np.real(Uft*Uft.conjugate())
    # pl = Plotter()
    # pl.plot2d(fftshift(Upower))
    # pl.done("output/upower.png")
    # Interpolate the 1D noise curve onto the 2D grid; exclude modes outside
    # [ellmin, ellmax] by assigning infinite noise.
    Nls[Nls<0.]=0.
    s = splrep(ells,Nls,k=3)
    Nl2d = splev(modLMap,s)
    Nl2d[modLMap<ellmin]=np.inf
    Nl2d[modLMap>ellmax] = np.inf
    Ny,Nx = shape
    pixScaleY,pixScaleX = enmap.pixshape(shape,wcs)
    area = Nx*Ny*pixScaleX*pixScaleY
    Upower = Upower *area / (Nx*Ny)**2
    filter = np.nan_to_num(Upower/Nl2d)
    #filter = np.nan_to_num(1./Nl2d)
    filter[modLMap>ellmax] = 0.
    filter[modLMap<ellmin] = 0.
    # pl = Plotter()
    # pl.plot2d(fftshift(filter))
    # pl.done("output/filter.png")
    # if (cmb): print Upower.sum()
    # if not(cmb) and z>2.5:
    #     bin_edges = np.arange(500,ellmax,100)
    #     binner = bin2D(modLMap, bin_edges)
    #     centers, nl2dells = binner.bin(Nl2d)
    #     centers, upowerells = binner.bin(np.nan_to_num(Upower))
    #     centers, filterells = binner.bin(filter)
    #     from orphics.tools.io import Plotter
    #     pl = Plotter(scaleY='log')
    #     pl.add(centers,upowerells,label="upower")
    #     pl.add(centers,nl2dells,label="noise")
    #     pl.add(centers,filterells,label="filter")
    #     pl.add(ells,Nls,ls="--")
    #     pl.legendOn(loc='upper right')
    #     #pl._ax.set_ylim(0,1e-8)
    #     pl.done("output/filterells.png")
    #     sys.exit()
    # Matched-filter variance and resulting S/N.
    varinv = filter.sum()
    std = np.sqrt(1./varinv)
    sn = k500/std
    if verbose: print(sn)
    if saveId is not None:
        np.savetxt("data/"+saveId+"_m"+str(log10Moverh)+"_z"+str(z)+".txt",np.array([log10Moverh,z,1./sn]))
    if returnKappa:
        return sn,ifft(Uft,axes=[-2,-1],normalize=True).real*k500
    return sn, k500, std
def rayleigh(theta,sigma):
    """Rayleigh probability density in theta with scale parameter sigma."""
    scale_sq = sigma*sigma
    return theta/scale_sq*np.exp(-0.5*theta*theta/scale_sq)
# NFW dimensionless form
# rho(x) ~ 1/(x (1+x)^2) with x = r/rS.
fnfw = lambda x: 1./(x*((1.+x)**2.))
Gval = 4.517e-48 # Newton G in Mpc,seconds,Msun units
cval = 9.716e-15 # speed of light in Mpc,second units
# NFW density (M/L^3) as a function of distance from center of cluster
def rho_nfw(M,c,R):
    """Return rho(r) for an NFW halo of mass M, concentration c and radius R."""
    def density(r):
        return 1./(4.*np.pi)*((c/R)**3.)*M/f_c(c)*fnfw(c*r/R)
    return density
# NFW projected along line of sight (M/L^2) as a function of angle on the sky in radians
def proj_rho_nfw(theta,comL,M,c,R):
    """Projected NFW surface density at angle theta (radians), for comoving distance comL."""
    thetaS = R/c/comL
    amplitude = 1./(4.*np.pi)*((c/R)**2.)*M/f_c(c)
    return amplitude*(2.*gnfw(theta/thetaS))
# Generic profile projected along line of sight (M/L^2) as a function of angle on the sky in radians
# rhoFunc is density (M/L^3) as a function of distance from center of cluster
def projected_rho(thetas,comL,rhoFunc,pmaxN=2000,numps=500000):
    """Numerically project a 3D density profile along the line of sight.

    thetas: angles on the sky in radians; comL: comoving distance to the halo.
    pmaxN sets the half-range of the line-of-sight integral and numps the
    number of samples.
    """
    # default integration times are good to 0.01% for z=0.1 to 3
    # increase numps for lower z/theta and pmaxN for higher z/theta
    # g(x) = \int dl rho(sqrt(l**2+x**2)) = g(theta/thetaS)
    pzrange = np.linspace(-pmaxN,pmaxN,numps)
    g = np.array([np.trapz(rhoFunc(np.sqrt(pzrange**2.+(theta*comL)**2.)),pzrange) for theta in thetas])
    return g
def kappa_nfw_generic(theta,z,comLMpcOverh,M,c,R,windowAtLens):
    """Convergence kappa(theta) from the analytic NFW projection (proj_rho_nfw)."""
    return 4.*np.pi*Gval*(1+z)*comLMpcOverh*windowAtLens*proj_rho_nfw(theta,comLMpcOverh,M,c,R)/cval**2.
def kappa_generic(theta,z,comLMpcOverh,rhoFunc,windowAtLens,pmaxN=2000,numps=500000):
    """Convergence kappa(theta) for a generic density profile rhoFunc (numerical projection)."""
    # default integration times are good to 0.01% for z=0.1 to 3
    # increase numps for lower z/theta and pmaxN for higher z/theta
    return 4.*np.pi*Gval*(1+z)*comLMpcOverh*windowAtLens*projected_rho(theta,comLMpcOverh,rhoFunc,pmaxN,numps)/cval**2.
def kappa_from_rhofunc(M,c,R,theta,cc,z,rhoFunc=None):
    """Convergence profile from an arbitrary density function (defaults to NFW); sign follows M."""
    if rhoFunc is None:
        rhoFunc = rho_nfw(M,c,R)
    sign = 1. if M > 0. else -1.
    comS = cc.results.comoving_radial_distance(cc.cmbZ)*cc.h
    comL = cc.results.comoving_radial_distance(z)*cc.h
    lens_window = (comS-comL)/comS
    return sign*kappa_generic(theta,z,comL,rhoFunc,lens_window)
def kappa_nfw(M,c,R,theta,cc,z):
    """NFW convergence profile for a CMB source plane; sign follows the sign of M."""
    sign = 1. if M > 0. else -1.
    comS = cc.results.comoving_radial_distance(cc.cmbZ)*cc.h
    comL = cc.results.comoving_radial_distance(z)*cc.h
    lens_window = (comS-comL)/comS
    return sign*kappa_nfw_generic(theta,z,comL,np.abs(M),c,R,lens_window)
class SplitLensing(object):
    """Cross-only lensing 4-point estimator built from map splits.

    Combines quadratic-estimator "fragments" of the splits such that terms in
    which the same split appears more than once (and hence the noise bias)
    drop out. Requires at least 4 splits.
    """
    def __init__(self,shape,wcs,qest,XY="TT"):
        # Power-spectrum calculator for the given geometry.
        self.fc = maps.FourierCalc(shape,wcs)
        self.qest = qest
        self.est = XY
    def qpower(self,k1,k2):
        # Cross-power of two Fourier-space kappa maps.
        return self.fc.f2power(k1,k2)
    def qfrag(self,a,b):
        """Kappa fragment: run the quadratic estimator on FTs a (X leg) and b (Y leg)."""
        if self.est=='TT':
            k1 = self.qest.kappa_from_map(self.est,T2DData=a.copy(),T2DDataY=b.copy(),alreadyFTed=True,returnFt=True)
        elif self.est=='EE': # wrong! (feeds the same map into the T/E/B slots)
            k1 = self.qest.kappa_from_map(self.est,T2DData=a.copy(),E2DData=a.copy(),B2DData=a.copy(),
                                          T2DDataY=b.copy(),E2DDataY=b.copy(),B2DDataY=b.copy(),alreadyFTed=True,returnFt=True)
        else:
            # Previously fell through to an UnboundLocalError; fail clearly instead.
            raise ValueError("SplitLensing only supports est='TT' or 'EE', got %r" % self.est)
        return k1
    def cross_estimator(self,ksplits):
        """Return the split-based cross-only 4-point estimate from >=4 split FTs."""
        # BUGFIX: removed a redundant `splits = ksplits` assignment that was
        # immediately overwritten by the asanyarray conversion below.
        splits = np.asanyarray(ksplits)
        insplits = splits.shape[0]
        nsplits = float(insplits)
        s = np.mean(splits,axis=0)                 # coadd of all splits
        k = self.qfrag(s,s)                        # kappa from the coadd
        kiisum = 0.
        psum = 0.
        psum2 = 0.
        for i in range(insplits):
            mi = splits[i]
            ki = (self.qfrag(mi,s)+self.qfrag(s,mi))/2.   # symmetrized split x coadd
            kii = self.qfrag(mi,mi)                       # same-split auto term
            kiisum += kii
            kic = ki - (1./nsplits)*kii
            psum += self.qpower(kic,kic)
            for j in range(i+1,int(insplits)):
                mj = splits[j]
                kij = (self.qfrag(mi,mj)+self.qfrag(mj,mi))/2.
                psum2 += self.qpower(kij,kij)
        kc = k - (1./nsplits**2.)*kiisum
        # Unbiased combination; denominator requires nsplits >= 4.
        return (nsplits**4.*self.qpower(kc,kc)-4.*nsplits**2.*psum+4.*psum2)/nsplits/(nsplits-1.)/(nsplits-2.)/(nsplits-3.)
class QE(object):
    """Flat-sky quadratic estimator (appears to be an in-progress rewrite of
    Estimator; several methods read attributes that are not yet initialized
    here)."""
    def __init__(self,shape,wcs,cmb,xnoise,xbeam,ynoise=None,ybeam=None,ests=None,cmb_response=None):
        """
        shape, wcs: enmap geometry.
        xbeam/ybeam: scalar FWHM, 1D transfer function, or 2D beam map
        (Y defaults to X). The cmb/xnoise/ynoise/ests/cmb_response arguments
        are currently unused by the visible implementation.
        """
        modlmap = enmap.modlmap(shape,wcs)
        self.modlmap = modlmap
        self.shape = shape
        self.wcs = wcs
        # BUGFIX: the processed beams were previously left in local variables
        # and discarded, while norm() reads self.kbeamx / self.kbeamy; store
        # them on the instance.
        self.kbeamx = self._process_beam(xbeam)
        self.kbeamy = self._process_beam(ybeam) if ybeam is not None else self.kbeamx.copy()
def _process_beam(self,beam):
beam = np.asarray(beam)
if beam.ndim==0:
kbeam = maps.gauss_beam(beam,modlmap)
elif beam.ndim==1:
ells = np.arange(0,beam.size)
kbeam = maps.interp(ells,maps.gauss_beam(beam,ells))(self.modlmap)
elif beam.ndim==2:
kbeam = beam
assert kbeam.shape==self.shape
return kbeam
    def WXY(self,XY):
        """Gradient-leg Wiener filter for estimator pair XY.

        NOTE(review): reads several attributes (noiseXX2d, noiseX_is_total,
        lClFid2d, uClFid2d, fMaskXX, kBeamX, gradCut, lmax_T/lmax_P, modLMap,
        fmask_func) that the visible __init__ never sets -- this class looks
        unfinished; confirm before use.
        """
        X,Y = XY
        if Y=='B': Y='E'
        # The gradient spectrum uses the symmetric combination
        # (TE for ET, EE for BE).
        gradClXY = X+Y
        if XY=='ET': gradClXY = 'TE'
        if XY=='BE': gradClXY = 'EE'
        totnoise = self.noiseXX2d[X+X].copy() if self.noiseX_is_total else (self.lClFid2d[X+X].copy()+self.noiseXX2d[X+X].copy())
        W = self.fmask_func(np.nan_to_num(self.uClFid2d[gradClXY].copy()/totnoise)*self.kBeamX,self.fMaskXX[X+X])
        # Gradient cutoff and per-spectrum lmax cuts.
        W[self.modLMap>self.gradCut]=0.
        if X=='T':
            W[np.where(self.modLMap >= self.lmax_T)] = 0.
        else:
            W[np.where(self.modLMap >= self.lmax_P)] = 0.
        return W
    def WY(self,YY):
        """High-resolution-leg inverse-variance filter for spectrum YY ('TT','EE','BB')."""
        assert YY[0]==YY[1]
        totnoise = self.noiseYY2d[YY].copy() if self.noiseY_is_total else (self.lClFid2d[YY].copy()*self.kBeamY**2.+self.noiseYY2d[YY].copy())
        W = self.fmask_func(np.nan_to_num(1./totnoise)*self.kBeamY,self.fMaskYY[YY]) #* self.modLMap # !!!!!
        # NOTE(review): this unconditional lmax_T cut runs before the T/P
        # branch below, so polarization spectra are also capped at lmax_T;
        # it looks like a leftover line (compare WXY) -- confirm intended.
        W[np.where(self.modLMap >= self.lmax_T)] = 0.
        if YY[0]=='T':
            W[np.where(self.modLMap >= self.lmax_T)] = 0.
        else:
            W[np.where(self.modLMap >= self.lmax_P)] = 0.
        return W
def reconstruct_from_iqu(self,XYs,imapx,imapy=None,return_ft=True):
pass
def reconstruct(self,XYs,kmapx=None,kmapy=None,imapx=None,imapy=None,return_ft=True):
pass
def reconstruct_xy(self,XY,kmapx=None,kmapy=None,imapx=None,imapy=None,return_ft=True):
X,Y = XY
WXY = self.WXY(XY)
WY = self.WY(Y+Y)
lx = self.lxMap
ly = self.lyMap
if Y in ['E','B']:
phaseY = self.phaseY
else:
phaseY = 1.
phaseB = (int(Y=='B')*1.j)+(int(Y!='B'))
fMask = self.fmaskK
HighMapStar = ifft((self.kHigh[Y]*WY*phaseY*phaseB),axes=[-2,-1],normalize=True).conjugate()
kPx = fft(ifft(self.kGradx[X]*WXY*phaseY,axes=[-2,-1],normalize=True)*HighMapStar,axes=[-2,-1])
kPy = fft(ifft(self.kGrady[X]*WXY*phaseY,axes=[-2,-1],normalize=True)*HighMapStar,axes=[-2,-1])
rawKappa = ifft((1.j*lx*kPx) + (1.j*ly*kPy),axes=[-2,-1],normalize=True).real
AL = np.nan_to_num(self.AL[XY])
assert not(np.any(np.isnan(rawKappa)))
lmap = self.N.modLMap
kappaft = -self.fmask_func(AL*fft(rawKappa,axes=[-2,-1]))
if return_ft:
return kappaft
else:
kappa = ifft(kappaft,axes=[-2,-1],normalize=True).real
return kappa,kappaft
def norm(self,XY):
kbeamx = self.kbeamx
kbeamy = self.kbeamy
allTerms = []
if XY=='TT':
clunlenTTArrNow = self.uClNow2d['TT'].copy()
WXY = self.WXY('TT')*kbeamx*l1Scale
WY = self.WY('TT')*kbeamy*l2Scale
preG = WY
rfact = 2.**0.25
for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
preF = ell1*ell2*clunlenTTArrNow*WXY
preFX = ell1*WXY
preGX = ell2*clunlenTTArrNow*WY
calc = ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True)
+ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])
allTerms += [calc]
elif XY == 'EE':
clunlenEEArrNow = self.uClNow2d['EE'].copy()
sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
lx = self.lxMap
ly = self.lyMap
lxhat = self.lxHatMap
lyhat = self.lyHatMap
sinf = sin2phi(lxhat,lyhat)
sinsqf = sinf**2.
cosf = cos2phi(lxhat,lyhat)
cossqf = cosf**2.
WXY = self.WXY('EE')*kbeamx
WY = self.WY('EE')*kbeamy
rfact = 2.**0.25
for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
preF = trigfact*ell1*ell2*clunlenEEArrNow*WXY
preG = trigfact*WY
allTerms += [ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True),axes=[-2,-1])]
preFX = trigfact*ell1*clunlenEEArrNow*WY
preGX = trigfact*ell2*WXY
allTerms += [ell1*ell2*fft(ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])]
elif XY == 'EB':
clunlenEEArrNow = self.uClNow2d['EE'].copy()
clunlenBBArrNow = self.uClNow2d['BB'].copy()
sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
lx = self.lxMap
ly = self.lyMap
termsF = []
termsF.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
termsF.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
termsF.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
termsG = []
termsG.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
termsG.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
termsG.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
lxhat = self.lxHatMap
lyhat = self.lyHatMap
WXY = self.WXY('EB')*kbeamx
WY = self.WY('BB')*kbeamy
for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
preF = ellsq*clunlenEEArrNow*WXY
preG = WY
for termF,termG in zip(termsF,termsG):
allTerms += [ellsq*fft(ifft(termF(preF,lxhat,lyhat),axes=[-2,-1],normalize=True)
*ifft(termG(preG,lxhat,lyhat),axes=[-2,-1],normalize=True),axes=[-2,-1])]
elif XY == 'BE':
clunlenEEArrNow = self.uClNow2d['EE'].copy()
clunlenBBArrNow = self.uClNow2d['BB'].copy()
sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
lx = self.lxMap
ly = self.lyMap
termsF = []
termsF.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
termsF.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
termsF.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
termsG = []
termsG.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
termsG.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
termsG.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
lxhat = self.lxHatMap
lyhat = self.lyHatMap
WXY = self.WXY('BE')*kbeamx
WY = self.WY('EE')*kbeamy
for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
preF = WXY
preG = ellsq*clunlenEEArrNow*WY
for termF,termG in zip(termsF,termsG):
allTerms += [ellsq*fft(ifft(termF(preF,lxhat,lyhat),axes=[-2,-1],normalize=True)
*ifft(termG(preG,lxhat,lyhat),axes=[-2,-1],normalize=True),axes=[-2,-1])]
elif XY=='ET':
clunlenTEArrNow = self.uClNow2d['TE'].copy()
sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
lx = self.lxMap
ly = self.lyMap
lxhat = self.lxHatMap
lyhat = self.lyHatMap
sinf = sin2phi(lxhat,lyhat)
sinsqf = sinf**2.
cosf = cos2phi(lxhat,lyhat)
cossqf = cosf**2.
WXY = self.WXY('ET')*kbeamx
WY = self.WY('TT')*kbeamy
rfact = 2.**0.25
for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
preF = ell1*ell2*clunlenTEArrNow*WXY
preG = WY
allTerms += [ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True),axes=[-2,-1])]
for trigfact in [cosf,sinf]:
preFX = trigfact*ell1*clunlenTEArrNow*WY
preGX = trigfact*ell2*WXY
allTerms += [ell1*ell2*fft(ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])]
elif XY=='TE':
clunlenTEArrNow = self.uClNow2d['TE'].copy()
sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
lx = self.lxMap
ly = self.lyMap
lxhat = self.lxHatMap
lyhat = self.lyHatMap
sinf = sin2phi(lxhat,lyhat)
sinsqf = sinf**2.
cosf = cos2phi(lxhat,lyhat)
cossqf = cosf**2.
WXY = self.WXY('TE')*kbeamx
WY = self.WY('EE')*kbeamy
rfact = 2.**0.25
for ell1,ell2 in [(lx,lx),(ly,ly),(rfact*lx,rfact*ly)]:
for trigfact in [cossqf,sinsqf,np.sqrt(2.)*sinf*cosf]:
preF = trigfact*ell1*ell2*clunlenTEArrNow*WXY
preG = trigfact*WY
allTerms += [ell1*ell2*fft(ifft(preF,axes=[-2,-1],normalize=True)*ifft(preG,axes=[-2,-1],normalize=True),axes=[-2,-1])]
for trigfact in [cosf,sinf]:
preFX = trigfact*ell1*clunlenTEArrNow*WY
preGX = trigfact*ell2*WXY
allTerms += [ell1*ell2*fft(ifft(preFX,axes=[-2,-1],normalize=True)*ifft(preGX,axes=[-2,-1],normalize=True),axes=[-2,-1])]
elif XY == 'TB':
clunlenTEArrNow = self.uClNow2d['TE'].copy()
sin2phi = lambda lxhat,lyhat: (2.*lxhat*lyhat)
cos2phi = lambda lxhat,lyhat: (lyhat*lyhat-lxhat*lxhat)
lx = self.lxMap
ly = self.lyMap
termsF = []
termsF.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
termsF.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
termsF.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
termsG = []
termsG.append( lambda pre,lxhat,lyhat: pre * cos2phi(lxhat,lyhat)**2. )
termsG.append( lambda pre,lxhat,lyhat: pre * sin2phi(lxhat,lyhat)**2. )
termsG.append( lambda pre,lxhat,lyhat: pre * (1.j*np.sqrt(2.)*sin2phi(lxhat,lyhat)*cos2phi(lxhat,lyhat)) )
lxhat = self.lxHatMap
lyhat = self.lyHatMap
WXY = self.WXY('TB')*kbeamx
WY = self.WY('BB')*kbeamy
for ellsq in [lx*lx,ly*ly,np.sqrt(2.)*lx*ly]:
preF = ellsq*clunlenTEArrNow*WXY
preG = WY
for termF,termG in zip(termsF,termsG):
allTerms += [ellsq*fft(ifft(termF(preF,lxhat,lyhat),axes=[-2,-1],normalize=True)
*ifft(termG(preG,lxhat,lyhat),axes=[-2,-1],normalize=True),axes=[-2,-1])]
else:
print("ERROR: Unrecognized polComb")
sys.exit(1)
ALinv = np.real(np.sum( allTerms, axis = 0))
alval = np.nan_to_num(1. / ALinv)
if self.fmask is not None: alval = self.fmask_func(alval,self.fmask)
l4 = (lmap**2.) * ((lmap + 1.)**2.)
NL = l4 *alval/ 4.
NL[np.where(np.logical_or(lmap >= self.bigell, lmap <2.))] = 0.
retval = np.nan_to_num(NL.real * self.pixScaleX*self.pixScaleY )
if setNl:
self.Nlkk[XY] = retval.copy()
return retval * 2. * np.nan_to_num(1. / lmap/(lmap+1.))
class L1Integral(object):
    """
    Calculates I(L) = \int d^2l_1 f(l1,l2)
    on a grid.
    L is assumed to lie along the positive x-axis.
    This is ok for most integrals which are isotropic in L-space.
    The integrand has shape (num_Ls,Ny,Nx)
    """
    def __init__(self,Ls,degrees=None,pixarcmin=None,shape=None,wcs=None,pol=True):
        """Precompute the l1/l2 coordinate grids for the requested Ls.

        Ls must be a 1D array of multipoles; degrees/pixarcmin set a
        default rectangular geometry when shape/wcs are not supplied.
        """
        if (shape is None) or (wcs is None):
            if degrees is None: degrees = 10.
            if pixarcmin is None: pixarcmin = 2.0
            shape,wcs = maps.rgeo(degrees,pixarcmin)
        self.shape = shape
        self.wcs = wcs
        assert Ls.ndim==1
        # Broadcast Ls against the (Ny,Nx) grids -> (num_Ls,Ny,Nx).
        Ls = Ls[:,None,None]
        ly,lx = enmap.lmap(shape,wcs)
        self.l1x = lx.copy()
        self.l1y = ly.copy()
        l1y = ly[None,...]
        l1x = lx[None,...]
        l1 = enmap.modlmap(shape,wcs)[None,...]
        # l2 = L - l1 with L along the +x axis.
        l2y = -l1y
        l2x = Ls - l1x
        l2 = np.sqrt(l2x**2.+l2y**2.)
        # Dot products L.l1 and L.l2 (only x-components survive).
        self.Ldl1 = Ls*l1x
        self.Ldl2 = Ls*l2x
        self.l1 = l1
        self.l2 = l2
        # (Removed a leftover debug print of the grid shapes.)
        if pol:
            # Trig factors of the polarization rotation angle, evaluated
            # symbolically and then substituted on the grid.
            from orphics import symcoupling as sc
            sl1x,sl1y,sl2x,sl2y,sl1,sl2 = sc.get_ells()
            scost2t12,ssint2t12 = sc.substitute_trig(sl1x,sl1y,sl2x,sl2y,sl1,sl2)
            feed_dict = {'l1x':l1x,'l1y':l1y,'l2x':l2x,'l2y':l2y,'l1':l1,'l2':l2}
            cost2t12 = sc.evaluate(scost2t12,feed_dict)
            sint2t12 = sc.evaluate(ssint2t12,feed_dict)
            self.cost2t12 = cost2t12
            self.sint2t12 = sint2t12
    def integrate(self,integrand):
        """Trapezoid-integrate integrand (num_Ls,Ny,Nx) over the l1 plane.

        Returns an array of shape (num_Ls,).
        """
        # np.trapz was removed in NumPy 2.0 in favor of np.trapezoid;
        # support both so the code keeps working on old and new NumPy.
        trapz = np.trapz if hasattr(np, "trapz") else np.trapezoid
        integral = trapz(y=integrand,x=self.l1x[0,:],axis=-1)
        integral = trapz(y=integral,x=self.l1y[:,0],axis=-1)
        return integral
| msyriac/orphics | orphics/lensing.py | Python | bsd-2-clause | 104,980 | [
"Gaussian",
"TINKER"
] | fe8c0e6ea5256934736081563845d0a05e7de718878c11223b6a3dce22c4ff2e |
# coding: utf-8
import sublime
import re
st_version = int(sublime.version())
if st_version > 3000:
from JoomlaPack.lib.inflector.base import Base
else:
from lib.inflector.base import Base
class English(Base):
    '''
    Inflector for pluralize and singularize English nouns.
    This is the default Inflector.

    Results are memoized in self.cache (inherited from Base), which maps
    a singular form to its plural form.
    '''

    def __init__(self):
        # All state (the cache) lives in the Base class.
        Base.__init__(self)

    def pluralize(self, word):
        '''
        Pluralizes English nouns.

        Lookup order: cache, invariant nouns, irregular nouns, then the
        ordered regex rules (first match wins); falls back to
        Base.pluralize if nothing matches.
        '''
        # The rules are rebuilt on every call, so the in-place edits to
        # the replacement strings below never leak between calls.
        rules = {
            'regular': [
                ['(?i)(on)$', 'a'],
                ['(?i)(alumn|alg)a$', '\\1ae'],
                ['(?i)([ti])um$', '\\1a'],
                ['(?i)(ndum)$', 'nda'],
                ['(?i)(gen|visc)us$', '\\1era'],
                ['(?i)(corp)us$', '\\1ora'],
                ['(?i)(octop|vir|alumn|bacill|cact|foc|fung)us$', '\\1i'],
                ['(?i)(loc|nucle|radi|stimul|styl|succub)us$', '\\1i'],
                ['(?i)(syllab|termin|tor)us$', '\\1i'],
                ['(?i)(us)$', '\\1es'],
                ['(?i)(matr|vert|ind)(ix|ex)$', '\\1ices'],
                ['(?i)([m|l])ouse$', '\\1ice'],
                ['(?i)(hive)$', '\\1s'],
                ['(?i)(s|t|x)is$', '\\1es'],
                ['^(?i)(ox)$', '\\1en'],
                ['(?i)(quiz)$', '\\1zes'],
                ['(?i)(?:([^f])fe|([aelor])f)$', '\\1\\2ves'],
                ['(?i)(([p|m]atriar|monar|stoma|con|epo)ch)$', '\\1s'],
                ['(?i)(x|ch|s|ss|sh|z)$', '\\1es'],
                ['(?i)([^aeiouy]o)$', '\\1es'],
                ['(?i)([^aeiouy]|qu)y$', '\\1ies'],
                # Catch-all: just append "s".
                ['(?i)$', 's']
            ],
            'irregular': {
                'albino': 'albinos',
                'armadillo': 'armadillos',
                'auto': 'autos',
                'cello': 'cellos',
                'chief': 'chiefs',
                'child': 'children',
                'combo': 'combos',
                'ego': 'egos',
                'foot': 'feet',
                'goose': 'geese',
                'halo': 'halos',
                'inferno': 'infernos',
                'lasso': 'lassos',
                'man': 'men',
                'memento': 'mementos',
                'memo': 'memos',
                'person': 'people',
                'piano': 'pianos',
                'photo': 'photos',
                'pro': 'pros',
                'safe': 'safes',
                'sex': 'sexes',
                'silo': 'silos',
                'solo': 'solos',
                'staff': 'staves',
                'taco': 'tacos',
                'tooth': 'teeth',
                'tuxedo': 'tuxedos',
                'typo': 'typos',
                'veto': 'vetos',
                'yo': 'yos'
            },
            # Invariant/uncountable nouns: returned unchanged.
            'countable': [
                'aircraft',
                'cannon',
                'deer',
                'elk',
                'equipment',
                'fish',
                'glasses',
                'information',
                'money',
                'moose',
                'news',
                'pants',
                'pliers',
                'politics',
                'rice',
                'savings',
                'scissors',
                'series',
                'sheep',
                'species',
                'swine'
            ]
        }

        word = word.lower()

        # Cache hit on either side returns the plural, so pluralizing an
        # already-plural cached word is idempotent.
        for key, value in self.cache.items():
            if word == key or word == value:
                return value

        if word in rules['countable']:
            self.cache[word] = word
            return word

        for key, value in rules['irregular'].items():
            if word == key or word == value:
                self.cache[key] = value
                return value

        for rule in range(0, len(rules['regular'])):
            match = re.search(rules['regular'][rule][0], word,
                              re.IGNORECASE)
            if match:
                groups = match.groups()
                # Strip backreferences to groups that did not take part
                # in the match before substituting.
                for k in range(0, len(groups)):
                    if groups[k] is None:
                        rules['regular'][rule][1] = rules['regular'][
                            rule][1].replace('\\' + str(k + 1), '')

                self.cache[word] = re.sub(rules['regular'][rule][0],
                                          rules['regular'][rule][1],
                                          word)
                return self.cache[word]

        return Base.pluralize(self, word)

    def singularize(self, word):
        '''
        Singularizes English nouns.

        Mirror image of pluralize(): cache, invariant nouns, irregular
        plurals, ordered regex rules, then Base.singularize.
        '''
        rules = {
            'regular': [
                ['(?i)([ti])a$', '\\1um'],
                ['(?i)(alumn|alg)ae$', '\\1a'],
                ['(?i)^(ox)en', '\\1'],
                ['(?i)a$', 'on'],
                ['(?i)(nda)$', 'ndum'],
                ['(?i)(gen|visc)era$', '\\1us'],
                ['(?i)(corp)ora$', '\\1us'],
                ['(?i)(octop|vir|alumn|bacill|cact|foc|fung)i$', '\\1us'],
                ['(?i)(loc|nucle|radi|stimul|styl|succub)i$', '\\1us'],
                ['(?i)(syllab|termin|tor)i$', '\\1us'],
                ['(?i)(quiz)zes$', '\\1'],
                ['(?i)([m|l])ice$', '\\1ouse'],
                ['(?i)(matr)ices$', '\\1ix'],
                ['(?i)(vert|ind)ices$', '\\1ex'],
                ['(?i)(test|ax|cris)es$', '\\1is'],
                ['(?i)(m)ovies$', '\\1ovie'],
                ['(?i)([aelor])ves$', '\\1f'],
                ['(?i)(tive)s$', '\\1'],
                ['(?i)(hive)s$', '\\1'],
                ['(?i)([^f])ves$', '\\1fe'],
                ['(?i)(x|ch|ss|sh|zz)es$', '\\1'],
                ['(?i)([^aeiouy]|qu)ies$', '\\1y'],
                ['(?i)((a)naly|(b)a|(d)iagno|(p)arenthe)ses$', '\\1sis'],
                ['(?i)((p)rogno|(s)ynop|(t)he)ses$', '\\1\\2sis'],
                ['(?i)(penis|alias|status)es$', '\\1'],
                ['(?i)(bus)es$', '\\1'],
                ['(?i)(shoe)s$', '\\1'],
                ['(?i)(o)es$', '\\1'],
                # Catch-all: strip a trailing "s".
                ['(?i)s$', '']
            ],
            # Keys are plurals, values singulars.  The commented-out
            # entries are already handled by the regular rules above.
            'irregular': {
                # 'albinos': 'albino',
                # 'armadillos': 'armadillo',
                # 'autos': 'auto',
                # 'cellos': 'cello',
                # 'chiefs': 'chief',
                'children': 'child',
                # 'combos': 'combo',
                # 'egos': 'ego',
                'feet': 'foot',
                'geese': 'goose',
                # 'halos': 'halo',
                # 'infernos': 'inferno',
                # 'lassos': 'lasso',
                'men': 'man',
                # 'mementos': 'memento',
                # 'memos': 'memo',
                # 'moves': 'move',
                'people': 'person',
                # 'pianos': 'piano',
                # 'photos': 'photo',
                # 'pros': 'pro',
                # 'safes': 'safe',
                # 'sexes': 'sex',
                # 'silos': 'silo',
                # 'solos': 'solo',
                'staves': 'staff',
                # 'tacos': 'taco',
                'teeth': 'tooth',
                # 'tuxedos': 'tuxedo',
                # 'typos': 'typo',
                # 'vetos': 'veto',
                # 'yos': 'yo'
            },
            # Invariant/uncountable nouns: returned unchanged.
            'countable': [
                'aircraft',
                'cannon',
                'deer',
                'elk',
                'equipment',
                'fish',
                'glasses',
                'information',
                'money',
                'moose',
                'news',
                'pants',
                'pliers',
                'politics',
                'rice',
                'savings',
                'scissors',
                'series',
                'sheep',
                'species',
                'swine'
            ]
        }

        word = word.lower()

        # The cache maps singular -> plural, so here a hit returns the key.
        for key, value in self.cache.items():
            if word == key or word == value:
                return key

        if word in rules['countable']:
            self.cache[word] = word
            return word

        for key, value in rules['irregular'].items():
            if word == key or word == value:
                self.cache[value] = key
                return value

        for rule in range(0, len(rules['regular'])):
            match = re.search(rules['regular'][rule][0], word)
            if match is not None:
                groups = match.groups()
                # Strip backreferences to groups that did not take part
                # in the match before substituting.
                for k in range(0, len(groups)):
                    if groups[k] is None:
                        rules['regular'][rule][1] = rules['regular'][
                            rule][1].replace('\\' + str(k + 1), '')

                key = re.sub(rules['regular'][rule][0],
                             rules['regular'][rule][1],
                             word)
                self.cache[key] = word
                return key

        return Base.singularize(self, word)

    def __str__(self):
        return "JoomlaPack: English Inflector"
| renebentes/JoomlaPack | lib/inflector/english.py | Python | mit | 8,988 | [
"Elk",
"MOOSE"
] | 7cdcc17d1017586d5a414457388c3ccbd19ab72c0694b936c7d95b23f9e87b78 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffxparser(RPackage):
    """Affymetrix File Parsing SDK.

    Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR). It
    provides methods for fast and memory efficient parsing of Affymetrix
    files using the Affymetrix' Fusion SDK. Both ASCII- and binary-based
    files are supported. Currently, there are methods for reading chip
    definition file (CDF) and a cell intensity file (CEL). These files can
    be read either in full or in part. For example, probe signals from a few
    probesets can be extracted very quickly from a set of CEL files into a
    convenient list structure."""

    # Upstream Bioconductor package; sources are fetched from the
    # Bioconductor git mirror rather than a tarball.
    homepage = "https://bioconductor.org/packages/affxparser"
    git      = "https://git.bioconductor.org/packages/affxparser.git"

    # Pinned Bioconductor release commits, newest first.
    version('1.56.0', commit='20d27701ad2bdfacf34d857bb8ecb4f505b4d056')
    version('1.54.0', commit='dce83d23599a964086a84ced4afd13fc43e7cd4f')
    version('1.52.0', commit='8e0c4b89ee1cb4ff95f58a5dd947249dc718bc58')
    version('1.50.0', commit='01ef641727eadc2cc17b5dbb0b1432364436e3d5')
    version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')

    # Needs R >= 2.14.0 both to build the package and to run it.
    depends_on('r@2.14.0:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-affxparser/package.py | Python | lgpl-2.1 | 1,412 | [
"Bioconductor"
] | 54fa85642e8180e5d51a79f509fd7c0465f3f32a120f6c39d0df8ca08938e179 |
import numpy as np
from ase import Atoms, units
from ase.data import chemical_symbols, atomic_numbers
from ase.io import PickleTrajectory
from ase.md import VelocityVerlet
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.lattice.cubic import SimpleCubic
from asap3 import Morse
from asap3.testtools import ReportTest
#from calculators.morse import MorsePotential as Morse
# Define constants and calculator.
# Pairwise Morse parameters for the Ru-Ar system; rows/columns follow the
# order of `elements` (index 0 = Ru, index 1 = Ar).
elements = np.array([atomic_numbers['Ru'], atomic_numbers['Ar']])
epsilon = np.array([[5.720, 0.092], [0.092, 0.008]])  # well depths -- presumably eV; TODO confirm units
alpha = np.array([[1.475, 2.719], [2.719, 1.472]])    # inverse-width (steepness) parameters
rmin = np.array([[2.110, 2.563], [2.563, 4.185]])     # pair separations at the potential minima
# Cutoff: the largest minimum plus several decay lengths of the widest well.
rcut = rmin.max() + 6.0 / alpha.min()
def TestPotentialCutoff():
print "Running TestPotentialCutoff..."
for e1 in elements:
for e2 in elements:
calc = Morse(elements, epsilon, alpha, rmin)
atoms = Atoms([e1, e2], [[0.0, 0.0, 0.0], [rcut + 1.0, 0.0, 0.0]])
atoms.set_calculator(calc)
energy = atoms.get_potential_energy()
s1, s2 = chemical_symbols[e1], chemical_symbols[e2]
ReportTest("Energy for %s-%s with r > rcut" % (s1, s2),
energy, 0.0, 1e-12, silent=True)
def TestPotentialMinimum():
print "Running TestPotentialMinimum..."
for i, e1 in enumerate(elements):
for j, e2 in enumerate(elements):
calc = Morse(elements, epsilon, alpha, rmin)
atoms = Atoms([e1, e2], [[0.0, 0.0, 0.0], [rmin[i, j], 0.0, 0.0]])
atoms.set_calculator(calc)
energy = atoms.get_potential_energy()
s1, s2 = chemical_symbols[e1], chemical_symbols[e2]
ReportTest("Energy for %s-%s with r = rmin" % (s1, s2),
energy, -epsilon[i, j], 1e-2, silent=True)
def TestEnergyConservation():
print "Running TestEnergyConservation..."
calc = Morse(elements, epsilon, alpha, rmin)
atoms = SimpleCubic('Ar', size=(10,10,10), latticeconstant=5.0)
n = 0
while n < 100:
i = np.random.randint(len(atoms)-1)
if atoms[i].number != atomic_numbers['Ru']:
atoms[i].number = atomic_numbers['Ru']
n += 1
atoms.set_calculator(calc)
# Set initial momentum
MaxwellBoltzmannDistribution(atoms, 300*units.kB)
# Run dynamics
dyn = VelocityVerlet(atoms, 1.0 * units.fs, logfile='test-energy.dat', loginterval=10)
dyn.run(10)
etot = (atoms.get_potential_energy() + atoms.get_kinetic_energy())/len(atoms)
print "%-9s %-9s %-9s" % ("Epot", "Ekin", "Sum")
for i in range(25):
if i:
dyn.run(100)
epot = atoms.get_potential_energy()/len(atoms)
ekin = atoms.get_kinetic_energy()/len(atoms)
print "%9.5f %9.5f %9.5f" % (epot, ekin, epot+ekin)
ReportTest("Step %i." % (i,), epot+ekin, etot, 1e-3, silent=True)
# Execute the test suite and print the pass/fail summary.
TestPotentialCutoff()
TestPotentialMinimum()
TestEnergyConservation()
ReportTest.Summary()
| auag92/n2dm | Asap-3.8.4/Test/Morse.py | Python | mit | 2,979 | [
"ASE"
] | 5967e71abd33bae9bec456adea08c790974d16481eec1a53ebda30b13894c4b5 |
from gpaw.transport.analysor import Transport_Plotter
import numpy as np
import sys
from pylab import *

# Plot the transmission coefficient of one bias step of a GPAW transport
# calculation.  Usage: tc.py <bias_step>.  A '*' suffix on the step
# number (e.g. '3*') selects Transport_Plotter's fd=0 mode -- presumably
# an alternative data source; TODO confirm against Transport_Plotter.
if '*' in sys.argv[1]:
    fd=0
    bias_step = int(sys.argv[1].split('*')[0])
else:
    fd=1
    bias_step = int(sys.argv[1])

plotter=Transport_Plotter(fd)
plotter.plot_setup()
tc = plotter.tc(bias_step)
# Energy axis matching the transmission curve: 201 points in [-5, 5] eV.
ee=np.linspace(-5,5,201)
plot(ee, tc, 'b-o')
# Optional spline refinement of the curve; dense_level=1 leaves it
# disabled (the block below only runs for dense_level > 1).
dense_level=1
if dense_level>1:
    from scipy import interpolate
    tck = interpolate.splrep(ee, tc, s=0)
    numb = len(ee)
    newee = np.linspace(ee[0], ee[-1], numb * (dense_level))
    newtc = interpolate.splev(newee, tck, der=0)
    ee = newee
    tc = newtc
    plot(ee, tc, 'r-o')
# Mark the two bias-window edges with vertical dashed lines spanning
# from zero up to the maximum transmission.
eye = np.zeros([10, 1]) + 1
bias = plotter.get_info('bias', bias_step)
f1 = bias[0] * eye
f2 = bias[1] * eye
a1 = np.max(tc)
l1 = np.linspace(0, a1, 10)
plot(f1, l1, 'r--')
plot(f2, l1, 'r--')
xlabel('Energy(eV)')
ylabel('Transmission Coefficient')
show()
| qsnake/gpaw | doc/documentation/transport/transport_analysis_scripts/tc.py | Python | gpl-3.0 | 914 | [
"GPAW"
] | 843b48b6079788fc1aac4911ad81429c9b65078d93f8b131ac16c03960dd3547 |
""" The Bdii2CSAgent performs checking BDII for availability of CE
resources for a given or any configured VO. It detects resources not yet
present in the CS and notifies the administrators.
For the CEs already present in the CS, the agent is updating
if necessary settings which were changed in the BDII recently
The following options can be set for the Bdii2CSAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN Bdii2CSAgent
:end-before: ##END
:dedent: 2
:caption: Bdii2CSAgent options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getQueues, getCESiteMapping
from DIRAC.ConfigurationSystem.Client.Utilities import getGridCEs, getSiteUpdates
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Glue2 import getGlue2CEInfo
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
class Bdii2CSAgent(AgentModule):
    """Agent that keeps the DIRAC Configuration System in sync with BDII:
    it reports CEs unknown to the CS and applies setting updates for CEs
    already registered."""

    def __init__(self, *args, **kwargs):
        """Defines default parameters"""
        super(Bdii2CSAgent, self).__init__(*args, **kwargs)

        # Notification e-mail endpoints; mails are sent only if both are set.
        self.addressTo = ""
        self.addressFrom = ""
        # VOMS names of the VOs this agent manages.
        self.voName = []
        self.subject = self.am_getModuleParam("fullName")
        # Fallback BDII hosts queried in addition to self.host.
        self.alternativeBDIIs = []
        # Per-VO caches of BDII query results, reset on every cycle.
        self.voBdiiCEDict = {}
        self.voBdiiSEDict = {}
        self.host = "cclcgtopbdii01.in2p3.fr:2170"
        self.injectSingleCoreQueues = False
        self.csAPI = None

        # What to get
        self.processCEs = True
        self.selectedSites = []

        # Update the CS or not?
        self.dryRun = False

    def initialize(self):
        """Gets run parameters from the configuration"""

        self.addressTo = self.am_getOption("MailTo", self.addressTo)
        self.addressFrom = self.am_getOption("MailFrom", self.addressFrom)
        # Create a list of alternative bdii urls
        self.alternativeBDIIs = self.am_getOption("AlternativeBDIIs", self.alternativeBDIIs)
        self.host = self.am_getOption("Host", self.host)
        self.injectSingleCoreQueues = self.am_getOption("InjectSingleCoreQueues", self.injectSingleCoreQueues)
        # Check if the bdii url is appended by a port number, if not append the default 2170
        for index, url in enumerate(self.alternativeBDIIs):
            if not url.split(":")[-1].isdigit():
                self.alternativeBDIIs[index] += ":2170"
        if self.addressTo and self.addressFrom:
            self.log.info("MailTo", self.addressTo)
            self.log.info("MailFrom", self.addressFrom)
        if self.alternativeBDIIs:
            self.log.info("AlternativeBDII URLs:", self.alternativeBDIIs)

        self.processCEs = self.am_getOption("ProcessCEs", self.processCEs)
        self.selectedSites = self.am_getOption("SelectedSites", [])
        self.dryRun = self.am_getOption("DryRun", self.dryRun)

        self.voName = self.am_getOption("VirtualOrganization", self.voName)
        if not self.voName:
            # Legacy option name kept for backward compatibility.
            self.voName = self.am_getOption("VO", [])
        if not self.voName or (len(self.voName) == 1 and self.voName[0].lower() == "all"):
            # Get all VOs defined in the configuration
            self.voName = []
            result = getVOs()
            if result["OK"]:
                vos = result["Value"]
                for vo in vos:
                    vomsVO = getVOOption(vo, "VOMSName")
                    if vomsVO:
                        self.voName.append(vomsVO)

        if self.voName:
            self.log.info("Agent will manage VO(s) %s" % self.voName)
        else:
            self.log.fatal("VirtualOrganization option not defined for agent")
            return S_ERROR()

        self.csAPI = CSAPI()
        return self.csAPI.initialize()

    def execute(self):
        """General agent execution method"""
        # Drop the per-cycle BDII cache so every cycle queries fresh data.
        self.voBdiiCEDict = {}

        # Get a "fresh" copy of the CS data
        result = self.csAPI.downloadCSData()
        if not result["OK"]:
            self.log.warn("Could not download a fresh copy of the CS data", result["Message"])

        # Refresh the configuration from the master server
        gConfig.forceRefresh(fromMaster=True)

        if self.processCEs:
            self.__lookForNewCEs()
            self.__updateCEs()
        return S_OK()

    def __lookForNewCEs(self):
        """Look up BDII for CEs not yet present in the DIRAC CS"""

        bannedCEs = self.am_getOption("BannedCEs", [])

        for vo in self.voName:
            # get the known CEs for a given VO, so we can know the unknowns, or no longer supported,
            # for a VO
            res = getQueues(community=vo)
            if not res["OK"]:
                return res

            knownCEs = set()
            for _site, ces in res["Value"].items():
                knownCEs.update(ces)
            # Banned CEs are treated as known so they are never reported.
            knownCEs.update(bannedCEs)

            result = self.__getGlue2CEInfo(vo)
            if not result["OK"]:
                continue
            bdiiInfo = result["Value"]
            result = getGridCEs(vo, bdiiInfo=bdiiInfo, ceBlackList=knownCEs)
            if not result["OK"]:
                self.log.error("Failed to get unused CEs", result["Message"])
                continue  # next VO
            siteDict = result["Value"]
            unknownCEs = set(result["UnknownCEs"]) - set(bannedCEs)

            # Assemble a human-readable report of the new CEs per site.
            body = ""
            for site in siteDict:
                newCEs = set(siteDict[site])  # pylint: disable=no-member
                if not newCEs:
                    continue

                ceString = ""
                for ce in newCEs:
                    queueString = ""
                    ceInfo = bdiiInfo[site]["CEs"][ce]
                    newCEString = "CE: %s, GOCDB Site Name: %s" % (ce, site)
                    systemTuple = siteDict[site][ce]["System"]
                    osString = "%s_%s_%s" % (systemTuple)
                    newCEString = "\n%s\n%s\n" % (newCEString, osString)
                    for queue in ceInfo["Queues"]:
                        queueStatus = ceInfo["Queues"][queue].get("GlueCEStateStatus", "UnknownStatus")
                        if "production" in queueStatus.lower():
                            ceType = ceInfo["Queues"][queue].get("GlueCEImplementationName", "")
                            queueString += "   %s %s %s\n" % (queue, queueStatus, ceType)
                    # Only report CEs that expose at least one production queue.
                    if queueString:
                        ceString += newCEString
                        ceString += "Queues:\n"
                        ceString += queueString

                if ceString:
                    body += ceString

            if siteDict:
                body = "\nWe are glad to inform You about new CE(s) possibly suitable for %s:\n" % vo + body
                body += "\n\nTo suppress information about CE add its name to BannedCEs list.\n"
                body += "Add new Sites/CEs for vo %s with the command:\n" % vo
                body += "dirac-admin-add-resources --vo %s --ce\n" % vo

            if unknownCEs:
                body += "\n\n"
                body += "There is no (longer) information about the following CEs for the %s VO.\n" % vo
                body += "\n".join(sorted(unknownCEs))
                body += "\n\n"

            if body:
                # Log the report, and mail it if endpoints are configured.
                self.log.info(body)
                if self.addressTo and self.addressFrom:
                    notification = NotificationClient()
                    result = notification.sendMail(
                        self.addressTo, self.subject, body, self.addressFrom, localAttempt=False
                    )
                    if not result["OK"]:
                        self.log.error("Can not send new site notification mail", result["Message"])

        return S_OK()

    def __getGlue2CEInfo(self, vo):
        """Return (and cache per cycle) the Glue2 CE information for a VO,
        merging results from the main BDII host and any alternatives."""

        if vo in self.voBdiiCEDict:
            return S_OK(self.voBdiiCEDict[vo])
        self.log.info("Check for available CEs for VO", vo)
        totalResult = S_OK({})
        message = ""

        mainResult = getGlue2CEInfo(vo, host=self.host)
        if not mainResult["OK"]:
            self.log.error("Failed getting information from default bdii", mainResult["Message"])
            message = mainResult["Message"]

        # Alternatives are applied in reverse order so that entries from
        # the main host (merged last) take precedence on key clashes.
        for bdii in reversed(self.alternativeBDIIs):
            resultAlt = getGlue2CEInfo(vo, host=bdii)
            if resultAlt["OK"]:
                totalResult["Value"].update(resultAlt["Value"])
            else:
                self.log.error("Failed getting information from %s " % bdii, resultAlt["Message"])
                message = (message + "\n" + resultAlt["Message"]).strip()

        if mainResult["OK"]:
            totalResult["Value"].update(mainResult["Value"])

        if not totalResult["Value"] and message:  # Dict is empty and we have an error message
            self.log.error("Error during BDII request", message)
            totalResult = S_ERROR(message)
        else:
            self.voBdiiCEDict[vo] = totalResult["Value"]
            self.__purgeSites(totalResult["Value"])

        return totalResult

    def __updateCEs(self):
        """Update the Site/CE/queue settings in the CS if they were changed in the BDII"""

        bdiiChangeSet = set()
        bannedCEs = self.am_getOption("BannedCEs", [])

        for vo in self.voName:
            result = self.__getGlue2CEInfo(vo)
            if not result["OK"]:
                continue
            ceBdiiDict = result["Value"]
            # Never propagate settings for explicitly banned CEs.
            for _siteName, ceDict in ceBdiiDict.items():
                for bannedCE in bannedCEs:
                    ceDict["CEs"].pop(bannedCE, None)
            result = getSiteUpdates(vo, bdiiInfo=ceBdiiDict, log=self.log, onecore=self.injectSingleCoreQueues)
            if not result["OK"]:
                continue
            bdiiChangeSet = bdiiChangeSet.union(result["Value"])

        # We have collected all the changes, consolidate VO settings
        result = self.__updateCS(bdiiChangeSet)
        return result

    def __purgeSites(self, ceBdiiDict):
        """Remove all sites that are not in self.selectedSites.

        Modifies the ceBdiiDict!
        """
        if not self.selectedSites:
            return
        for site in list(ceBdiiDict):
            ces = list(ceBdiiDict[site]["CEs"])
            if not ces:
                self.log.error("No CE information for site:", site)
                continue
            # Map any of the site's CEs to a DIRAC site name; the first
            # CE found in the CS determines the name.
            siteInCS = "Not_In_CS"
            for ce in ces:
                res = getCESiteMapping(ce)
                if not res["OK"]:
                    self.log.error("Failed to get DIRAC site name for ce", "%s: %s" % (ce, res["Message"]))
                    continue
                # if the ce is not in the CS the returned value will be empty
                if ce in res["Value"]:
                    siteInCS = res["Value"][ce]
                    break
            self.log.debug("Checking site %s (%s), aka %s" % (site, ces, siteInCS))
            if siteInCS in self.selectedSites:
                continue
            self.log.info("Dropping site %s, aka %s" % (site, siteInCS))
            ceBdiiDict.pop(site)
        return

    def __updateCS(self, bdiiChangeSet):
        """Merge per-queue VO lists and commit the collected changes to
        the CS (or just show the diff when DryRun is enabled)."""

        queueVODict = {}
        changeSet = set()
        for entry in bdiiChangeSet:
            section, option, _value, new_value = entry
            if option == "VO":
                # VO additions for the same queue section are unioned
                # rather than overwritten.
                queueVODict.setdefault(section, set())
                queueVODict[section] = queueVODict[section].union(set(new_value.split(",")))
            else:
                changeSet.add(entry)
        for section, VOs in queueVODict.items():  # can be an iterator
            changeSet.add((section, "VO", "", ",".join(VOs)))

        if changeSet:
            changeList = sorted(changeSet)
            body = "\n".join(["%s/%s %s -> %s" % entry for entry in changeList])
            if body and self.addressTo and self.addressFrom:
                notification = NotificationClient()
                result = notification.sendMail(self.addressTo, self.subject, body, self.addressFrom, localAttempt=False)

            if body:
                self.log.info("The following configuration changes were detected:")
                self.log.info(body)

            for section, option, value, new_value in changeSet:
                if value == "Unknown" or not value:
                    self.csAPI.setOption(cfgPath(section, option), new_value)
                else:
                    self.csAPI.modifyValue(cfgPath(section, option), new_value)

            if self.dryRun:
                self.log.info("Dry Run: CS won't be updated")
                self.csAPI.showDiff()
            else:
                result = self.csAPI.commit()
                if not result["OK"]:
                    self.log.error("Error while committing to CS", result["Message"])
                else:
                    self.log.info("Successfully committed %d changes to CS" % len(changeList))
                return result
        else:
            self.log.info("No changes found")
        return S_OK()
| ic-hep/DIRAC | src/DIRAC/ConfigurationSystem/Agent/Bdii2CSAgent.py | Python | gpl-3.0 | 13,380 | [
"DIRAC"
] | 975bf488349d6c7e41643a54dac727fe38ca2c8433550d4f6e6fa583a1c8de41 |
#!/usr/bin/env python3
#Copyright 2018 OSIsoft, LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#<http://www.apache.org/licenses/LICENSE-2.0>
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# NOTE: this script was designed using the v1.0
# version of the OMF specification, as outlined here:
# http://omf-docs.readthedocs.io/en/v1.0/index.html
# NOTE: this script was designed to run on BeagleBone Blue. To learn
# more about the BeagleBone Blue, visit https://beagleboard.org/blue, where
# you can also find purchasing links
# ************************************************************************
# Import necessary packages
# ************************************************************************
# Import packages
import json
import time
import platform
import socket
import datetime
#import random # Used to generate sample data; comment out this line if real data is used
import requests
import urllib3 # Used to disable warnings about insecure SSL (optional)
# Import any special packages needed for a particular hardware platform,
# for example, for a Raspberry PI,
# import RPi.GPIO as GPIO
# This automatically initizalizes the robotics cape; to install this library, see
# https://github.com/mcdeoliveira/rcpy
import rcpy
# ************************************************************************
# Specify constant values (names, target URLS, et centera) needed by the script
# ************************************************************************
# Specify the name of this device, or simply use the hostname; this is the name
# of the PI AF Element that will be created, and it'll be included in the names
# of PI Points that get created as well
#DEVICE_NAME = (socket.gethostname()) + ""
DEVICE_NAME = "BBBlue Robot Controller 01"
# Specify a device location (optional); this will be added as a static
# string attribute to the AF Element that is created
DEVICE_LOCATION = "IoT Test Lab"
# Specify the name of the Assets type message; this will also end up becoming
# part of the name of the PI AF Element template that is created; for example, this could be
# "AssetsType_RaspberryPI" or "AssetsType_Dragonboard"
# You will want to make this different for each general class of IoT module that you use
ASSETS_MESSAGE_TYPE_NAME = DEVICE_NAME + "_assets_type"
#ASSETS_MESSAGE_TYPE_NAME = "assets_type" + "IoT Device Model 74656" # An example
# Similarly, specify the name of for the data values type; this should likewise be unique
# for each general class of IoT device--for example, if you were running this
# script on two different devices, each with different numbers and kinds of sensors,
# you'd specify a different data values message type name
# when running the script on each device. If both devices were the same,
# you could use the same DATA_VALUES_MESSAGE_TYPE_NAME
DATA_VALUES_MESSAGE_TYPE_NAME = DEVICE_NAME + "_data_values_type"
#DATA_VALUES_MESSAGE_TYPE_NAME = "data_values_type" + "IoT Device Model 74656" # An example
# Store the id of the container that will be used to receive live data values
DATA_VALUES_CONTAINER_ID = DEVICE_NAME + "_data_values_container"
# Specify the number of seconds to sleep in between value messages
NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES = 2
# Specify whether you're sending data to OSIsoft cloud services or not
# (when True, the static-type and asset/link messages below are skipped)
SEND_DATA_TO_OSISOFT_CLOUD_SERVICES = False
# Specify the address of the destination endpoint; it should be of the form
# http://<host/ip>:<port>/ingress/messages
# For example, "https://myservername:8118/ingress/messages"
TARGET_URL = "https://lopezpiserver:777/ingress/messages"
# !!! Note: if sending data to OSIsoft cloud services,
# uncomment the below line in order to set the target URL to the OCS OMF endpoint:
#TARGET_URL = "https://qi-data.osisoft.com/api/omf"
# Specify the producer token, a unique token used to identify and authorize a given OMF producer. Consult the OSIsoft Cloud Services or PI Connector Relay documentation for further information.
PRODUCER_TOKEN = "OMFv1"
#PRODUCER_TOKEN = "778408" # An example
# !!! Note: if sending data to OSIsoft cloud services, the producer token should be the
# security token obtained for a particular Tenant and Publisher; see
# http://qi-docs.readthedocs.io/en/latest/OMF_Ingress_Specification.html#headers
#PRODUCER_TOKEN = ""
# ************************************************************************
# Specify options for sending web requests to the target
# ************************************************************************
# If self-signed certificates are used (true by default),
# do not verify HTTPS SSL certificates; normally, leave this as is
VERIFY_SSL = False
# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30
# ************************************************************************
# Helper function: run any code needed to initialize local sensors, if necessary for this hardware
# ************************************************************************
# Below is where you can initialize any global variables that are needed by your applicatio;
# certain sensors, for example, will require global interface or sensor variables
# myExampleInterfaceKitGlobalVar = None
# The following function is where you can insert specific initialization code to set up
# sensors for a particular IoT module or platform
def initialize_sensors():
    """Prepare the BeagleBone Blue's on-board IMU for reading.

    Puts the robotics cape library into its RUNNING state and brings up the
    MPU-9250 with the magnetometer enabled, so that later reads also return
    'mag' data.  Failures are printed rather than raised, letting the rest
    of the script continue.
    """
    print("\n--- Sensors initializing...")
    try:
        # Mark the robotics cape as active before touching the IMU.
        rcpy.set_state(rcpy.RUNNING)
        # Enable the magnetometer explicitly; it is off by default.
        rcpy.mpu9250.initialize(enable_magnetometer=True)
        print("--- Sensors initialized!")
    except Exception as ex:
        # Report (but do not re-raise) any initialization problem.
        print(str(datetime.datetime.now()) + " Error when initializing sensors: " + str(ex))
# ************************************************************************
# Helper function: REQUIRED: create a JSON message that contains sensor data values
# ************************************************************************
# The following function you can customize to allow this script to send along any
# number of different data values, so long as the values that you send here match
# up with the values defined in the "DataValuesType" OMF message type (see the next section)
# In this example, this function simply generates two random values for the sensor values,
# but here is where you could change this function to reference a library that actually
# reads from sensors attached to the device that's running the script
def create_data_values_message():
    """Sample the BeagleBone Blue's built-in IMU and return an OMF data
    message (a one-element list) addressed to DATA_VALUES_CONTAINER_ID.
    """
    # On-die IMU temperature arrives in Celsius; convert to Fahrenheit.
    board_temperature_f = rcpy.mpu9250.read_imu_temp() * 9 / 5 + 32
    # One combined reading: dict holding 'accel', 'gyro', and 'mag' triplets,
    # each indexed 0/1/2 for the X/Y/Z axes.
    imu_reading = rcpy.mpu9250.read()
    accel = imu_reading['accel']
    gyro = imu_reading['gyro']
    mag = imu_reading['mag']
    # ISO-8601 UTC timestamp; the trailing 'Z' marks it as UTC (Zulu time).
    timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
    sensor_values = {
        "Time": timestamp,
        # Acceleration is divided by 9.80665 (standard gravity) to convert
        # from m/s^2 into units of Gs.
        "X-acceleration": accel[0] / 9.80665,
        "Y-acceleration": accel[1] / 9.80665,
        "Z-acceleration": accel[2] / 9.80665,
        "X-rotation": gyro[0],
        "Y-rotation": gyro[1],
        "Z-rotation": gyro[2],
        "X-magnetic field": mag[0],
        "Y-magnetic field": mag[1],
        "Z-magnetic field": mag[2],
        "Board Temperature": board_temperature_f
    }
    return [
        {
            "containerid": DATA_VALUES_CONTAINER_ID,
            "values": [sensor_values]
        }
    ]
# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************
# Define a helper function to allow easily sending web request messages;
# this function can later be customized to allow you to port this script to other languages.
# All it does is take in a data object and a message type, and it sends an HTTPS
# request to the target OMF endpoint
def send_omf_message_to_endpoint(action, message_type, message_json):
    """POST a single OMF message to TARGET_URL.

    :param action: OMF action header ("create", "update", ...); only
        "create" is used by this script.
    :param message_type: OMF message type header ("Type", "Container", "Data").
    :param message_json: JSON-serializable OMF message body.
    :returns: the requests.Response on completion of the request, or None if
        the web request raised (the error is printed, not re-raised, so one
        bad send does not kill the data-collection loop).
    """
    try:
        # Assemble headers that contain the producer token and message type.
        web_request_header = {
            'producertoken': PRODUCER_TOKEN,
            'messagetype': message_type,
            'action': action,
            'messageformat': 'JSON',
            'omfversion': '1.0'
        }
        # Serialize once; the payload is both logged and sent.
        payload = json.dumps(message_json)
        # Note: comment out the next line to silence outgoing-message logging.
        print('\nOutgoing message: ' + payload)
        response = requests.post(
            TARGET_URL,
            headers=web_request_header,
            data=payload,
            verify=VERIFY_SSL,
            timeout=WEB_REQUEST_TIMEOUT_SECONDS
        )
        # A 200 or 202 status code indicates the endpoint accepted the message.
        print(
            'Response from sending a message of type ' +
            '"{0}" with action "{1}": {2} {3}'.format(
                message_type,
                action,
                response.status_code,
                response.text
            )
        )
        return response
    except Exception as ex:
        # Log any error, if it occurs; swallow it so the caller keeps running.
        print(str(datetime.datetime.now()) + " Error during web request: " + str(ex))
        return None
# ************************************************************************
# Turn off HTTPS warnings, if desired
# (if the default certificate configuration was used by the PI Connector)
# ************************************************************************
# Suppress insecure HTTPS warnings, if an untrusted certificate is used by the target endpoint
# Remove if targetting trusted targets
# Best-effort suppression of self-signed-certificate warnings when SSL
# verification is disabled; failures here are non-fatal.
try:
    if not VERIFY_SSL:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        requests.packages.urllib3.disable_warnings()
except Exception as ex:
    # Log any error, if it occurs (warning suppression is optional anyway)
    print(str(datetime.datetime.now()) + " Possible non-fatal error when disabling SSL validation: " + str(ex))
# Announce the target endpoint before any OMF messages are sent.
print(
    '\n--- Setup: targeting endpoint "' + TARGET_URL + '"...' +
    '\n--- Now sending types, defining containers, and creating assets and links...' +
    '\n--- (Note: a successful message will return a 20X response code.)\n'
)
# ************************************************************************
# Create a JSON packet to define the types of streams that will be sent
# ************************************************************************
# This "dynamic" type describes the shape of each live data event: a Time
# index plus one numeric property per sensor reading.
DYNAMIC_TYPES_MESSAGE_JSON = [
    # ************************************************************************
    # There are several different message types that will be used by this script, but
    # you can customize this script for your own needs by modifying the types:
    # First, you can modify the "AssetsType", which will allow you to customize which static
    # attributes are added to the new PI AF Element that will be created, and second,
    # you can modify the "DataValuesType", which will allow you to customize this script to send
    # additional sensor values, in addition to (or instead of) the two shown here
    # This values type is going to be used to send real-time values; feel free to rename the
    # values from "Raw Sensor Reading 1" to, say, "Temperature", or "Pressure"
    # Note:
    # all keywords ("id", "type", "classification", etc. are case sensitive!)
    # For a list of the specific keywords used in these messages,
    # see http://omf-docs.readthedocs.io/
    {
        "id": DATA_VALUES_MESSAGE_TYPE_NAME,
        "type": "object",
        "classification": "dynamic",
        "properties": {
            "Time": {
                "format": "date-time",
                "type": "string",
                "isindex": True
            },
            #"Raw Sensor Reading 1": {"type": "number"},
            #"Raw Sensor Reading 2": {"type": "number"}
            "X-acceleration": {"type": "number", "description": "in Gs"},
            "Y-acceleration": {"type": "number", "description": "in Gs"},
            "Z-acceleration": {"type": "number", "description": "in Gs"},
            "X-rotation": {"type": "number", "description": "in degrees per second"},
            "Y-rotation": {"type": "number", "description": "in degrees per second"},
            "Z-rotation": {"type": "number", "description": "in degrees per second"},
            "X-magnetic field": {"type": "number", "description": "in microteslas"},
            "Y-magnetic field": {"type": "number", "description": "in microteslas"},
            "Z-magnetic field": {"type": "number", "description": "in microteslas"},
            "Board Temperature": {"type": "number", "description": "in Fahrenheit"}
            # For example, to allow you to send a string-type live data value,
            # such as "Status", you would add
            #"Status": {
            #   "type": "string"
            #}
        }
    }
]
# ************************************************************************
# Send the DYNAMIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", DYNAMIC_TYPES_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
# This "static" type becomes the PI AF Element template; OCS has no AF
# concept, so the whole section is skipped for cloud targets.
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
    STATIC_TYPES_MESSAGE_JSON = [
        # This asset type is used to define a PI AF Element that will be created;
        # this type also defines two static string attributes that will be created
        # as well; feel free to rename these or add additional
        # static attributes for each Element (PI Point attributes will be added later)
        # The name of this type will also end up being part of the name of the PI AF Element template
        # that is automatically created
        {
            "id": ASSETS_MESSAGE_TYPE_NAME,
            "type": "object",
            "classification": "static",
            "properties": {
                "Name": {
                    "type": "string",
                    "isindex": True
                },
                "Device Type": {
                    "type": "string"
                },
                "Location": {
                    "type": "string"
                },
                "Data Ingress Method": {
                    "type": "string"
                }
                # For example, to add a number-type static
                # attribute for the device model, you would add
                # "Model": {
                #   "type": "number"
                #}
            }
        }
    ]
    # ************************************************************************
    # Send the STATIC types message, so that these types can be referenced in all later messages
    # ************************************************************************
    send_omf_message_to_endpoint("create", "Type", STATIC_TYPES_MESSAGE_JSON)
# ************************************************************************
# Create a JSON packet to define containerids and the type
# (using the types listed above) for each new data events container
# ************************************************************************
# The device name that you specified earlier will be used as the AF Element name!
NEW_AF_ELEMENT_NAME = DEVICE_NAME
# Each container binds a stream id to the dynamic type defined above; live
# data messages will reference the container by this id.
CONTAINERS_MESSAGE_JSON = [
    {
        "id": DATA_VALUES_CONTAINER_ID,
        "typeid": DATA_VALUES_MESSAGE_TYPE_NAME
    }
]
# ************************************************************************
# Send the container message, to instantiate this particular container;
# we can now directly start sending data to it using its Id
# ************************************************************************
send_omf_message_to_endpoint("create", "Container", CONTAINERS_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
    # ************************************************************************
    # Create a JSON packet containing the asset and
    # linking data for the PI AF asset that will be made
    # ************************************************************************
    # Here is where you can specify values for the static PI AF attributes;
    # in this case, we're auto-populating the Device Type,
    # but you can manually hard-code in values if you wish
    # we also add the LINKS to be made, which will both position the new PI AF
    # Element, so it will show up in AF, and will associate the PI Points
    # that will be created with that Element
    ASSETS_AND_LINKS_MESSAGE_JSON = [
        {
            # This will end up creating a new PI AF Element with
            # this specific name and static attribute values
            "typeid": ASSETS_MESSAGE_TYPE_NAME,
            "values": [
                {
                    "Name": NEW_AF_ELEMENT_NAME,
                    "Device Type": (
                        platform.machine() + " - " + platform.platform() + " - " + platform.processor()
                    ),
                    "Location": DEVICE_LOCATION,
                    "Data Ingress Method": "OMF"
                }
            ]
        },
        {
            "typeid": "__Link",
            "values": [
                # This first link will locate such a newly created AF Element under
                # the root PI element targeted by the PI Connector in your target AF database
                # This was specified in the Connector Relay Admin page; note that a new
                # parent element, with the same name as the PRODUCER_TOKEN, will also be made
                {
                    "Source": {
                        "typeid": ASSETS_MESSAGE_TYPE_NAME,
                        "index": "_ROOT"
                    },
                    "Target": {
                        "typeid": ASSETS_MESSAGE_TYPE_NAME,
                        "index": NEW_AF_ELEMENT_NAME
                    }
                },
                # This second link will map new PI Points (created by messages
                # sent to the data values container) to the newly created element
                {
                    "Source": {
                        "typeid": ASSETS_MESSAGE_TYPE_NAME,
                        "index": NEW_AF_ELEMENT_NAME
                    },
                    "Target": {
                        "containerid": DATA_VALUES_CONTAINER_ID
                    }
                }
            ]
        }
    ]
    # ************************************************************************
    # Send the message to create the PI AF asset; it won't appear in PI AF,
    # though, because it hasn't yet been positioned...
    # ************************************************************************
    send_omf_message_to_endpoint("create", "Data", ASSETS_AND_LINKS_MESSAGE_JSON)
# ************************************************************************
# Initialize sensors prior to sending data (if needed), using the function defined earlier
# ************************************************************************
# Bring up the IMU hardware once before the send loop starts.
initialize_sensors()
# ************************************************************************
# Finally, loop indefinitely, sending random events
# conforming to the value type that we defined earlier
# ************************************************************************
print(
    '\n--- Now sending live data every ' + str(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES) +
    ' second(s) for device "' + NEW_AF_ELEMENT_NAME + '"... (press CTRL+C to quit at any time)\n'
)
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
    print(
        '--- (Look for a new AF Element named "' + NEW_AF_ELEMENT_NAME + '".)\n'
    )
# Main loop: sample sensors, send one OMF "Data" message, sleep, repeat.
# Runs until interrupted (CTRL+C); send errors are swallowed inside
# send_omf_message_to_endpoint, so the loop keeps going.
while True:
    # Call the custom function that builds a JSON object that
    # contains new data values; see the beginning of this script
    VALUES_MESSAGE_JSON = create_data_values_message()
    # Send the JSON message to the target URL
    send_omf_message_to_endpoint("create", "Data", VALUES_MESSAGE_JSON)
    # Send the next message after the required interval
    time.sleep(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES)
| osisoft/OMF-Samples | Community Samples/Python3/SendOMFDataToPISystemFromBeagleBoneBlue.py | Python | apache-2.0 | 23,066 | [
"VisIt"
] | c556c70d974aa74dee0940121d2827a6e6ccce37013fb357b802be3da21304f9 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import, print_function
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
import re
import time
from datetime import datetime, timedelta
from dateutil.parser import parse as dateutil_parse
from sqlalchemy import Table, Column, Integer, String, Unicode, Date, DateTime, Time, or_, and_
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey
from flexget import db_schema
from flexget import plugin
from flexget.event import event
from flexget.terminal import console
from flexget.manager import Session
from flexget.plugin import get_plugin_by_name
from flexget.utils import requests
from flexget.utils.database import with_session, json_synonym
from flexget.utils.simple_persistence import SimplePersistence
from flexget.utils.tools import TimedDict
Base = db_schema.versioned_base('api_trakt', 7)
AuthBase = db_schema.versioned_base('trakt_auth', 0)
log = logging.getLogger('api_trakt')
# Production Site
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api.trakt.tv/'
PIN_URL = 'https://trakt.tv/pin/346'
# Stores the last time we checked for updates for shows/movies
updated = SimplePersistence('api_trakt')
# Oauth account authentication
class TraktUserAuth(AuthBase):
    """Stores per-account trakt.tv OAuth credentials (one row per account)."""
    __tablename__ = 'trakt_user_auth'

    account = Column(Unicode, primary_key=True)
    access_token = Column(Unicode)
    refresh_token = Column(Unicode)
    created = Column(DateTime)
    expires = Column(DateTime)

    def __init__(self, account, access_token, refresh_token, created, expires):
        """
        :param account: arbitrary account name the tokens are stored under.
        :param access_token: OAuth access token from trakt.
        :param refresh_token: OAuth refresh token from trakt.
        :param created: epoch timestamp ('created_at' from the API response).
        :param expires: lifetime in seconds ('expires_in' from the API response).
        """
        self.account = account
        self.access_token = access_token
        self.refresh_token = refresh_token
        # Normalize the raw API values (seconds-from-now / epoch seconds)
        # into absolute datetimes before persisting.
        self.expires = token_expire_date(expires)
        self.created = token_created_date(created)
def token_expire_date(expires):
    """Convert an 'expires_in' seconds value into an absolute local datetime."""
    lifetime = timedelta(seconds=expires)
    return datetime.now() + lifetime
def token_created_date(created):
    """Convert an epoch timestamp ('created_at') into a local datetime."""
    as_datetime = datetime.fromtimestamp(created)
    return as_datetime
def device_auth():
    """Authorize FlexGet against trakt.tv using the OAuth device flow.

    Requests a device/user code pair, asks the user to visit the verification
    URL, then polls the token endpoint until the user approves, denies, or
    the code expires.

    :returns: token response dict (access/refresh tokens) on success.
    :raises plugin.PluginError: on denial, expiry, invalid code, or network failure.
    """
    data = {'client_id': CLIENT_ID}
    try:
        r = requests.post(get_api_url('oauth/device/code'), data=data).json()
        device_code = r['device_code']
        user_code = r['user_code']
        expires_in = r['expires_in']
        interval = r['interval']
        console('Please visit {0} and authorize Flexget. Your user code is {1}. Your code expires in '
                '{2} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0))
        log.debug('Polling for user authorization.')
        data['code'] = device_code
        data['client_secret'] = CLIENT_SECRET
        end_time = time.time() + expires_in
        console('Waiting...', end='')
        # stop polling after expires_in seconds
        while time.time() < end_time:
            time.sleep(interval)
            polling_request = requests.post(get_api_url('oauth/device/token'), data=data,
                                            raise_status=False)
            if polling_request.status_code == 200:  # success
                return polling_request.json()
            elif polling_request.status_code == 400:  # pending -- waiting for user
                console('...', end='')
            elif polling_request.status_code == 404:  # not found -- invalid device_code
                raise plugin.PluginError('Invalid device code. Open an issue on Github.')
            elif polling_request.status_code == 409:  # already used -- user already approved
                raise plugin.PluginError('User code has already been approved.')
            elif polling_request.status_code == 410:  # expired -- restart process
                break
            elif polling_request.status_code == 418:  # denied -- user denied code
                raise plugin.PluginError('User code has been denied.')
            elif polling_request.status_code == 429:  # polling too fast
                log.warning('Polling too quickly. Upping the interval. No action required.')
                interval += 1
        # Reached on timeout or on the 410 'expired' break above.
        raise plugin.PluginError('User code has expired. Please try again.')
    except requests.RequestException as e:
        raise plugin.PluginError('Device authorization with Trakt.tv failed: {0}'.format(e))
def token_oauth(data):
    """Exchange an OAuth payload for tokens at trakt's ``oauth/token`` endpoint.

    :raises plugin.PluginError: if the web request fails.
    """
    try:
        response = requests.post(get_api_url('oauth/token'), data=data)
        return response.json()
    except requests.RequestException as e:
        raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e))
def delete_account(account):
    """Remove the stored trakt authorization for *account* from the db.

    :raises plugin.PluginError: if no such account exists.
    """
    with Session() as session:
        stored = (session.query(TraktUserAuth)
                  .filter(TraktUserAuth.account == account)
                  .first())
        if stored is None:
            raise plugin.PluginError('Account %s not found.' % account)
        session.delete(stored)
def get_access_token(account, token=None, refresh=False, re_auth=False, called_from_cli=False):
    """
    Gets authorization info from a pin or refresh token.

    :param account: Arbitrary account name to attach authorization to.
    :param unicode token: The pin or refresh token, as supplied by the trakt website.
    :param bool refresh: If True, refresh the access token using refresh_token from db.
    :param bool re_auth: If True, account is re-authorized even if it already exists in db.
    :param bool called_from_cli: If True and no stored auth exists for `account`,
        start the interactive device-authorization flow instead of raising.
    :raises RequestException: If there is a network error while authorizing.
    """
    data = {
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob'
    }
    with Session() as session:
        acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
        if acc and datetime.now() < acc.expires and not refresh and not re_auth:
            # Stored token is still valid; reuse it without any web request.
            return acc.access_token
        else:
            if acc and (refresh or datetime.now() >= acc.expires - timedelta(days=5)) and not re_auth:
                # Token expired, expires within 5 days, or refresh requested:
                # exchange the stored refresh token for a new access token.
                log.debug('Using refresh token to re-authorize account %s.', account)
                data['refresh_token'] = acc.refresh_token
                data['grant_type'] = 'refresh_token'
                token_dict = token_oauth(data)
            elif token:
                # We are only in here if a pin was specified, so it's safe to use console instead of logging
                console('Warning: PIN authorization has been deprecated. Use Device Authorization instead.')
                data['code'] = token
                data['grant_type'] = 'authorization_code'
                token_dict = token_oauth(data)
            elif called_from_cli:
                log.debug('No pin specified for an unknown account %s. Attempting to authorize device.', account)
                token_dict = device_auth()
            else:
                raise plugin.PluginError('Account %s has not been authorized. See `flexget trakt auth -h` on how to.' %
                                         account)
            try:
                # Persist (or update) the credentials for later sessions.
                new_acc = TraktUserAuth(account, token_dict['access_token'], token_dict['refresh_token'],
                                        token_dict.get('created_at', time.time()), token_dict['expires_in'])
                session.merge(new_acc)
                return new_acc.access_token
            except requests.RequestException as e:
                raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e))
def make_list_slug(name):
    """Return the slug for use in url for given list name."""
    slug = name.lower()
    # These characters are just stripped in the url
    slug = slug.translate(str.maketrans('', '', '!@#$%^*()[]{}/=?+\\|'))
    # These characters get replaced
    slug = slug.replace('&', 'and').replace(' ', '-')
    return slug
def get_session(account=None, token=None):
    """
    Creates a requests session ready to talk to trakt API with FlexGet's api key.
    Can also add user level authentication if `account` parameter is given.

    :param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
        authenticated for that account.
    :param token: Optional pin/refresh token forwarded to `get_access_token` when authorizing `account`.
    """
    session = requests.Session()
    session.headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': CLIENT_ID,
    }
    if account:
        # `account` is known truthy here, so the original redundant
        # `... if account else None` ternary has been removed.
        access_token = get_access_token(account, token)
        if access_token:
            session.headers.update({'Authorization': 'Bearer %s' % access_token})
    return session
def get_api_url(*endpoint):
    """
    Get the address of a trakt API endpoint.

    :param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')
        Multiple parameters can also be specified instead of a single iterable.
    :returns: The absolute url to the specified API endpoint.
    """
    parts = endpoint
    # A single non-string argument is treated as an iterable of path parts.
    if len(parts) == 1 and not isinstance(parts[0], basestring):
        parts = parts[0]
    # Stringify integer path segments (e.g. trakt ids) before joining.
    return API_URL + '/'.join(str(part) for part in parts)
@db_schema.upgrade('api_trakt')
def upgrade(ver, session):
    """Schema upgrade hook: versions up to 6 cannot be migrated in place."""
    if ver is not None and ver > 6:
        return ver
    # Force a table rebuild for unknown or too-old schema versions.
    raise db_schema.UpgradeImpossible
def get_entry_ids(entry):
    """Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
    ids = {}
    # First pass uses only eagerly-available fields; the second pass may
    # trigger lazy lookups if the first found nothing at all.
    for lazy in (False, True):
        # At most one trakt id is kept, preferring movie over show over episode.
        for trakt_field in ('trakt_movie_id', 'trakt_show_id', 'trakt_episode_id'):
            if entry.get(trakt_field, eval_lazy=lazy):
                ids['trakt'] = entry[trakt_field]
                break
        # The remaining id services are collected independently of each other.
        for field, service in (('tmdb_id', 'tmdb'), ('tvdb_id', 'tvdb'),
                               ('imdb_id', 'imdb'), ('tvrage_id', 'tvrage')):
            if entry.get(field, eval_lazy=lazy):
                ids[service] = entry[field]
        if ids:
            break
    return ids
class TraktMovieTranslation(Base):
    """A single translated title/overview/tagline for a trakt movie."""
    __tablename__ = 'trakt_movie_translations'

    id = Column(Integer, primary_key=True, autoincrement=True)
    language = Column(Unicode)
    overview = Column(Unicode)
    tagline = Column(Unicode)
    title = Column(Unicode)
    movie_id = Column(Integer, ForeignKey('trakt_movies.id'))

    def __init__(self, translation, session):
        super(TraktMovieTranslation, self).__init__()
        self.update(translation, session)

    def update(self, translation, session):
        # Mirror every field of the API response onto the matching column.
        for field, value in translation.items():
            setattr(self, field, value)
class TraktShowTranslation(Base):
    """A single translated title/overview for a trakt show."""
    __tablename__ = 'trakt_show_translations'

    id = Column(Integer, primary_key=True, autoincrement=True)
    language = Column(Unicode)
    overview = Column(Unicode)
    title = Column(Unicode)
    show_id = Column(Integer, ForeignKey('trakt_shows.id'))

    def __init__(self, translation, session):
        super(TraktShowTranslation, self).__init__()
        self.update(translation, session)

    def update(self, translation, session):
        # Mirror every field of the API response onto the matching column.
        for field, value in translation.items():
            setattr(self, field, value)
def get_translations(ident, style):
    """Fetch all translations for a show or movie, reusing cached db rows.

    :param ident: trakt id of the item.
    :param style: 'show' or 'movie'.
    :returns: list of translation ORM objects, or None on a request error.
    """
    url = get_api_url(style + 's', ident, 'translations')
    if style == 'show':
        trakt_translation = TraktShowTranslation
    else:
        trakt_translation = TraktMovieTranslation
    trakt_translation_id = getattr(trakt_translation, style + '_id')
    translations = []
    req_session = get_session()
    try:
        results = req_session.get(url, params={'extended': 'full'}).json()
        with Session() as session:
            for result in results:
                # Reuse an existing row for this (language, item) pair if present.
                translation = session.query(trakt_translation).filter(and_(
                    trakt_translation.language == result.get('language'),
                    trakt_translation_id == ident)).first()
                if not translation:
                    translation = trakt_translation(result, session)
                translations.append(translation)
        return translations
    except requests.RequestException as e:
        # Best-effort: log and return None when the API is unreachable.
        log.debug('Error adding translations to trakt id %s: %s', ident, e)
class TraktGenre(Base):
    # Genre names are unique, so the name itself serves as the primary key.
    __tablename__ = 'trakt_genres'
    name = Column(Unicode, primary_key=True)
# Association tables for the many-to-many show/movie <-> genre relationships.
show_genres_table = Table('trakt_show_genres', Base.metadata,
                          Column('show_id', Integer, ForeignKey('trakt_shows.id')),
                          Column('genre_id', Unicode, ForeignKey('trakt_genres.name')))
Base.register_table(show_genres_table)
movie_genres_table = Table('trakt_movie_genres', Base.metadata,
                           Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
                           Column('genre_id', Unicode, ForeignKey('trakt_genres.name')))
Base.register_table(movie_genres_table)
class TraktActor(Base):
    """Cached trakt.tv person record (cast member)."""
    __tablename__ = 'trakt_actors'

    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(Unicode)
    slug = Column(Unicode)
    tmdb = Column(Integer)
    imdb = Column(Unicode)
    biography = Column(Unicode)
    birthday = Column(Date)
    death = Column(Date)
    homepage = Column(Unicode)

    def __init__(self, actor, session):
        super(TraktActor, self).__init__()
        self.update(actor, session)

    def update(self, actor, session):
        """Refresh this row from an API ``person`` dict; trakt ids must match."""
        id_fields = actor.get('ids')
        incoming_id = id_fields.get('trakt')
        if self.id and self.id != incoming_id:
            raise Exception('Tried to update db actors with different actor data')
        elif not self.id:
            self.id = incoming_id
        self.name = actor.get('name')
        self.imdb = id_fields.get('imdb')
        self.slug = id_fields.get('slug')
        self.tmdb = id_fields.get('tmdb')
        self.biography = actor.get('biography')
        # Birth/death arrive as date strings; only parse them when present.
        birthday = actor.get('birthday')
        if birthday:
            self.birthday = dateutil_parse(birthday)
        death = actor.get('death')
        if death:
            self.death = dateutil_parse(death)
        self.homepage = actor.get('homepage')

    def to_dict(self):
        """Minimal id/name mapping consumed by plugin code."""
        return {
            'name': self.name,
            'trakt_id': self.id,
            'imdb_id': self.imdb,
            'tmdb_id': self.tmdb,
        }
# Association tables for the many-to-many show/movie <-> actor relationships.
show_actors_table = Table('trakt_show_actors', Base.metadata,
                          Column('show_id', Integer, ForeignKey('trakt_shows.id')),
                          Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(show_actors_table)
movie_actors_table = Table('trakt_movie_actors', Base.metadata,
                           Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
                           Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(movie_actors_table)
def get_db_actors(ident, style):
actors = {}
url = get_api_url(style + 's', ident, 'people')
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full'}).json()
with Session() as session:
for result in results.get('cast'):
trakt_id = result.get('person').get('ids').get('trakt')
# sometimes an actor can occur twice in the list by mistake. This check is to avoid this unlikely event
if trakt_id in actors:
continue
actor = session.query(TraktActor).filter(TraktActor.id == trakt_id).first()
if not actor:
actor = TraktActor(result.get('person'), session)
actors[trakt_id] = actor
return list(actors.values())
except requests.RequestException as e:
log.debug('Error searching for actors for trakt id %s', e)
return
def get_translations_dict(translate, style):
res = {}
for lang in translate:
info = {
'overview': lang.overview,
'title': lang.title,
}
if style == 'movie':
info['tagline'] = lang.tagline
res[lang.language] = info
return res
def list_actors(actors):
res = {}
for actor in actors:
info = {
'trakt_id': actor.id,
'name': actor.name,
'imdb_id': str(actor.imdb),
'trakt_slug': actor.slug,
'tmdb_id': str(actor.tmdb),
'birthday': actor.birthday.strftime("%Y/%m/%d") if actor.birthday else None,
'biography': actor.biography,
'homepage': actor.homepage,
'death': actor.death.strftime("%Y/%m/%d") if actor.death else None,
}
res[str(actor.id)] = info
return res
class TraktEpisode(Base):
__tablename__ = 'trakt_episodes'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
season = Column(Integer)
number = Column(Integer)
number_abs = Column(Integer)
overview = Column(Unicode)
first_aired = Column(DateTime)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_episode, session):
super(TraktEpisode, self).__init__()
self.update(trakt_episode, session)
def update(self, trakt_episode, session):
"""Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
if self.id and self.id != trakt_episode['ids']['trakt']:
raise Exception('Tried to update db ep with different ep data')
elif not self.id:
self.id = trakt_episode['ids']['trakt']
self.imdb_id = trakt_episode['ids']['imdb']
self.tmdb_id = trakt_episode['ids']['tmdb']
self.tvrage_id = trakt_episode['ids']['tvrage']
self.tvdb_id = trakt_episode['ids']['tvdb']
self.first_aired = None
if trakt_episode.get('first_aired'):
self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
self.cached_at = datetime.now()
for col in ['title', 'season', 'number', 'number_abs', 'overview']:
setattr(self, col, trakt_episode.get(col))
@property
def expired(self):
# TODO should episode have its own expiration function?
return False
class TraktSeason(Base):
__tablename__ = 'trakt_seasons'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
number = Column(Integer)
episode_count = Column(Integer)
aired_episodes = Column(Integer)
overview = Column(Unicode)
first_aired = Column(DateTime)
ratings = Column(Integer)
votes = Column(Integer)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_season, session):
super(TraktSeason, self).__init__()
self.update(trakt_season, session)
def update(self, trakt_season, session):
"""Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
if self.id and self.id != trakt_season['ids']['trakt']:
raise Exception('Tried to update db season with different season data')
elif not self.id:
self.id = trakt_season['ids']['trakt']
self.tmdb_id = trakt_season['ids']['tmdb']
self.tvrage_id = trakt_season['ids']['tvrage']
self.tvdb_id = trakt_season['ids']['tvdb']
self.first_aired = None
if trakt_season.get('first_aired'):
self.first_aired = dateutil_parse(trakt_season['first_aired'], ignoretz=True)
self.cached_at = datetime.now()
for col in ['title', 'number', 'episode_count', 'aired_episodes', 'ratings', 'votes', 'overview']:
setattr(self, col, trakt_season.get(col))
@property
def expired(self):
# TODO should season have its own expiration function?
return False
class TraktShow(Base):
__tablename__ = 'trakt_shows'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
overview = Column(Unicode)
first_aired = Column(DateTime)
air_day = Column(Unicode)
air_time = Column(Time)
timezone = Column(Unicode)
runtime = Column(Integer)
certification = Column(Unicode)
network = Column(Unicode)
country = Column(Unicode)
status = Column(String)
rating = Column(Integer)
votes = Column(Integer)
language = Column(Unicode)
homepage = Column(Unicode)
trailer = Column(Unicode)
aired_episodes = Column(Integer)
_translations = relation(TraktShowTranslation)
_translation_languages = Column('translation_languages', Unicode)
translation_languages = json_synonym('_translation_languages')
episodes = relation(TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic')
seasons = relation(TraktSeason, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic')
genres = relation(TraktGenre, secondary=show_genres_table)
_actors = relation(TraktActor, secondary=show_actors_table)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"tvdb_id": self.tvdb_id,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tvrage_id": self.tvrage_id,
"overview": self.overview,
"first_aired": self.first_aired,
"air_day": self.air_day,
"air_time": self.air_time.strftime("%H:%M") if self.air_time else None,
"timezone": self.timezone,
"runtime": self.runtime,
"certification": self.certification,
"network": self.network,
"country": self.country,
"status": self.status,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"number_of_aired_episodes": self.aired_episodes,
"genres": [g.name for g in self.genres],
"updated_at": self.updated_at,
"cached_at": self.cached_at
}
def __init__(self, trakt_show, session):
super(TraktShow, self).__init__()
self.update(trakt_show, session)
def update(self, trakt_show, session):
"""Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
if self.id and self.id != trakt_show['ids']['trakt']:
raise Exception('Tried to update db show with different show data')
elif not self.id:
self.id = trakt_show['ids']['trakt']
self.slug = trakt_show['ids']['slug']
self.imdb_id = trakt_show['ids']['imdb']
self.tmdb_id = trakt_show['ids']['tmdb']
self.tvrage_id = trakt_show['ids']['tvrage']
self.tvdb_id = trakt_show['ids']['tvdb']
if trakt_show.get('airs'):
airs = trakt_show.get('airs')
self.air_day = airs.get('day')
self.timezone = airs.get('timezone')
if airs.get('time'):
self.air_time = datetime.strptime(airs.get('time'), '%H:%M').time()
else:
self.air_time = None
if trakt_show.get('first_aired'):
self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
else:
self.first_aired = None
self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year',
'runtime', 'certification', 'network', 'country', 'status', 'aired_episodes',
'trailer', 'homepage']:
setattr(self, col, trakt_show.get(col))
# Sometimes genres and translations are None but we really do want a list, hence the "or []"
self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_show.get('genres') or []]
self.cached_at = datetime.now()
self.translation_languages = trakt_show.get('available_translations') or []
def get_episode(self, season, number, session, only_cached=False):
# TODO: Does series data being expired mean all episode data should be refreshed?
episode = self.episodes.filter(TraktEpisode.season == season).filter(TraktEpisode.number == number).first()
if not episode or self.expired:
url = get_api_url('shows', self.id, 'seasons', season, 'episodes', number, '?extended=full')
if only_cached:
raise LookupError('Episode %s %s not found in cache' % (season, number))
log.debug('Episode %s %s not found in cache, looking up from trakt.', season, number)
try:
ses = get_session()
data = ses.get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
if episode:
episode.update(data, session)
else:
episode = TraktEpisode(data, session)
self.episodes.append(episode)
return episode
def get_season(self, number, session, only_cached=False):
# TODO: Does series data being expired mean all season data should be refreshed?
season = self.seasons.filter(TraktSeason.number == number).first()
if not season or self.expired:
url = get_api_url('shows', self.id, 'seasons', '?extended=full')
if only_cached:
raise LookupError('Season %s not found in cache' % number)
log.debug('Season %s not found in cache, looking up from trakt.', number)
try:
ses = get_session()
data = ses.get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
# We fetch all seasons for the given show because we barely get any data otherwise
for season_result in data:
db_season = self.seasons.filter(TraktSeason.id == season_result['ids']['trakt']).first()
if db_season:
db_season.update(season_result, session)
else:
db_season = TraktSeason(season_result, session)
self.seasons.append(db_season)
if number == season_result['number']:
season = db_season
if not season:
raise LookupError('Season %s not found for show %s' % (number, self.title))
return season
@property
def expired(self):
"""
:return: True if show details are considered to be expired, ie. need of update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.cached_at is None:
log.debug('cached_at is None: %s', self)
return True
refresh_interval = 2
# if show has been cancelled or ended, then it is unlikely to be updated often
if self.year and (self.status == 'ended' or self.status == 'canceled'):
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('show `%s` age %i expires in %i days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translations(self):
if not self._translations:
self._translations = get_translations(self.id, 'show')
return self._translations
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'show')
return self._actors
def __repr__(self):
return '<name=%s, id=%s>' % (self.title, self.id)
class TraktMovie(Base):
__tablename__ = 'trakt_movies'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tagline = Column(Unicode)
overview = Column(Unicode)
released = Column(Date)
runtime = Column(Integer)
rating = Column(Integer)
votes = Column(Integer)
trailer = Column(Unicode)
homepage = Column(Unicode)
language = Column(Unicode)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
_translations = relation(TraktMovieTranslation, backref='movie')
_translation_languages = Column('translation_languages', Unicode)
translation_languages = json_synonym('_translation_languages')
genres = relation(TraktGenre, secondary=movie_genres_table)
_actors = relation(TraktActor, secondary=movie_actors_table)
def __init__(self, trakt_movie, session):
super(TraktMovie, self).__init__()
self.update(trakt_movie, session)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tagline": self.tagline,
"overview": self.overview,
"released": self.released,
"runtime": self.runtime,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"trailer": self.trailer,
"genres": [g.name for g in self.genres],
"updated_at": self.updated_at,
"cached_at": self.cached_at
}
def update(self, trakt_movie, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_movie['ids']['trakt']:
raise Exception('Tried to update db movie with different movie data')
elif not self.id:
self.id = trakt_movie['ids']['trakt']
self.slug = trakt_movie['ids']['slug']
self.imdb_id = trakt_movie['ids']['imdb']
self.tmdb_id = trakt_movie['ids']['tmdb']
for col in ['title', 'overview', 'runtime', 'rating', 'votes',
'language', 'tagline', 'year', 'trailer', 'homepage']:
setattr(self, col, trakt_movie.get(col))
if trakt_movie.get('released'):
self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True).date()
self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_movie.get('genres', [])]
self.cached_at = datetime.now()
self.translation_languages = trakt_movie.get('available_translations', [])
@property
def expired(self):
"""
:return: True if movie details are considered to be expired, ie. need of update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.updated_at is None:
log.debug('updated_at is None: %s', self)
return True
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('movie `%s` age %i expires in %i days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translations(self):
if not self._translations:
self._translations = get_translations(self.id, 'movie')
return self._translations
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'movie')
return self._actors
class TraktShowSearchResult(Base):
__tablename__ = 'trakt_show_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
series = relation(TraktShow, backref='search_strings')
def __init__(self, search, series_id=None, series=None):
self.search = search.lower()
if series_id:
self.series_id = series_id
if series:
self.series = series
class TraktMovieSearchResult(Base):
__tablename__ = 'trakt_movie_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
movie = relation(TraktMovie, backref='search_strings')
def __init__(self, search, movie_id=None, movie=None):
self.search = search.lower()
if movie_id:
self.movie_id = movie_id
if movie:
self.movie = movie
def split_title_year(title):
"""Splits title containing a year into a title, year pair."""
# We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned!
match = re.search(r'[\s(]([12]\d{3})\)?$', title)
if match:
title = title[:match.start()].strip()
year = int(match.group(1))
else:
year = None
return title, year
@with_session
def get_cached(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None, session=None):
"""
Get the cached info for a given show/movie from the database.
:param type: Either 'show' or 'movie'
"""
ids = {
'id': trakt_id,
'slug': trakt_slug,
'tmdb_id': tmdb_id,
'imdb_id': imdb_id,
}
if style == 'show':
ids['tvdb_id'] = tvdb_id
ids['tvrage_id'] = tvrage_id
model = TraktShow
else:
model = TraktMovie
result = None
if any(ids.values()):
result = session.query(model).filter(
or_(getattr(model, col) == val for col, val in ids.items() if val)).first()
elif title:
title, y = split_title_year(title)
year = year or y
query = session.query(model).filter(model.title == title)
if year:
query = query.filter(model.year == year)
result = query.first()
return result
def get_trakt(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None):
"""Returns the matching media object from trakt api."""
# TODO: Better error messages
# Trakt api accepts either id or slug (there is a rare possibility for conflict though, e.g. 24)
trakt_id = trakt_id or trakt_slug
if not any([title, trakt_id, tmdb_id, imdb_id, tvdb_id, tvrage_id]):
raise LookupError('No lookup arguments provided.')
req_session = get_session()
last_search_query = None # used if no results are found
last_search_type = None
if not trakt_id:
# Try finding trakt_id based on other ids
ids = {
'imdb': imdb_id,
'tmdb': tmdb_id
}
if style == 'show':
ids['tvdb'] = tvdb_id
ids['tvrage'] = tvrage_id
for id_type, identifier in ids.items():
if not identifier:
continue
try:
last_search_query = identifier
last_search_type = id_type
log.debug('Searching with params: %s=%s', id_type, identifier)
results = req_session.get(get_api_url('search'), params={'id_type': id_type, 'id': identifier}).json()
except requests.RequestException as e:
raise LookupError('Searching trakt for %s=%s failed with error: %s' % (id_type, identifier, e))
for result in results:
if result['type'] != style:
continue
trakt_id = result[style]['ids']['trakt']
break
if not trakt_id and title:
last_search_query = title
last_search_type = 'title'
# Try finding trakt id based on title and year
if style == 'show':
parsed_title, y = split_title_year(title)
y = year or y
else:
title_parser = get_plugin_by_name('parsing').instance.parse_movie(title)
y = year or title_parser.year
parsed_title = title_parser.name
try:
params = {'query': parsed_title, 'type': style, 'year': y}
log.debug('Type of title: %s', type(parsed_title))
log.debug('Searching with params: %s', ', '.join('{}={}'.format(k, v) for (k, v) in params.items()))
results = req_session.get(get_api_url('search'), params=params).json()
except requests.RequestException as e:
raise LookupError('Searching trakt for %s failed with error: %s' % (title, e))
for result in results:
if year and result[style]['year'] != year:
continue
if parsed_title.lower() == result[style]['title'].lower():
trakt_id = result[style]['ids']['trakt']
break
# grab the first result if there is no exact match
if not trakt_id and results:
trakt_id = results[0][style]['ids']['trakt']
if not trakt_id:
raise LookupError('Unable to find %s="%s" on trakt.' % (last_search_type, last_search_query))
# Get actual data from trakt
try:
return req_session.get(get_api_url(style + 's', trakt_id), params={'extended': 'full'}).json()
except requests.RequestException as e:
raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e))
def update_collection_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'collection', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No collection data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['collection'][style_ident]
log.verbose('Received %d records from trakt.tv %s\'s collection', len(data), username)
if style_ident == 'movies':
for movie in data:
movie_id = movie['movie']['ids']['trakt']
cache[movie_id] = movie['movie']
cache[movie_id]['collected_at'] = dateutil_parse(movie['collected_at'], ignoretz=True)
else:
for series in data:
series_id = series['show']['ids']['trakt']
cache[series_id] = series['show']
cache[series_id]['seasons'] = series['seasons']
cache[series_id]['collected_at'] = dateutil_parse(series['last_collected_at'], ignoretz=True)
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update_watched_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'watched', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No watched data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['watched'][style_ident]
log.verbose('Received %d record(s) from trakt.tv %s\'s watched history', len(data), username)
if style_ident == 'movies':
for movie in data:
movie_id = movie['movie']['ids']['trakt']
cache[movie_id] = movie['movie']
cache[movie_id]['watched_at'] = dateutil_parse(movie['last_watched_at'], ignoretz=True)
cache[movie_id]['plays'] = movie['plays']
else:
for series in data:
series_id = series['show']['ids']['trakt']
cache[series_id] = series['show']
cache[series_id]['seasons'] = series['seasons']
cache[series_id]['watched_at'] = dateutil_parse(series['last_watched_at'], ignoretz=True)
cache[series_id]['plays'] = series['plays']
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update_user_ratings_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'ratings', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No user ratings data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['user_ratings']
log.verbose('Received %d record(s) from trakt.tv %s\'s %s user ratings', len(data), username, style_ident)
for item in data:
# get the proper cache from the type returned by trakt
item_type = item['type']
item_cache = cache[item_type + 's']
# season cannot be put into shows because the code would turn to spaghetti later when retrieving from cache
# instead we put some season info inside the season cache key'd to series id
# eg. cache['seasons'][<show_id>][<season_number>] = ratings and stuff
if item_type == 'season':
show_id = item['show']['ids']['trakt']
season = item['season']['number']
item_cache.setdefault(show_id, {})
item_cache[show_id].setdefault(season, {})
item_cache = item_cache[show_id]
item_id = season
else:
item_id = item[item_type]['ids']['trakt']
item_cache[item_id] = item[item_type]
item_cache[item_id]['rated_at'] = dateutil_parse(item['rated_at'], ignoretz=True)
item_cache[item_id]['rating'] = item['rating']
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def get_user_cache(username=None, account=None):
identifier = '{}|{}'.format(account, username or 'me')
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('movies', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('movies', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('seasons', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('episodes', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('user_ratings', {}).setdefault('movies', {})
return ApiTrakt.user_cache[identifier]
class ApiTrakt(object):
user_cache = TimedDict(cache_time='15 minutes')
@staticmethod
@with_session
def lookup_series(session=None, only_cached=None, **lookup_params):
series = get_cached('show', session=session, **lookup_params)
title = lookup_params.get('title') or ''
found = None
if not series and title:
found = session.query(TraktShowSearchResult).filter(TraktShowSearchResult.search == title.lower()).first()
if found and found.series:
log.debug('Found %s in previous search results as %s', title, found.series.title)
series = found.series
if only_cached:
if series:
return series
raise LookupError('Series %s not found from cache' % lookup_params)
if series and not series.expired:
return series
try:
trakt_show = get_trakt('show', **lookup_params)
except LookupError as e:
if series:
log.debug('Error refreshing show data from trakt, using cached. %s', e)
return series
raise
series = session.merge(TraktShow(trakt_show, session))
if series and title.lower() == series.title.lower():
return series
elif series and title and not found:
if not session.query(TraktShowSearchResult).filter(TraktShowSearchResult.search == title.lower()).first():
log.debug('Adding search result to db')
session.merge(TraktShowSearchResult(search=title, series=series))
elif series and found:
log.debug('Updating search result in db')
found.series = series
return series
@staticmethod
@with_session
def lookup_movie(session=None, only_cached=None, **lookup_params):
movie = get_cached('movie', session=session, **lookup_params)
title = lookup_params.get('title') or ''
found = None
if not movie and title:
found = session.query(TraktMovieSearchResult).filter(TraktMovieSearchResult.search == title.lower()).first()
if found and found.movie:
log.debug('Found %s in previous search results as %s', title, found.movie.title)
movie = found.movie
if only_cached:
if movie:
return movie
raise LookupError('Movie %s not found from cache' % lookup_params)
if movie and not movie.expired:
return movie
try:
trakt_movie = get_trakt('movie', **lookup_params)
except LookupError as e:
if movie:
log.debug('Error refreshing movie data from trakt, using cached. %s', e)
return movie
raise
movie = session.merge(TraktMovie(trakt_movie, session))
if movie and title.lower() == movie.title.lower():
return movie
if movie and title and not found:
if not session.query(TraktMovieSearchResult).filter(TraktMovieSearchResult.search == title.lower()).first():
log.debug('Adding search result to db')
session.merge(TraktMovieSearchResult(search=title, movie=movie))
elif movie and found:
log.debug('Updating search result in db')
found.movie = movie
return movie
@staticmethod
def collected(style, trakt_data, title, username=None, account=None):
style_ident = 'movies' if style == 'movie' else 'shows'
cache = get_user_cache(username=username, account=account)
if not cache['collection'][style_ident]:
log.debug('No collection found in cache.')
update_collection_cache(style_ident, username=username, account=account)
if not cache['collection'][style_ident]:
log.warning('No collection data returned from trakt.')
return
in_collection = False
cache = cache['collection'][style_ident]
if style == 'show':
if trakt_data.id in cache:
series = cache[trakt_data.id]
# specials are not included
number_of_collected_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
in_collection = number_of_collected_episodes >= trakt_data.aired_episodes
elif style == 'episode':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
in_collection = trakt_data.number in episodes
break
elif style == 'season':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if trakt_data.number == s['number']:
in_collection = True
break
else:
if trakt_data.id in cache:
in_collection = True
log.debug('The result for entry "%s" is: %s', title,
'Owned' if in_collection else 'Not owned')
return in_collection
@staticmethod
def watched(style, trakt_data, title, username=None, account=None):
style_ident = 'movies' if style == 'movie' else 'shows'
cache = get_user_cache(username=username, account=account)
if not cache['watched'][style_ident]:
log.debug('No watched history found in cache.')
update_watched_cache(style_ident, username=username, account=account)
if not cache['watched'][style_ident]:
log.warning('No watched data returned from trakt.')
return
watched = False
cache = cache['watched'][style_ident]
if style == 'show':
if trakt_data.id in cache:
series = cache[trakt_data.id]
# specials are not included
number_of_watched_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
watched = number_of_watched_episodes == trakt_data.aired_episodes
elif style == 'episode':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
watched = trakt_data.number in episodes
break
elif style == 'season':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if trakt_data.number == s['number']:
watched = True
break
else:
if trakt_data.id in cache:
watched = True
log.debug('The result for entry "%s" is: %s', title,
'Watched' if watched else 'Not watched')
return watched
@staticmethod
def user_ratings(style, trakt_data, title, username=None, account=None):
style_ident = style + 's'
cache = get_user_cache(username=username, account=account)
if not cache['user_ratings'][style_ident]:
log.debug('No user ratings found in cache.')
update_user_ratings_cache(style_ident, username=username, account=account)
if not cache['user_ratings'][style_ident]:
log.warning('No user ratings data returned from trakt.')
return
user_rating = None
cache = cache['user_ratings'][style_ident]
# season ratings are a little annoying and require butchering the code
if style == 'season' and trakt_data.series_id in cache:
if trakt_data.number in cache[trakt_data.series_id]:
user_rating = cache[trakt_data.series_id][trakt_data.number]['rating']
if trakt_data.id in cache:
user_rating = cache[trakt_data.id]['rating']
log.debug('User rating for entry "%s" is: %s', title, user_rating)
return user_rating
@event('plugin.register')
def register_plugin():
plugin.register(ApiTrakt, 'api_trakt', api_ver=2, interfaces=[])
| qk4l/Flexget | flexget/plugins/internal/api_trakt.py | Python | mit | 53,240 | [
"VisIt"
] | ff9fc1551a88d7ee772ec86eefc82f5fbb428db3617b0e918e89d0641e4d28ea |
"""
Views for user API
"""
import six
from django.contrib.auth.signals import user_logged_in
from django.shortcuts import redirect
from django.utils import dateparse
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from django.contrib.auth.models import User
from lms.djangoapps.courseware.access import is_mobile_available_for_user
from lms.djangoapps.courseware.courses import get_current_child
from lms.djangoapps.courseware.model_data import FieldDataCache
from lms.djangoapps.courseware.module_render import get_module_for_descriptor
from lms.djangoapps.courseware.views.index import save_positions_recursively_up
from lms.djangoapps.courseware.access_utils import ACCESS_GRANTED
from mobile_api.utils import API_V05
from openedx.features.course_duration_limits.access import check_course_expired
from student.models import CourseEnrollment, User
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .. import errors
from ..decorators import mobile_course_access, mobile_view
from .serializers import CourseEnrollmentSerializer, CourseEnrollmentSerializerv05, UserSerializer
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**
        Get information about the specified user and access other resources
        the user has permissions for.
        Users are redirected to this endpoint after they sign in.
        You can use the **course_enrollments** value in the response to get a
        list of courses the user is enrolled in.
    **Example Request**
        GET /api/mobile/{version}/users/{username}
    **Response Values**
        If the request is successful, the request returns an HTTP 200 "OK" response.
        The HTTP 200 response has the following values.
        * course_enrollments: The URI to list the courses the currently signed
          in user is enrolled in.
        * email: The email address of the currently signed in user.
        * id: The ID of the user.
        * name: The full name of the currently signed in user.
        * username: The username of the currently signed in user.
    """
    # The serializer always renders profile fields; select_related avoids an
    # extra query per request.
    queryset = User.objects.all().select_related('profile')
    serializer_class = UserSerializer
    lookup_field = 'username'

    def get_serializer_context(self):
        # The serializer varies its output by API version, so forward the
        # version captured from the URL kwargs.
        context = super(UserDetail, self).get_serializer_context()
        context.update(api_version=self.kwargs.get('api_version'))
        return context
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Cases**
        Get or update the ID of the module that the specified user last
        visited in the specified course.
    **Example Requests**
        GET /api/mobile/{version}/users/{username}/course_status_info/{course_id}
        PATCH /api/mobile/{version}/users/{username}/course_status_info/{course_id}
        **PATCH Parameters**
            The body of the PATCH request can include the following parameters.
            * last_visited_module_id={module_id}
            * modification_date={date}
            The modification_date parameter is optional. If it is present, the
            update will only take effect if the modification_date in the
            request is later than the modification_date saved on the server.
    **Response Values**
        If the request is successful, the request returns an HTTP 200 "OK" response.
        The HTTP 200 response has the following values.
        * last_visited_module_id: The ID of the last module that the user
          visited in the course.
        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """
    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        # depth=2 pre-loads the course plus chapters and sections into the
        # cache in one pass — TODO confirm the exact depth semantics with
        # FieldDataCache.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course.id, course=course
        )
        path = [course_module]
        # get_current_child is relied upon to fall back to a default child
        # when the user has no recorded position — NOTE(review): verify.
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)
        # deepest (most specific) module first, course module last
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [six.text_type(module.location) for module in path]
        return Response({
            # first entry is the deepest module on the visited path
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            # the client sent a usage key that doesn't exist in the modulestore
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(
            request.user, request, module_descriptor, field_data_cache, course.id, course=course
        )
        if modification_date:
            # Check when the user's saved 'position' for this course was last
            # written, so a stale client update cannot overwrite newer state.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)
        save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.data.get("last_visited_module_id")
        modification_date_string = request.data.get("modification_date")
        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # reject unparseable or timezone-naive timestamps
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**
        Get information about the courses that the currently signed in user is
        enrolled in.
        v1 differs from v0.5 version by returning ALL enrollments for
        a user rather than only the enrollments the user has access to (that haven't expired).
        An additional attribute "expiration" has been added to the response, which lists the date
        when access to the course will expire or null if it doesn't expire.
    **Example Request**
        GET /api/mobile/v1/users/{username}/course_enrollments/
    **Response Values**
        If the request for information about the user is successful, the
        request returns an HTTP 200 "OK" response with the enrollment data:
        expiration, certificate, course (courseware_access, course_about,
        course_sharing_utm_parameters, course_handouts, course_image,
        course_updates, discussion_url, end, id, name, number, org, start,
        start_display, start_type, subscription_id, video_outline), created,
        is_active and mode.
    """
    queryset = CourseEnrollment.objects.all()
    lookup_field = 'username'
    # DRF v3's default pagination wraps list responses in an envelope with a
    # "results" key; the mobile clients expect a bare list, so pagination is
    # disabled for backwards compatibility.
    pagination_class = None

    def is_org(self, check_org, course_org):
        """
        Check course org matches request org param or no param provided
        """
        if check_org is None:
            return True
        return check_org.lower() == course_org.lower()

    def get_serializer_context(self):
        context = super(UserCourseEnrollmentsList, self).get_serializer_context()
        context.update(api_version=self.kwargs.get('api_version'))
        return context

    def get_serializer_class(self):
        # v0.5 clients get the legacy serializer (no expiration field).
        if self.kwargs.get('api_version') == API_V05:
            return CourseEnrollmentSerializerv05
        return CourseEnrollmentSerializer

    def get_queryset(self):
        api_version = self.kwargs.get('api_version')
        org = self.request.query_params.get('org', None)
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        # Keep only enrollments that have a course overview, match the
        # requested org (if any) and are available on mobile for this user.
        visible = []
        for enrollment in enrollments:
            overview = enrollment.course_overview
            if not overview or not self.is_org(org, overview.org):
                continue
            if not is_mobile_available_for_user(self.request.user, overview):
                continue
            visible.append(enrollment)
        if api_version == API_V05:
            # for v0.5 don't return expired courses
            return [
                enrollment for enrollment in visible
                if check_course_expired(self.request.user, enrollment.course) == ACCESS_GRANTED
            ]
        # v1 returns all courses; expiration is exposed per enrollment instead
        return visible
@api_view(["GET"])
@mobile_view()
def my_user_info(request, api_version):
    """
    Redirect to the currently-logged-in user's info page
    """
    # Fire the login signal from here so last_login stays current; updating
    # it from the oauth2-related code was too complex.
    user_logged_in.send(sender=User, user=request.user, request=request)
    username = request.user.username
    return redirect("user-detail", api_version=api_version, username=username)
| cpennington/edx-platform | lms/djangoapps/mobile_api/users/views.py | Python | agpl-3.0 | 13,824 | [
"VisIt"
] | ed0ccbfe35bedf7051a9812e3f79d3ff1fd61dc60059bb9196e6e8a1756512d2 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import warnings
import subprocess
import numpy as np
import os.path
import copy
from pymatgen.core import Structure, Lattice, PeriodicSite, Molecule
from pymatgen.core.structure import FunctionalGroups
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
from monty.json import MSONable
from monty.os.path import which
from operator import itemgetter
from collections import namedtuple
from scipy.spatial import KDTree
try:
import networkx as nx
from networkx.readwrite import json_graph
from networkx.drawing.nx_agraph import write_dot
except ImportError:
raise ImportError("This module requires the NetworkX "
"graph library to be installed.")
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
__author__ = "Matthew Horton, Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "August 2017"
ConnectedSite = namedtuple('ConnectedSite', 'site, jimage, index, weight, dist')
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
    """
    If constructing this class manually, use the `with_empty_graph`
    method or `with_local_env_strategy` method (using an algorithm
    provided by the `local_env` module, such as O'Keeffe).

    Stores relationships between sites as a NetworkX graph alongside the
    associated Structure object. Use cases include bonding information,
    NMR J-couplings, Heisenberg exchange parameters, etc. For periodic
    graphs, edges record which lattice image they connect to.

    :param structure: a Structure object
    :param graph_data: dict containing graph information in
        dict format (not intended to be constructed manually,
        see as_dict method for format)
    """
    if isinstance(structure, StructureGraph):
        # copy constructor: recover the graph data from the existing object
        graph_data = structure.as_dict()['graphs']

    self.structure = structure
    self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)

    # A JSON round-trip duplicates some edge bookkeeping and turns image
    # tuples into lists; strip the duplicates and restore hashable tuples.
    for _, _, _, attrs in self.graph.edges(keys=True, data=True):
        attrs.pop('id', None)
        attrs.pop('key', None)
        if 'to_jimage' in attrs:
            attrs['to_jimage'] = tuple(attrs['to_jimage'])
        if 'from_jimage' in attrs:
            attrs['from_jimage'] = tuple(attrs['from_jimage'])
@classmethod
def with_empty_graph(cls, structure, name="bonds",
                     edge_weight_name=None,
                     edge_weight_units=None):
    """
    Build a StructureGraph with an empty graph: one node per Site in the
    Structure and no edges.

    :param structure (Structure):
    :param name (str): name of graph, e.g. "bonds"
    :param edge_weight_name (str): name of edge weights,
        e.g. "bond_length" or "exchange_constant"
    :param edge_weight_units (str): name of edge weight units
        e.g. "Å" or "eV"
    :return (StructureGraph):
    """
    # named weights without units are ambiguous, so refuse them up front
    if edge_weight_name and (edge_weight_units is None):
        raise ValueError("Please specify units associated "
                         "with your edge weights. Can be "
                         "empty string if arbitrary or "
                         "dimensionless.")
    # graph-level attributes are purely book-keeping; they don't change
    # how the graph behaves
    graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,
                            edge_weight_units=edge_weight_units,
                            name=name)
    # nodes are indexed identically to sites in the Structure
    graph.add_nodes_from(range(len(structure)))
    return cls(structure, graph_data=json_graph.adjacency_data(graph))
@staticmethod
def with_local_env_strategy(structure, strategy):
    """
    Build a StructureGraph using a near-neighbor strategy
    from :Class: `pymatgen.analysis.local_env`.

    :param structure: Structure object
    :param strategy: an instance of a
        :Class: `pymatgen.analysis.local_env.NearNeighbors` object
    :return:
    """
    sg = StructureGraph.with_empty_graph(structure, name="bonds",
                                         edge_weight_name="weight",
                                         edge_weight_units="")
    for site_idx in range(len(structure)):
        for info in strategy.get_nn_info(structure, site_idx):
            # local_env reports each bond from both endpoints, so every
            # edge is attempted twice (u->v and v->u); the duplicate is
            # harmless, hence warn_duplicates=False.
            sg.add_edge(from_index=site_idx,
                        from_jimage=(0, 0, 0),
                        to_index=info['site_index'],
                        to_jimage=info['image'],
                        weight=info['weight'],
                        warn_duplicates=False)
    return sg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph['name']
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph['edge_weight_name']
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph['edge_weight_units']
def add_edge(self, from_index, to_index,
             from_jimage=(0, 0, 0), to_jimage=None,
             weight=None, warn_duplicates=True,
             edge_properties=None):
    """
    Add edge to graph.

    Since physically a 'bond' (or other connection
    between sites) doesn't have a direction, from_index,
    from_jimage can be swapped with to_index, to_jimage.
    However, images will always be shifted so that
    from_index < to_index and from_jimage becomes (0, 0, 0).

    :param from_index: index of site connecting from
    :param to_index: index of site connecting to
    :param from_jimage (tuple of ints): lattice vector of periodic
        image, e.g. (1, 0, 0) for periodic image in +x direction
    :param to_jimage (tuple of ints): lattice vector of image
    :param weight (float): e.g. bond length
    :param warn_duplicates (bool): if True, will warn if
        trying to add duplicate edges (duplicate edges will not
        be added in either case)
    :param edge_properties (dict): any other information to
        store on graph edges, similar to Structure's site_properties
    :return:
    """
    # this is not necessary for the class to work, but
    # just makes it neater: canonical edges run low index -> high index
    if to_index < from_index:
        to_index, from_index = from_index, to_index
        to_jimage, from_jimage = from_jimage, to_jimage
    # constrain all from_jimages to be (0, 0, 0),
    # initial version of this class worked even if
    # from_jimage != (0, 0, 0), but making this
    # assumption simplifies logic later
    if not np.array_equal(from_jimage, (0, 0, 0)):
        # shift both images by -from_jimage so from_jimage becomes the origin
        shift = from_jimage
        from_jimage = np.subtract(from_jimage, shift)
        to_jimage = np.subtract(to_jimage, shift)
    # automatic detection of to_jimage if user doesn't specify
    # will try and detect all equivalent images and add multiple
    # edges if appropriate
    if to_jimage is None:
        # assume we want the closest site
        warnings.warn("Please specify to_jimage to be unambiguous, "
                      "trying to automatically detect.")
        dist, to_jimage = self.structure[from_index]\
            .distance_and_image(self.structure[to_index])
        if dist == 0:
            # this will happen when from_index == to_index,
            # typically in primitive single-atom lattices;
            # use the nearest lattice translation instead
            images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
            dists = []
            for image in images:
                dists.append(self.structure[from_index]
                             .distance_and_image(self.structure[from_index],
                                                 jimage=image)[0])
            dist = min(dists)
        # find all sites at (approximately) this distance; each equivalent
        # image gets its own edge via a recursive call
        equiv_sites = self.structure.get_neighbors_in_shell(self.structure[from_index].coords,
                                                            dist,
                                                            dist*0.01,
                                                            include_index=True)
        for site, dist, to_index in equiv_sites:
            to_jimage = np.subtract(site.frac_coords, self.structure[from_index].frac_coords)
            to_jimage = to_jimage.astype(int)
            self.add_edge(from_index=from_index, from_jimage=(0, 0, 0),
                          to_jimage=to_jimage, to_index=to_index)
        return
    # sanitize types: plain ints and hashable tuples only
    from_jimage, to_jimage = tuple(map(int, from_jimage)), tuple(map(int, to_jimage))
    from_index, to_index = int(from_index), int(to_index)
    # check we're not trying to add a duplicate edge
    # there should only ever be at most one edge
    # between a given (site, jimage) pair and another
    # (site, jimage) pair
    existing_edge_data = self.graph.get_edge_data(from_index, to_index)
    if existing_edge_data:
        for key, d in existing_edge_data.items():
            if d["to_jimage"] == to_jimage:
                if warn_duplicates:
                    warnings.warn("Trying to add an edge that already exists from "
                                  "site {} to site {} in {}.".format(from_index,
                                                                     to_index,
                                                                     to_jimage))
                return
    # generic container for additional edge properties,
    # similar to site properties
    edge_properties = edge_properties or {}
    # NOTE(review): `if weight:` treats a weight of exactly 0 the same as
    # None (no weight attribute stored) — confirm this is intentional.
    if weight:
        self.graph.add_edge(from_index, to_index,
                            to_jimage=to_jimage,
                            weight=weight,
                            **edge_properties)
    else:
        self.graph.add_edge(from_index, to_index,
                            to_jimage=to_jimage,
                            **edge_properties)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
    """
    Returns a named tuple of neighbors of site n:
    periodic_site, jimage, index, weight.
    Index is the index of the corresponding site
    in the original structure, weight can be
    None if not defined.
    :param n: index of Site in Structure
    :param jimage: lattice vector of site
    :return: list of ConnectedSite tuples,
        sorted by closest first
    """
    connected_sites = set()
    # the graph stores each bond once with a direction, so a neighbor may
    # appear as either the source or the target of an edge; scan both
    out_edges = [(u, v, d, 'out') for u, v, d in self.graph.out_edges(n, data=True)]
    in_edges = [(u, v, d, 'in') for u, v, d in self.graph.in_edges(n, data=True)]
    for u, v, d, dir in out_edges + in_edges:
        to_jimage = d['to_jimage']
        if dir == 'in':
            # flip the edge so u is always site n; the stored image vector
            # points u -> v, so it has to be negated as well
            u, v = v, u
            to_jimage = np.multiply(-1, to_jimage)
        # construct the neighbor site displaced into its periodic image
        site_d = self.structure[v].as_dict()
        site_d['abc'] = np.add(site_d['abc'], to_jimage).tolist()
        # report the image relative to the requested jimage of site n
        to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
        site = PeriodicSite.from_dict(site_d)
        # from_site if jimage arg != (0, 0, 0)
        relative_jimage = np.subtract(to_jimage, jimage)
        dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
        weight = d.get('weight', None)
        connected_site = ConnectedSite(site=site,
                                       jimage=to_jimage,
                                       index=v,
                                       weight=weight,
                                       dist=dist)
        # the set removes duplicates produced by the out/in double scan
        connected_sites.add(connected_site)
    # return list sorted by closest sites first
    connected_sites = list(connected_sites)
    connected_sites.sort(key=lambda x: x.dist)
    return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(self, filename="graph",
                       diff=None,
                       hide_unconnected_nodes=False,
                       hide_image_edges=True,
                       edge_colors=False,
                       node_labels=False,
                       weight_labels=False,
                       image_labels=False,
                       color_scheme="VESTA",
                       keep_dot=False,
                       algo="fdp"):
    """
    Draws graph using GraphViz.

    The networkx graph object itself can also be drawn
    with networkx's in-built graph drawing methods, but
    note that this might give misleading results for
    multigraphs (edges are super-imposed on each other).
    If visualization is difficult to interpret,
    `hide_image_edges` can help, especially in larger
    graphs.

    :param filename: filename to output, will detect filetype
        from extension (any graphviz filetype supported, such as
        pdf or png)
    :param diff (StructureGraph): an additional graph to
        compare with, will color edges red that do not exist in diff
        and edges green that are in diff graph but not in the
        reference graph
    :param hide_unconnected_nodes: if True, hide unconnected
        nodes
    :param hide_image_edges: if True, do not draw edges that
        go through periodic boundaries
    :param edge_colors (bool): if True, use node colors to
        color edges
    :param node_labels (bool): if True, label nodes with
        species and site index
    :param weight_labels (bool): if True, label edges with
        weights
    :param image_labels (bool): if True, label edges with
        their periodic images (usually only used for debugging,
        edges to periodic images always appear as dashed lines)
    :param color_scheme (str): "VESTA" or "JMOL"
    :param keep_dot (bool): keep GraphViz .dot file for later
        visualization
    :param algo: any graphviz algo, "neato" (for simple graphs)
        or "fdp" (for more crowded graphs) usually give good outputs
    :return:
    """
    # the chosen layout algorithm must exist as an executable on PATH
    if not which(algo):
        raise RuntimeError("StructureGraph graph drawing requires "
                           "GraphViz binaries to be in the path.")
    # Developer note: NetworkX also has methods for drawing
    # graphs using matplotlib, these also work here. However,
    # a dedicated tool like GraphViz allows for much easier
    # control over graph appearance and also correctly displays
    # mutli-graphs (matplotlib can superimpose multiple edges).
    g = self.graph.copy()
    # replace graph attributes wholesale with GraphViz layout options
    g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"}
    # add display options for nodes
    for n in g.nodes():
        # get label by species name
        label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
        # use standard color scheme for nodes
        c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
        # get contrasting font color
        # magic numbers account for perceived luminescence
        # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
        fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587
                                      + c[2] * 0.114) / 255 < 0.5 else '#ffffff'
        # convert color to hex string
        color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
        g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,
                   fontname="Helvetica-bold", style="filled", shape="circle")
    edges_to_delete = []
    # add display options for edges
    for u, v, k, d in g.edges(keys=True, data=True):
        # retrieve from/to images, set as origin if not defined
        to_image = d['to_jimage']
        # set edge style: dashed marks an edge through a periodic boundary
        d['style'] = "solid"
        if to_image != (0, 0, 0):
            d['style'] = "dashed"
            if hide_image_edges:
                edges_to_delete.append((u, v, k))
        # don't show edge directions
        d['arrowhead'] = "none"
        # only add labels for images that are not the origin
        if image_labels:
            d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
            d['arrowhead'] = "normal" if d['headlabel'] else "none"
        # optionally color edges using node colors
        color_u = g.node[u]['fillcolor']
        color_v = g.node[v]['fillcolor']
        d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
        # optionally add weights to graph
        if weight_labels:
            units = g.graph.get('edge_weight_units', "")
            if d.get('weight'):
                d['label'] = "{:.2f} {}".format(d['weight'], units)
        # update edge with our new style attributes
        g.edges[u, v, k].update(d)
    # optionally remove periodic image edges,
    # these can be confusing due to periodic boundaries
    if hide_image_edges:
        for edge_to_delete in edges_to_delete:
            g.remove_edge(*edge_to_delete)
    # optionally hide unconnected nodes,
    # these can appear when removing periodic edges
    if hide_unconnected_nodes:
        g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])
    # optionally highlight differences with another graph
    if diff:
        diff = self.diff(diff, strict=True)
        green_edges = []
        red_edges = []
        for u, v, k, d in g.edges(keys=True, data=True):
            if (u, v, d['to_jimage']) in diff['self']:
                # edge has been deleted
                red_edges.append((u, v, k))
            elif (u, v, d['to_jimage']) in diff['other']:
                # edge has been added
                green_edges.append((u, v, k))
        for u, v, k in green_edges:
            g.edges[u, v, k].update({'color_uv': '#00ff00'})
        for u, v, k in red_edges:
            g.edges[u, v, k].update({'color_uv': '#ff0000'})
    basename, extension = os.path.splitext(filename)
    extension = extension[1:]
    # write an intermediate .dot file, then render it with the chosen algo
    write_dot(g, basename+".dot")
    with open(filename, "w") as f:
        args = [algo, "-T", extension, basename+".dot"]
        rs = subprocess.Popen(args,
                              stdout=f,
                              stdin=subprocess.PIPE, close_fds=True)
        rs.communicate()
        if rs.returncode != 0:
            raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
    if not keep_dot:
        os.remove(basename+".dot")
def as_dict(self):
    """
    As in :Class: `pymatgen.core.Structure` except
    with using `to_dict_of_dicts` from NetworkX
    to store graph information.
    """
    return {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "structure": self.structure.as_dict(),
        "graphs": json_graph.adjacency_data(self.graph),
    }
@classmethod
def from_dict(cls, d):
    """
    As in :Class: `pymatgen.core.Structure` except
    restoring graphs using `from_dict_of_dicts`
    from NetworkX to restore graph information.
    """
    structure = Structure.from_dict(d['structure'])
    return cls(structure, d['graphs'])
def __mul__(self, scaling_matrix):
    """
    Replicates the graph, creating a supercell,
    intelligently joining together
    edges that lie on periodic boundaries.
    In principle, any operations on the expanded
    graph could also be done on the original
    graph, but a larger graph can be easier to
    visualize and reason about.
    :param scaling_matrix: same as Structure.__mul__
    :return:
    """
    # Developer note: a different approach was also trialed, using
    # a simple Graph (instead of MultiDiGraph), with node indices
    # representing both site index and periodic image. Here, the
    # number of nodes != number of sites in the Structure. This
    # approach has many benefits, but made it more difficult to
    # keep the graph in sync with its corresponding Structure.
    # Broadly, it would be easier to multiply the Structure
    # *before* generating the StructureGraph, but this isn't
    # possible when generating the graph using critic2 from
    # charge density.
    # Multiplication works by looking for the expected position
    # of an image node, and seeing if that node exists in the
    # supercell. If it does, the edge is updated. This is more
    # computationally expensive than just keeping track of
    # which new lattice images are present, but should hopefully be
    # easier to extend to a general 3x3 scaling matrix.
    # code adapted from Structure.__mul__
    scale_matrix = np.array(scaling_matrix, np.int16)
    if scale_matrix.shape != (3, 3):
        scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
    else:
        # TODO: test __mul__ with full 3x3 scaling matrices
        raise NotImplementedError('Not tested with 3x3 scaling matrices yet.')
    new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
    f_lat = lattice_points_in_supercell(scale_matrix)
    c_lat = new_lattice.get_cartesian_coords(f_lat)
    new_sites = []
    new_graphs = []
    for v in c_lat:
        # create a map of nodes from original graph to its image
        mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
        for idx, site in enumerate(self.structure):
            s = PeriodicSite(site.species_and_occu, site.coords + v,
                             new_lattice, properties=site.properties,
                             coords_are_cartesian=True, to_unit_cell=False)
            new_sites.append(s)
        new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
    new_structure = Structure.from_sites(new_sites)
    # merge all graphs into one big graph
    new_g = nx.MultiDiGraph()
    for new_graph in new_graphs:
        new_g = nx.union(new_g, new_graph)
    edges_to_remove = []  # tuple of (u, v, k)
    edges_to_add = []  # tuple of (u, v, attr_dict)
    # list of new edges inside supercell
    # for duplicate checking
    edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True)
                              if d['to_jimage'] == (0, 0, 0)]
    new_periodic_images = []
    orig_lattice = self.structure.lattice
    # use k-d tree to match given position to an
    # existing Site in Structure
    kd_tree = KDTree(new_structure.cart_coords)
    # tolerance in Å for sites to be considered equal
    # this could probably be a lot smaller
    tol = 0.05
    for u, v, k, d in new_g.edges(keys=True, data=True):
        to_jimage = d['to_jimage']  # for node v
        # reduce unnecessary checking
        if to_jimage != (0, 0, 0):
            # get index in original site
            n_u = u % len(self.structure)
            n_v = v % len(self.structure)
            # get fractional co-ordinates of where atoms defined
            # by edge are expected to be, relative to original
            # lattice (keeping original lattice has
            # significant benefits)
            v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
            u_frac = self.structure[n_u].frac_coords
            # using the position of node u as a reference,
            # get relative Cartesian co-ordinates of where
            # atoms defined by edge are expected to be
            v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
            u_cart = orig_lattice.get_cartesian_coords(u_frac)
            v_rel = np.subtract(v_image_cart, u_cart)
            # now retrieve position of node v in
            # new supercell, and get absolute Cartesian
            # co-ordinates of where atoms defined by edge
            # are expected to be
            v_expec = new_structure[u].coords + v_rel
            # now search in new structure for these atoms
            # query returns (distance, index)
            v_present = kd_tree.query(v_expec)
            v_present = v_present[1] if v_present[0] <= tol else None
            # check if image sites now present in supercell
            # and if so, delete old edge that went through
            # periodic boundary
            if v_present is not None:
                new_u = u
                new_v = v_present
                new_d = d.copy()
                # node now inside supercell
                new_d['to_jimage'] = (0, 0, 0)
                edges_to_remove.append((u, v, k))
                # make sure we don't try to add duplicate edges
                # will remove two edges for every one we add
                if {new_u, new_v} not in edges_inside_supercell:
                    # normalize direction
                    if new_v < new_u:
                        new_u, new_v = new_v, new_u
                    edges_inside_supercell.append({new_u, new_v})
                    edges_to_add.append((new_u, new_v, new_d))
            else:
                # want to find new_v such that we have
                # full periodic boundary conditions
                # so that nodes on one side of supercell
                # are connected to nodes on opposite side
                v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
                # find new to_jimage
                # use np.around to fix issues with finite precision leading to incorrect image
                v_expec_image = np.around(v_expec_frac, decimals=3)
                v_expec_image = v_expec_image - v_expec_image % 1
                v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
                v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
                v_present = kd_tree.query(v_expec)
                v_present = v_present[1] if v_present[0] <= tol else None
                if v_present is not None:
                    new_u = u
                    new_v = v_present
                    new_d = d.copy()
                    new_to_jimage = tuple(map(int, v_expec_image))
                    # normalize direction
                    if new_v < new_u:
                        new_u, new_v = new_v, new_u
                        new_to_jimage = tuple(np.multiply(-1, d['to_jimage']).astype(int))
                    new_d['to_jimage'] = new_to_jimage
                    edges_to_remove.append((u, v, k))
                    if (new_u, new_v, new_to_jimage) not in new_periodic_images:
                        edges_to_add.append((new_u, new_v, new_d))
                        new_periodic_images.append((new_u, new_v, new_to_jimage))
    logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove),
                                                                  len(edges_to_add)))
    # add/delete marked edges
    # (fixed: loop variable previously shadowed the edges_to_remove list)
    for edge_to_remove in edges_to_remove:
        new_g.remove_edge(*edge_to_remove)
    for (u, v, d) in edges_to_add:
        new_g.add_edge(u, v, **d)
    # return new instance of StructureGraph with supercell
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "structure": new_structure.as_dict(),
         "graphs": json_graph.adjacency_data(new_g)}
    sg = StructureGraph.from_dict(d)
    return sg
def __rmul__(self, other):
    """Right multiplication: delegate to __mul__ (supercell scaling commutes)."""
    return self * other
def _edges_to_string(self, g):
    """
    Return a human-readable, multi-line table of the edges in graph g.

    Columns are from-index, to-index and periodic image; a weight column
    is appended only when the graph defines an 'edge_weight_name'
    attribute (units, if present, are folded into the column label).

    :param g: networkx graph to summarize (typically self.graph)
    :return (str): formatted edge table
    """
    header = "from to to_image "
    header_line = "---- ---- ------------"
    edge_weight_name = g.graph["edge_weight_name"]
    if edge_weight_name:
        print_weights = ["weight"]
        edge_label = g.graph["edge_weight_name"]
        edge_weight_units = g.graph["edge_weight_units"]
        if edge_weight_units:
            # e.g. "bond_length (Å)"
            edge_label += " ({})".format(edge_weight_units)
        header += " {}".format(edge_label)
        # pad the rule to at least 18 chars so the weight column lines up
        header_line += " {}".format("-"*max([18, len(edge_label)]))
    else:
        print_weights = False
    s = header + "\n" + header_line + "\n"
    edges = list(g.edges(data=True))
    # sort edges for consistent ordering
    edges.sort(key=itemgetter(0,1))
    if print_weights:
        for u, v, data in edges:
            s += "{:4} {:4} {:12} {:.3e}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))),
                                                   data.get("weight", 0))
    else:
        for u, v, data in edges:
            s += "{:4} {:4} {:12}\n".format(u, v,
                                            str(data.get("to_jimage", (0, 0, 0))))
    return s
def __str__(self):
    """Readable summary: structure, graph name, and the edge table."""
    pieces = [
        "Structure Graph",
        "\nStructure: \n{}".format(self.structure.__str__()),
        "\nGraph: {}\n".format(self.name),
        self._edges_to_string(self.graph),
    ]
    return "".join(pieces)
def __repr__(self):
    """Repr form: same layout as __str__ but using structure.__repr__()."""
    pieces = [
        "Structure Graph",
        "\nStructure: \n{}".format(self.structure.__repr__()),
        "\nGraph: {}\n".format(self.name),
        self._edges_to_string(self.graph),
    ]
    return "".join(pieces)
def __len__(self):
    """
    :return: length of Structure / number of nodes in graph
    """
    num_sites = len(self.structure)
    return num_sites
def sort(self, key=None, reverse=False):
    """
    Same as Structure.sort(), also remaps nodes in graph.

    :param key: key callable passed to sorted() for ordering sites
    :param reverse: if True, sort sites in descending order
    :return:
    """
    old_structure = self.structure.copy()
    # sort Structure
    self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
    # apply Structure ordering to graph
    mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
    self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
    # normalize directions of edges so u <= v always holds; flipping an
    # edge also inverts its periodic image vector
    edges_to_remove = []
    edges_to_add = []
    for u, v, k, d in self.graph.edges(keys=True, data=True):
        if v < u:
            new_v, new_u, new_d = u, v, d.copy()
            new_d['to_jimage'] = tuple(np.multiply(-1, d['to_jimage']).astype(int))
            edges_to_remove.append((u, v, k))
            edges_to_add.append((new_u, new_v, new_d))
    # add/delete marked edges
    # (fix: loop variable previously shadowed the edges_to_remove list)
    for edge in edges_to_remove:
        self.graph.remove_edge(*edge)
    for (u, v, d) in edges_to_add:
        self.graph.add_edge(u, v, **d)
def __copy__(self):
    """Deep-ish copy by round-tripping through the dict representation."""
    serialized = self.as_dict()
    return StructureGraph.from_dict(serialized)
def __eq__(self, other):
    """
    Two StructureGraphs are equal if they have equal Structures,
    and have the same edges between Sites. Edge weights can be
    different and StructureGraphs can still be considered equal.

    :param other: StructureGraph
    :return (bool):
    """
    # re-index the other graph so both use the same node ordering;
    # frac_coords serves as a convenient hashable key for PeriodicSite
    index_by_coords = {tuple(site.frac_coords): self.structure.index(site)
                       for site in other.structure}
    other_sorted = other.__copy__()
    other_sorted.sort(key=lambda site: index_by_coords[tuple(site.frac_coords)])
    def edge_set(graph):
        # compare edges by endpoints and periodic image only,
        # deliberately ignoring weights
        return {(u, v, d['to_jimage'])
                for u, v, d in graph.edges(keys=False, data=True)}
    if edge_set(self.graph) != edge_set(other_sorted.graph):
        return False
    return self.structure == other_sorted.structure
def diff(self, other, strict=True):
    """
    Compares two StructureGraphs. Returns dict with
    keys 'self', 'other', 'both' with edges that are
    present in only one StructureGraph ('self' and
    'other'), and edges that are present in both.

    The Jaccard distance is a simple measure of the
    dissimilarity between two StructureGraphs (ignoring
    edge weights), and is defined by 1 - (size of the
    intersection / size of the union) of the sets of
    edges. This is returned with key 'dist'.

    Important note: all node indices are in terms
    of the StructureGraph this method is called
    from, not the 'other' StructureGraph: there
    is no guarantee the node indices will be the
    same if the underlying Structures are ordered
    differently.

    :param other: StructureGraph
    :param strict: if False, will compare bonds
        from different Structures, with node indices
        replaced by Specie strings, will not count
        number of occurrences of bonds
    :return: dict with keys 'self', 'other', 'both', 'dist'
    :raises ValueError: if strict and the underlying Structures differ
    """
    if self.structure != other.structure and strict:
        # fix: the ValueError was previously *returned*, not raised, so
        # callers silently received an exception instance as the result
        raise ValueError("Meaningless to compare StructureGraphs if "
                         "corresponding Structures are different.")
    if strict:
        # sort for consistent node indices
        # PeriodicSite should have a proper __hash__() value,
        # using its frac_coords as a convenient key
        mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
        other_sorted = other.__copy__()
        other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
        edges = {(u, v, d['to_jimage'])
                 for u, v, d in self.graph.edges(keys=False, data=True)}
        edges_other = {(u, v, d['to_jimage'])
                       for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
    else:
        # non-strict: compare by species pair only, ignoring indices/images
        edges = {(str(self.structure[u].specie),
                  str(self.structure[v].specie))
                 for u, v, d in self.graph.edges(keys=False, data=True)}
        edges_other = {(str(other.structure[u].specie),
                        str(other.structure[v].specie))
                       for u, v, d in other.graph.edges(keys=False, data=True)}
    if len(edges) == 0 and len(edges_other) == 0:
        jaccard_dist = 0  # by definition
    else:
        jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
    return {
        'self': edges - edges_other,
        'other': edges_other - edges,
        'both': edges.intersection(edges_other),
        'dist': jaccard_dist
    }
def get_subgraphs_as_molecules(self, use_weights=False):
    """
    Retrieve subgraphs as molecules, useful for extracting
    molecules from periodic crystals.

    Will only return unique molecules, not any duplicates
    present in the crystal (a duplicate defined as an
    isomorphic subgraph).

    :param use_weights (bool): If True, only treat subgraphs
        as isomorphic if edges have the same weights. Typically,
        this means molecules will need to have the same bond
        lengths to be defined as duplicates, otherwise bond
        lengths can differ. This is a fairly robust approach,
        but will treat e.g. enantiomers as being duplicates.
    :return: list of unique Molecules in Structure
    """
    # creating a supercell is an easy way to extract
    # molecules (and not, e.g., layers of a 2D crystal)
    # without adding extra logic
    if getattr(self, '_supercell_sg', None) is None:
        self._supercell_sg = supercell_sg = self*(3,3,3)
    # NOTE(review): supercell_sg is only bound when the cache above is
    # cold; a second call on the same instance would hit a NameError
    # here — confirm whether repeat calls are expected.
    # make undirected to find connected subgraphs
    supercell_sg.graph = nx.Graph(supercell_sg.graph)
    # find subgraphs
    # NOTE(review): nx.connected_component_subgraphs was removed in
    # networkx 2.4 — confirm the pinned networkx version supports it.
    all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
    # discount subgraphs that lie across *supercell* boundaries
    # these will subgraphs representing crystals
    molecule_subgraphs = []
    for subgraph in all_subgraphs:
        # an edge leaving the supercell implies an infinitely-extended
        # (crystalline) network rather than an isolated molecule
        intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
                                   for u, v, d in subgraph.edges(data=True)])
        if not intersects_boundary:
            molecule_subgraphs.append(subgraph)
    # add specie names to graph to be able to test for isomorphism
    for subgraph in molecule_subgraphs:
        for n in subgraph:
            subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
    # now define how we test for isomorphism
    def node_match(n1, n2):
        # nodes match when they carry the same chemical species string
        return n1['specie'] == n2['specie']
    def edge_match(e1, e2):
        # edges optionally match on weight (e.g. bond length)
        if use_weights:
            return e1['weight'] == e2['weight']
        else:
            return True
    # prune duplicate subgraphs
    unique_subgraphs = []
    for subgraph in molecule_subgraphs:
        already_present = [nx.is_isomorphic(subgraph, g,
                                            node_match=node_match,
                                            edge_match=edge_match)
                           for g in unique_subgraphs]
        if not any(already_present):
            unique_subgraphs.append(subgraph)
    # get Molecule objects for each subgraph
    molecules = []
    for subgraph in unique_subgraphs:
        coords = [supercell_sg.structure[n].coords for n
                  in subgraph.nodes()]
        species = [supercell_sg.structure[n].specie for n
                   in subgraph.nodes()]
        molecule = Molecule(species, coords)
        # shift so origin is at center of mass
        molecule = molecule.get_centered_molecule()
        molecules.append(molecule)
    return molecules
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
    """
    If constructing this class manually, use the `with_empty_graph`
    method or `with_local_env_strategy` method (using an algorithm
    provided by the `local_env` module, such as O'Keeffe).

    This class that contains connection information:
    relationships between sites represented by a Graph structure,
    and an associated structure object.

    This class uses the NetworkX package to store and operate
    on the graph itself, but contains a lot of helper methods
    to make associating a graph with a given molecule easier.

    Use cases for this include storing bonding information,
    NMR J-couplings, Heisenberg exchange parameters, etc.

    :param molecule: Molecule object
    :param graph_data: dict containing graph information in
        dict format (not intended to be constructed manually,
        see as_dict method for format)
    """
    if isinstance(molecule, MoleculeGraph):
        # just make a copy from input
        # NOTE(review): only graph_data is taken from the input here;
        # self.molecule below is then set to the MoleculeGraph instance
        # itself rather than its underlying Molecule — confirm intended.
        graph_data = molecule.as_dict()['graphs']
    self.molecule = molecule
    self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
    # tidy up edge attr dicts, reading to/from json duplicates
    # information
    for u, v, k, d in self.graph.edges(keys=True, data=True):
        if 'id' in d:
            del d['id']
        if 'key' in d:
            del d['key']
        # ensure images are tuples (conversion to lists happens
        # when serializing back from json), it's important images
        # are hashable/immutable
        if 'to_jimage' in d:
            d['to_jimage'] = tuple(d['to_jimage'])
        if 'from_jimage' in d:
            d['from_jimage'] = tuple(d['from_jimage'])
@classmethod
def with_empty_graph(cls, molecule, name="bonds",
                     edge_weight_name=None,
                     edge_weight_units=None):
    """
    Constructor for MoleculeGraph, returns a MoleculeGraph
    object with an empty graph (no edges, only nodes defined
    that correspond to Sites in Molecule).

    :param molecule (Molecule):
    :param name (str): name of graph, e.g. "bonds"
    :param edge_weight_name (str): name of edge weights,
        e.g. "bond_length" or "exchange_constant"
    :param edge_weight_units (str): name of edge weight units
        e.g. "Å" or "eV"
    :return (MoleculeGraph):
    """
    # named weights without units are ambiguous, so insist on units
    if edge_weight_name and (edge_weight_units is None):
        raise ValueError("Please specify units associated "
                         "with your edge weights. Can be "
                         "empty string if arbitrary or "
                         "dimensionless.")
    # one node per site; graph-level attributes are book-keeping only
    # and don't change how the graph behaves
    graph_attributes = {"edge_weight_name": edge_weight_name,
                        "edge_weight_units": edge_weight_units,
                        "name": name}
    graph = nx.MultiDiGraph(**graph_attributes)
    graph.add_nodes_from(range(len(molecule)))
    return cls(molecule, graph_data=json_graph.adjacency_data(graph))
@staticmethod
def with_local_env_strategy(molecule, strategy):
    """
    Constructor for MoleculeGraph, using a strategy
    from :Class: `pymatgen.analysis.local_env`.

    Molecules will be put into a large artificial box for calculation
    of bonds using a NearNeighbor strategy, since some strategies
    assume periodic boundary conditions.

    :param molecule: Molecule object
    :param strategy: an instance of a
        :Class: `pymatgen.analysis.local_env.NearNeighbors` object
    :return: MoleculeGraph with edges found by the strategy
    """
    mg = MoleculeGraph.with_empty_graph(molecule, name="bonds",
                                        edge_weight_name="weight",
                                        edge_weight_units="")
    # NearNeighbor classes only (generally) work with structures
    # molecules have to be boxed first
    coords = molecule.cart_coords
    # pad each box dimension by 100 beyond the molecular extent so the
    # molecule cannot bond to its own periodic images
    a = max(coords[:, 0]) - min(coords[:, 0]) + 100
    b = max(coords[:, 1]) - min(coords[:, 1]) + 100
    c = max(coords[:, 2]) - min(coords[:, 2]) + 100
    molecule = molecule.get_boxed_structure(a, b, c, no_cross=True)
    for n in range(len(molecule)):
        neighbors = strategy.get_nn_info(molecule, n)
        for neighbor in neighbors:
            # all bonds in molecules should not cross
            # (artificial) periodic boundaries
            if not np.array_equal(neighbor['image'], [0, 0, 0]):
                continue
            # local_env will always try to add two edges
            # for any one bond, one from site u to site v
            # and another form site v to site u: this is
            # harmless, so warn_duplicates=False
            mg.add_edge(from_index=n,
                        to_index=neighbor['site_index'],
                        weight=neighbor['weight'],
                        warn_duplicates=False)
    return mg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph['name']
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph['edge_weight_name']
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph['edge_weight_units']
def add_edge(self, from_index, to_index,
weight=None, warn_duplicates=True,
edge_properties=None):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
However, images will always always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn("Trying to add an edge that already exists from "
"site {} to site {}.".format(from_index,
to_index))
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index,
weight=weight,
**edge_properties)
else:
self.graph.add_edge(from_index, to_index,
**edge_properties)
def alter_edge(self, from_index, to_index,
new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError("Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
))
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]['weight'] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index,
from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError("Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
))
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
    """
    Split MoleculeGraph into two MoleculeGraphs by
    breaking a set of bonds. This function uses
    MoleculeGraph.break_edge repeatedly to create
    disjoint graphs (two or more separate molecules).
    This function does not only alter the graph
    information, but also changes the underlying
    Molecules.

    If the bonds parameter does not include sufficient
    bonds to separate two molecules, then this function will
    fail.

    Currently, this function naively assigns the charge
    of the total molecule to a single submolecule. A
    later effort will be to actually accurately assign
    charge.

    NOTE: This function does not modify the original
    MoleculeGraph. It creates a copy, modifies that, and
    returns two or more new MoleculeGraph objects.

    :param bonds: list of tuples (from_index, to_index)
        representing bonds to be broken to split the MoleculeGraph.
    :param alterations: a dict {(from_index, to_index): alt},
        where alt is a dictionary including weight and/or edge
        properties to be changed following the split.
        NOTE(review): the caller's alterations dict is mutated
        below (its "weight" entries are deleted) — confirm intended.
    :param allow_reverse: If allow_reverse is True, then break_edge will
        attempt to break both (from_index, to_index) and, failing that,
        will attempt to break (to_index, from_index).
    :return: list of MoleculeGraphs
    """
    # work on a deep copy so self is left untouched
    original = copy.deepcopy(self)
    for bond in bonds:
        original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
    if nx.is_weakly_connected(original.graph):
        raise RuntimeError("Cannot split molecule; \
MoleculeGraph is still connected.")
    else:
        # alter any bonds before partition, to avoid remapping
        if alterations is not None:
            for (u, v) in alterations.keys():
                if "weight" in alterations[(u, v)]:
                    weight = alterations[(u, v)]["weight"]
                    # remove weight so remaining entries are edge properties
                    del alterations[(u, v)]["weight"]
                    edge_properties = alterations[(u, v)] \
                        if len(alterations[(u, v)]) != 0 else None
                    original.alter_edge(u, v, new_weight=weight,
                                        new_edge_properties=edge_properties)
                else:
                    original.alter_edge(u, v,
                                        new_edge_properties=alterations[(u, v)])
        sub_mols = []
        # Had to use nx.weakly_connected_components because of deprecation
        # of nx.weakly_connected_component_subgraphs
        components = nx.weakly_connected_components(original.graph)
        subgraphs = [original.graph.subgraph(c) for c in components]
        for subg in subgraphs:
            # start by extracting molecule information
            pre_mol = original.molecule
            nodes = subg.nodes
            # create mapping to translate edges from old graph to new
            # every list (species, coords, etc.) automatically uses this
            # mapping, because they all form lists sorted by rising index
            mapping = {}
            for i in range(len(nodes)):
                mapping[list(nodes)[i]] = i
            # there must be a more elegant way to do this
            sites = [pre_mol._sites[n] for n in
                     range(len(pre_mol._sites)) if n in nodes]
            # just give charge to whatever subgraph has node with index 0
            # TODO: actually figure out how to distribute charge
            if 0 in nodes:
                charge = pre_mol.charge
            else:
                charge = 0
            new_mol = Molecule.from_sites(sites, charge=charge)
            # relabel nodes in graph to match mapping
            new_graph = nx.relabel_nodes(subg, mapping)
            graph_data = json_graph.adjacency_data(new_graph)
            # create new MoleculeGraph
            sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
        return sub_mols
def substitute_group(self, index, func_grp, bond_order=1, graph_dict=None, strategy=None, strategy_params=None):
    """
    Builds off of Molecule.substitute to replace an atom in self.molecule
    with a functional group. This method also amends self.graph to
    incorporate the new functional group.

    NOTE: using a MoleculeGraph will generally produce a different graph
    compared with using a Molecule or str (when not using graph_dict).
    This is because of the reordering that occurs when using some of the
    local_env strategies.

    :param index: Index of atom to substitute.
    :param func_grp: Substituent molecule. There are three options:
        1. Providing an actual molecule as the input. The first atom
           must be a DummySpecie X, indicating the position of
           nearest neighbor. The second atom must be the next
           nearest atom. For example, for a methyl group
           substitution, func_grp should be X-CH3, where X is the
           first site and C is the second site. What the code will
           do is to remove the index site, and connect the nearest
           neighbor to the C atom in CH3. The X-C bond indicates the
           directionality to connect the atoms.
        2. A string name. The molecule will be obtained from the
           relevant template in func_groups.json.
        3. A MoleculeGraph object.
    :param bond_order: A specified bond order to calculate the bond
        length between the attached functional group and the nearest
        neighbor site. Defaults to 1.
    :param graph_dict: Dictionary representing the bonds of the functional
        group (format: {(u, v): props}, where props is a dictionary of
        properties, including weight. If None, then the algorithm
        will attempt to automatically determine bonds using one of
        a list of strategies defined in pymatgen.analysis.local_env.
    :param strategy: Class from pymatgen.analysis.local_env. If None,
        MinimumDistanceNN will be used.
    :param strategy_params: dictionary of keyword arguments for strategy.
        If None, default parameters will be used.
    :return:
    """
    def map_indices(func_grp):
        # Map each functional-group atom index onto the index it now
        # occupies after being appended to self.molecule.
        mapping = {}
        # Get indices now occupied by functional group
        # Subtracting 1 because the dummy atom X should not count
        atoms = len(func_grp) - 1
        offset = len(self.molecule) - atoms
        for i in range(atoms):
            mapping[i] = i + offset
        return mapping
    # Work is simplified if a graph is already in place
    if isinstance(func_grp, MoleculeGraph):
        self.molecule.substitute(index, func_grp.molecule,
                                 bond_order=bond_order)
        mapping = map_indices(func_grp.molecule)
        # copy each functional-group bond into self.graph, translating
        # indices through the mapping
        for (u, v) in list(func_grp.graph.edges()):
            edge_props = func_grp.graph.get_edge_data(u, v)[0]
            weight = None
            if "weight" in edge_props.keys():
                weight = edge_props["weight"]
                del edge_props["weight"]
            self.add_edge(mapping[u], mapping[v],
                          weight=weight, edge_properties=edge_props)
    else:
        if isinstance(func_grp, Molecule):
            func_grp = copy.deepcopy(func_grp)
        else:
            try:
                func_grp = copy.deepcopy(FunctionalGroups[func_grp])
            except:
                # NOTE(review): bare except also masks unrelated errors;
                # catching KeyError would be more precise.
                raise RuntimeError("Can't find functional group in list. "
                                   "Provide explicit coordinate instead")
        self.molecule.substitute(index, func_grp, bond_order=bond_order)
        mapping = map_indices(func_grp)
        # Remove dummy atom "X"
        func_grp.remove_species("X")
        if graph_dict is not None:
            for (u, v) in graph_dict.keys():
                edge_props = graph_dict[(u, v)]
                # NOTE(review): unlike the other branches, weight is not
                # reset to None per edge here, so an edge without a
                # "weight" key reuses the previous iteration's value (or
                # raises NameError on the first edge) — confirm.
                if "weight" in edge_props.keys():
                    weight = edge_props["weight"]
                    del edge_props["weight"]
                self.add_edge(mapping[u], mapping[v],
                              weight=weight, edge_properties=edge_props)
        else:
            if strategy_params is None:
                strategy_params = {}
            strat = MinimumDistanceNN(**strategy_params) \
                if strategy is None else strategy(**strategy_params)
            # determine the functional group's bonds automatically
            graph = self.with_local_env_strategy(func_grp, strat)
            for (u, v) in list(graph.graph.edges()):
                edge_props = graph.graph.get_edge_data(u, v)[0]
                weight = None
                if "weight" in edge_props.keys():
                    weight = edge_props["weight"]
                    del edge_props["weight"]
                self.add_edge(mapping[u], mapping[v],
                              weight=weight, edge_properties=edge_props)
def find_rings(self, including=None):
    """
    Find ring structures in the MoleculeGraph.

    NOTE: Currently, this function behaves as
    expected for single rings, but fails (miserably)
    on molecules with more than one ring.

    :param including: list of site indices. If
        including is not None, then find_rings will
        only return those rings including the specified
        sites. By default, this parameter is None, and
        all rings will be returned.
    :return: list of edge lists, one per unique cycle; each edge
        is a (predecessor, node) tuple.
        NOTE(review): an earlier description said dict {index:cycle};
        the code below returns a list — docstring updated to match,
        confirm against callers.
    """
    # Copies self.graph such that all edges (u, v) matched by edges (v, u)
    undirected = self.graph.to_undirected()
    directed = undirected.to_directed()
    cycles_nodes = []
    cycles_edges = []
    # Remove all two-edge cycles
    all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
    # Using to_directed() will mean that each cycle always appears twice
    # So, we must also remove duplicates
    unique_sorted = []
    unique_cycles = []
    for cycle in all_cycles:
        # cycles with the same node set are the same ring traversed
        # in opposite directions
        if sorted(cycle) not in unique_sorted:
            unique_sorted.append(sorted(cycle))
            unique_cycles.append(cycle)
    if including is None:
        cycles_nodes = unique_cycles
    else:
        for i in including:
            for cycle in unique_cycles:
                if i in cycle and cycle not in cycles_nodes:
                    cycles_nodes.append(cycle)
    for cycle in cycles_nodes:
        edges = []
        for i, e in enumerate(cycle):
            # pair each node with its predecessor; index i-1 == -1
            # wraps around, closing the ring
            edges.append((cycle[i-1], e))
        cycles_edges.append(edges)
    return cycles_edges
def get_connected_sites(self, n):
    """
    Returns a named tuple of neighbors of site n:
    periodic_site, jimage, index, weight.
    Index is the index of the corresponding site
    in the original structure, weight can be
    None if not defined.

    :param n: index of Site in Molecule
    :return: list of ConnectedSite tuples,
        sorted by closest first
    """
    connected_sites = set()
    # look at both edge directions so neighbors are found regardless
    # of how the (directed) edge happens to be stored
    out_edges = [(u, v, d) for u, v, d in self.graph.out_edges(n, data=True)]
    in_edges = [(u, v, d) for u, v, d in self.graph.in_edges(n, data=True)]
    for u, v, d in out_edges + in_edges:
        # NOTE(review): for in-edges, v is n itself rather than the
        # neighbor (u) — confirm whether u should be used for those.
        site = self.molecule[v]
        dist = self.molecule[u].distance(self.molecule[v])
        weight = d.get('weight', None)
        # molecules have no periodic boundary, so jimage is always zero
        connected_site = ConnectedSite(site=site,
                                       jimage=(0, 0, 0),
                                       index=v,
                                       weight=weight,
                                       dist=dist)
        connected_sites.add(connected_site)
    # return list sorted by closest sites first
    connected_sites = list(connected_sites)
    connected_sites.sort(key=lambda x: x.dist)
    return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(self, filename="graph",
                       diff=None,
                       hide_unconnected_nodes=False,
                       hide_image_edges=True,
                       edge_colors=False,
                       node_labels=False,
                       weight_labels=False,
                       image_labels=False,
                       color_scheme="VESTA",
                       keep_dot=False,
                       algo="fdp"):
    """
    Draws graph using GraphViz.

    The networkx graph object itself can also be drawn
    with networkx's in-built graph drawing methods, but
    note that this might give misleading results for
    multigraphs (edges are super-imposed on each other).

    If visualization is difficult to interpret,
    `hide_image_edges` can help, especially in larger
    graphs.

    :param filename: filename to output, will detect filetype
        from extension (any graphviz filetype supported, such as
        pdf or png)
    :param diff (StructureGraph): an additional graph to
        compare with, will color edges red that do not exist in diff
        and edges green that are in diff graph but not in the
        reference graph
    :param hide_unconnected_nodes: if True, hide unconnected
        nodes
    :param hide_image_edges: if True, do not draw edges that
        go through periodic boundaries
    :param edge_colors (bool): if True, use node colors to
        color edges
    :param node_labels (bool): if True, label nodes with
        species and site index
    :param weight_labels (bool): if True, label edges with
        weights
    :param image_labels (bool): if True, label edges with
        their periodic images (usually only used for debugging,
        edges to periodic images always appear as dashed lines)
    :param color_scheme (str): "VESTA" or "JMOL"
    :param keep_dot (bool): keep GraphViz .dot file for later
        visualization
    :param algo: any graphviz algo, "neato" (for simple graphs)
        or "fdp" (for more crowded graphs) usually give good outputs
    :return:
    """
    if not which(algo):
        raise RuntimeError("StructureGraph graph drawing requires "
                           "GraphViz binaries to be in the path.")
    # Developer note: NetworkX also has methods for drawing
    # graphs using matplotlib, these also work here. However,
    # a dedicated tool like GraphViz allows for much easier
    # control over graph appearance and also correctly displays
    # mutli-graphs (matplotlib can superimpose multiple edges).
    g = self.graph.copy()
    g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"}
    # add display options for nodes
    for n in g.nodes():
        # get label by species name
        label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
        # use standard color scheme for nodes
        c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
        # get contrasting font color
        # magic numbers account for perceived luminescence
        # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
        fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587
                                      + c[2] * 0.114) / 255 < 0.5 else '#ffffff'
        # convert color to hex string
        color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
        g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,
                   fontname="Helvetica-bold", style="filled", shape="circle")
    edges_to_delete = []
    # add display options for edges
    for u, v, k, d in g.edges(keys=True, data=True):
        # retrieve from/to images, set as origin if not defined
        # (fix: the key tested here was previously "to_image", which is
        # never set on edges, so periodic-image edges were styled and
        # hidden as if they were at the origin)
        if "to_jimage" in d:
            to_image = d['to_jimage']
        else:
            to_image = (0, 0, 0)
        # set edge style: dashed edges cross periodic boundaries
        d['style'] = "solid"
        if to_image != (0, 0, 0):
            d['style'] = "dashed"
            if hide_image_edges:
                edges_to_delete.append((u, v, k))
        # don't show edge directions
        d['arrowhead'] = "none"
        # only add labels for images that are not the origin
        if image_labels:
            d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
            d['arrowhead'] = "normal" if d['headlabel'] else "none"
        # optionally color edges using node colors
        color_u = g.node[u]['fillcolor']
        color_v = g.node[v]['fillcolor']
        d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
        # optionally add weights to graph
        if weight_labels:
            units = g.graph.get('edge_weight_units', "")
            if d.get('weight'):
                d['label'] = "{:.2f} {}".format(d['weight'], units)
        # update edge with our new style attributes
        g.edges[u, v, k].update(d)
    # optionally remove periodic image edges,
    # these can be confusing due to periodic boundaries
    if hide_image_edges:
        for edge_to_delete in edges_to_delete:
            g.remove_edge(*edge_to_delete)
    # optionally hide unconnected nodes,
    # these can appear when removing periodic edges
    if hide_unconnected_nodes:
        g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])
    # optionally highlight differences with another graph
    if diff:
        diff = self.diff(diff, strict=True)
        green_edges = []
        red_edges = []
        for u, v, k, d in g.edges(keys=True, data=True):
            if (u, v, d['to_jimage']) in diff['self']:
                # edge has been deleted
                red_edges.append((u, v, k))
            elif (u, v, d['to_jimage']) in diff['other']:
                # edge has been added
                green_edges.append((u, v, k))
        for u, v, k in green_edges:
            g.edges[u, v, k].update({'color_uv': '#00ff00'})
        for u, v, k in red_edges:
            g.edges[u, v, k].update({'color_uv': '#ff0000'})
    basename, extension = os.path.splitext(filename)
    extension = extension[1:]
    write_dot(g, basename+".dot")
    # render via the GraphViz command-line tool, writing straight
    # into the requested output file
    with open(filename, "w") as f:
        args = [algo, "-T", extension, basename+".dot"]
        rs = subprocess.Popen(args,
                              stdout=f,
                              stdin=subprocess.PIPE, close_fds=True)
        rs.communicate()
        if rs.returncode != 0:
            raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
    if not keep_dot:
        os.remove(basename+".dot")
def as_dict(self):
    """
    As in :Class: `pymatgen.core.Molecule` except
    with using `to_dict_of_dicts` from NetworkX
    to store graph information.
    """
    serialized = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "molecule": self.molecule.as_dict(),
        "graphs": json_graph.adjacency_data(self.graph),
    }
    return serialized
@classmethod
def from_dict(cls, d):
    """
    As in :Class: `pymatgen.core.Molecule` except
    restoring graphs using `from_dict_of_dicts`
    from NetworkX to restore graph information.
    """
    molecule = Molecule.from_dict(d['molecule'])
    return cls(molecule, d['graphs'])
def _edges_to_string(self, g):
    """Render the edges of graph ``g`` as a plain-text table.

    One row per edge (sorted by node indices for stable output); a weight
    column is appended only when the graph declares an edge weight name.
    """
    header = "from to to_image "
    header_line = "---- ---- ------------"
    weight_name = g.graph["edge_weight_name"]
    show_weights = bool(weight_name)
    if show_weights:
        label = weight_name
        units = g.graph["edge_weight_units"]
        if units:
            label += " ({})".format(units)
        header += " {}".format(label)
        header_line += " {}".format("-" * max(18, len(label)))
    lines = [header, header_line]
    # sort edges for consistent ordering
    for u, v, attrs in sorted(g.edges(data=True), key=itemgetter(0, 1)):
        jimage = str(attrs.get("to_jimage", (0, 0, 0)))
        if show_weights:
            lines.append("{:4} {:4} {:12} {:.3e}".format(u, v, jimage,
                                                         attrs.get("weight", 0)))
        else:
            lines.append("{:4} {:4} {:12}".format(u, v, jimage))
    return "\n".join(lines) + "\n"
def __str__(self):
    """Human-readable summary: the Molecule, graph name, and edge table."""
    parts = [
        "Molecule Graph",
        "\nMolecule: \n{}".format(str(self.molecule)),
        "\nGraph: {}\n".format(self.name),
        self._edges_to_string(self.graph),
    ]
    return "".join(parts)
def __repr__(self):
    """Like __str__, but embeds the Molecule's repr instead of its str."""
    parts = [
        "Molecule Graph",
        "\nMolecule: \n{}".format(repr(self.molecule)),
        "\nGraph: {}\n".format(self.name),
        self._edges_to_string(self.graph),
    ]
    return "".join(parts)
def __len__(self):
    """
    :return: number of sites in the Molecule, which equals the number
        of nodes in the graph (one node per site)
    """
    return len(self.molecule)
def sort(self, key=None, reverse=False):
    """
    Same as Molecule.sort(), and additionally remaps graph nodes so they
    follow the new site ordering.

    :param key: sort key applied to sites, as in sorted()
    :param reverse: reverse the sort order
    :return: None (modifies self in place)
    """
    old_molecule = self.molecule.copy()
    # sort Molecule (goes through the private _sites list, as the
    # original implementation did)
    self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
    # apply Molecule ordering to graph: old node index -> new node index
    mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
    self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
    # normalize directions of edges so that u <= v
    edges_to_remove = []
    edges_to_add = []
    for u, v, k, d in self.graph.edges(keys=True, data=True):
        if v < u:
            new_v, new_u, new_d = u, v, d.copy()
            # molecules have no periodic images, so to_jimage is reset
            new_d['to_jimage'] = (0, 0, 0)
            edges_to_remove.append((u, v, k))
            edges_to_add.append((new_u, new_v, new_d))
    # add/delete marked edges.  Fix: use a distinct loop variable; the
    # original wrote "for edges_to_remove in edges_to_remove", shadowing
    # the list being iterated.
    for edge in edges_to_remove:
        self.graph.remove_edge(*edge)
    for (u, v, d) in edges_to_add:
        self.graph.add_edge(u, v, **d)
def __copy__(self):
    """Return an independent copy by round-tripping through the dict form."""
    serialized = self.as_dict()
    return MoleculeGraph.from_dict(serialized)
def __eq__(self, other):
    """
    Two MoleculeGraphs are equal if they have equal Molecules,
    and have the same edges between Sites. Edge weights can be
    different and MoleculeGraphs can still be considered equal.

    :param other: MoleculeGraph
    :return (bool):
    """
    # sort other so its node indices are consistent with self's ordering,
    # keyed on Cartesian coords (Molecules have no lattice, hence no
    # frac_coords — the original comment mentioning frac_coords was stale)
    mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
    other_sorted = other.__copy__()
    other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
    # compare edge sets only; weights are deliberately ignored
    # (data was fetched but unused in the original comprehension)
    edges = set(self.graph.edges(keys=False))
    edges_other = set(other_sorted.graph.edges(keys=False))
    return (edges == edges_other) and \
           (self.molecule == other_sorted.molecule)
def diff(self, other, strict=True):
    """
    Compares two MoleculeGraphs. Returns dict with
    keys 'self', 'other', 'both' with edges that are
    present in only one MoleculeGraph ('self' and
    'other'), and edges that are present in both.

    The Jaccard distance is a simple measure of the
    dissimilarity between two MoleculeGraphs (ignoring
    edge weights), and is defined by 1 - (size of the
    intersection / size of the union) of the sets of
    edges. This is returned with key 'dist'.

    Important note: all node indices are in terms
    of the MoleculeGraph this method is called
    from, not the 'other' MoleculeGraph: there
    is no guarantee the node indices will be the
    same if the underlying Molecules are ordered
    differently.

    :param other: MoleculeGraph
    :param strict: if False, will compare bonds
        from different Molecules, with node indices
        replaced by Specie strings, will not count
        number of occurrences of bonds
    :return: dict with keys 'self', 'other', 'both', 'dist'
    :raises ValueError: in strict mode when the Molecules differ
    """
    if self.molecule != other.molecule and strict:
        # bug fix: the original *returned* the exception object
        # instead of raising it
        raise ValueError("Meaningless to compare MoleculeGraphs if "
                         "corresponding Molecules are different.")
    if strict:
        # sort other so node indices are consistent with self's ordering,
        # keyed on Cartesian coords.  Bug fix: Molecule sites have no
        # frac_coords (no lattice); the original used site.frac_coords,
        # which raises AttributeError (cf. __eq__, which uses coords).
        mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
        other_sorted = other.__copy__()
        other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
        edges = {(u, v, d.get('to_jimage', (0, 0, 0)))
                 for u, v, d in self.graph.edges(keys=False, data=True)}
        edges_other = {(u, v, d.get('to_jimage', (0, 0, 0)))
                       for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
    else:
        # compare by species strings only.  Bug fix: this is a
        # MoleculeGraph, so use other.molecule; the original used
        # other.structure (copy-paste from StructureGraph).
        edges = {(str(self.molecule[u].specie),
                  str(self.molecule[v].specie))
                 for u, v, d in self.graph.edges(keys=False, data=True)}
        edges_other = {(str(other.molecule[u].specie),
                        str(other.molecule[v].specie))
                       for u, v, d in other.graph.edges(keys=False, data=True)}
    if len(edges) == 0 and len(edges_other) == 0:
        jaccard_dist = 0  # by definition
    else:
        jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
    return {
        'self': edges - edges_other,
        'other': edges_other - edges,
        'both': edges.intersection(edges_other),
        'dist': jaccard_dist
    }
| nisse3000/pymatgen | pymatgen/analysis/graphs.py | Python | mit | 80,226 | [
"CRYSTAL",
"Jmol",
"pymatgen"
] | aa6440ab1cc66e9de923df8cd42145d78440e4f651d5240b8fd0feb00e363083 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio-Query-Parser.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# Invenio-Query-Parser is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio-Query-Parser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Implement query printer."""
from invenio_query_parser import ast
from invenio_query_parser.contrib.spires.config import SPIRES_KEYWORDS
from invenio_query_parser.visitor import make_visitor
from ..ast import SpiresOp
class SpiresToInvenio(object):
    """Visitor that converts a SPIRES query AST into an Invenio query AST.

    Each ``visit`` overload (dispatched by the ``make_visitor`` decorator)
    rebuilds the node of the same type from its already-visited children;
    ``SpiresOp`` nodes are translated to Invenio ``KeywordOp`` nodes.
    """

    visitor = make_visitor()

    # pylint: disable=W0613,E0102

    @visitor(ast.AndOp)
    def visit(self, node, left, right):
        return type(node)(left, right)

    @visitor(ast.OrOp)
    def visit(self, node, left, right):
        return type(node)(left, right)

    @visitor(ast.KeywordOp)
    def visit(self, node, left, right):
        return type(node)(left, right)

    @visitor(ast.RangeOp)
    def visit(self, node, left, right):
        return type(node)(left, right)

    @visitor(ast.NotOp)
    def visit(self, node, op):
        return type(node)(op)

    @visitor(ast.GreaterOp)
    def visit(self, node, op):
        return type(node)(op)

    @visitor(ast.LowerOp)
    def visit(self, node, op):
        return type(node)(op)

    @visitor(ast.GreaterEqualOp)
    def visit(self, node, op):
        return type(node)(op)

    @visitor(ast.LowerEqualOp)
    def visit(self, node, op):
        return type(node)(op)

    @visitor(ast.Keyword)
    def visit(self, node):
        return type(node)(node.value)

    @visitor(ast.Value)
    def visit(self, node):
        return type(node)(node.value)

    @visitor(ast.ValueQuery)
    def visit(self, node, op):
        return type(node)(op)

    @visitor(ast.SingleQuotedValue)
    def visit(self, node):
        return type(node)(node.value)

    @visitor(ast.DoubleQuotedValue)
    def visit(self, node):
        return type(node)(node.value)

    @visitor(ast.RegexValue)
    def visit(self, node):
        return type(node)(node.value)

    @visitor(ast.EmptyQuery)
    def visit(self, node):
        return type(node)(node.value)

    @visitor(SpiresOp)
    def visit(self, node, left, right):
        # map the SPIRES keyword to its Invenio equivalent
        left.value = SPIRES_KEYWORDS[left.value]
        # bug fix: compare strings with '==', not 'is' — identity
        # comparison with a literal only works by accident of CPython
        # string interning
        if left.value == 'author':
            return ast.KeywordOp(left, ast.DoubleQuotedValue(right.value))
        return ast.KeywordOp(left, right)
# pylint: enable=W0612,E0102
| Panos512/invenio-query-parser | invenio_query_parser/contrib/spires/walkers/spires_to_invenio.py | Python | gpl-2.0 | 3,179 | [
"VisIt"
] | d43c5be14676deafeac9c35ab01d676a4c9498e91fd1affd213942f8bb12e833 |
#!/usr/bin/env python
#------------------------------------------------------------
# Script compares efficiency of automatic derivatives vs
# analytical in mpfit.py
# Vog, 31 okt 2011
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from kapteyn import kmpfit
from matplotlib.patches import Polygon
def confpred_band(x, dfdp, prob, fitobj, f, prediction, abswei=False, err=None):
    """Compute values and limits of a confidence or prediction band.

    Shared worker for :func:`confidence_band` and :func:`prediction_band`
    (see those for full parameter documentation).  Returns the tuple
    (y, upperband, lowerband) for the model ``f`` evaluated at ``x``.
    """
    from scipy.stats import t

    # Two-sided interval: with prob = 1 - alpha, each tail holds alpha/2.
    tail = (1.0 - prob) / 2.0
    tval = t.ppf(1.0 - tail, fitobj.dof)

    cov = fitobj.covar
    params = fitobj.params
    nparams = len(params)  # number of parameters from covariance matrix

    # For absolute weights the covariance is used unscaled; otherwise it
    # is corrected with the reduced chi-squared of the fit.
    covscale = 1.0 if abswei else fitobj.rchi2_min

    # Error propagation: df2 = sum_jk (df/dp_j)(df/dp_k) * C_jk
    df2 = numpy.zeros(len(x))
    for j in range(nparams):
        for k in range(nparams):
            df2 += dfdp[j] * dfdp[k] * cov[j, k]

    if prediction:
        # prediction bands additionally include the per-point data error
        banderr = numpy.sqrt(err * err + covscale * df2)
    else:
        banderr = numpy.sqrt(covscale * df2)

    y = f(params, x)
    delta = tval * banderr
    return y, y + delta, y - delta
def confidence_band(x, dfdp, confprob, fitobj, f, err=None, abswei=False):
    """Return confidence-band limits for model ``f`` at the points ``x``.

    Thin wrapper around :func:`confpred_band` with ``prediction=False``.

    :param x: NumPy array with the abscissa values for which the
        confidence interval is wanted.
    :param dfdp: list of partial derivatives of the model, one entry per
        fitted parameter, each evaluated at ``x``.
    :param confprob: confidence probability (e.g. 0.90 or 0.95).  The
        band is a 100*(1-alpha)% band: for a given x, the 'true' value of
        f(p, x) falls within the limits with this probability.
    :param fitobj: the Fitter object from a completed kmpfit fit.
    :param f: model function, called as ``f(p, x)`` with the best-fit
        parameters ``p``.
    :param err: unused for a confidence band (accepted for symmetry with
        :func:`prediction_band`).
    :param abswei: True when the fit used absolute weights; then the
        unscaled covariance matrix elements are used.  For unit or
        relative weighting they are scaled by the reduced chi-squared.
    :returns: tuple (y, upperband, lowerband) where ``y = f(p, x)``.

    Note: parameters that were fixed in the fit have zero error and make
    no contribution to the confidence interval.
    """
    return confpred_band(x, dfdp, confprob, fitobj, f,
                         prediction=False, err=err, abswei=abswei)
def prediction_band(x, dfdp, predprob, fitobj, f, err=None, abswei=False):
    """Return prediction-band limits for model ``f`` at the points ``x``.

    Thin wrapper around :func:`confpred_band` with ``prediction=True``.

    :param x: NumPy array with the abscissa values for which the
        prediction interval is wanted.
    :param dfdp: list of partial derivatives of the model, one entry per
        fitted parameter, each evaluated at ``x``.
    :param predprob: prediction probability (e.g. 0.9 or 0.95).  The
        band is a 100*(1-alpha)% band: future observations drawn from the
        same population as the fitted data fall within it with this
        probability.
    :param fitobj: the Fitter object from a completed kmpfit fit.
    :param f: model function, called as ``f(p, x)`` with the best-fit
        parameters ``p``.
    :param err: per-point data errors, combined in quadrature with the
        propagated parameter uncertainty.
    :param abswei: True when the fit used absolute weights; then the
        unscaled covariance matrix elements are used.  For unit or
        relative weighting they are scaled by the reduced chi-squared.
    :returns: tuple (y, upperband, lowerband) where ``y = f(p, x)``.

    Note: parameters that were fixed in the fit have zero error and make
    no contribution to the prediction interval.
    """
    return confpred_band(x, dfdp, predprob, fitobj, f,
                         prediction=True, err=err, abswei=abswei)
def my_model(p, x):
    """Gaussian on a constant baseline.

    p = (amplitude, mean, sigma, zero level); evaluates
    A*exp(-(x-mu)^2 / (2*sigma^2)) + zerolev for array x.
    """
    amp, mu, sigma, baseline = p
    z = (x - mu) / sigma
    return amp * numpy.exp(-0.5 * z * z) + baseline
def my_residuals(p, data):
    """Weighted residuals (y - model) / err for kmpfit.

    The fit routine squares and sums these values itself.
    """
    x, y, err = data
    residual = y - my_model(p, x)
    return residual / err
def my_derivs(p, data, dflags):
    """Explicit partial derivatives of the Gaussian model for kmpfit.

    ``dflags[i]`` is True when the derivative w.r.t. parameter i is
    requested; rows for unrequested parameters stay zero.  The result is
    scaled by -1/err, as the fit routine expects.
    """
    x, y, err = data  # y is unused here
    amp, mu, sigma, zerolev = p
    pderiv = numpy.zeros([len(p), len(x)])
    sig2 = sigma * sigma
    xmu = x - mu
    xmu2 = xmu ** 2
    expo = numpy.exp(-xmu2 / (2.0 * sig2))
    fx = amp * expo
    # one derivative expression per parameter, in parameter order
    derivs = (
        lambda: expo,                        # d/dA
        lambda: fx * xmu / sig2,             # d/dmu
        lambda: fx * xmu2 / (sig2 * sigma),  # d/dsigma
        lambda: 1.0,                         # d/dzerolev
    )
    for i, wanted in enumerate(dflags):
        if wanted:
            pderiv[i] = derivs[i]()
    return pderiv / -err
# Artificial data: noisy samples of a known Gaussian, with per-point errors
N = 50
x = numpy.linspace(-5, 10, N)
truepars = [10.0, 5.0, 1.0, 0.0]
p0 = [9, 4.5, 0.8, 0]
rms_data = 0.8
rms_err = 0.1
y = my_model(truepars, x) + numpy.random.normal(0.0, rms_data, N)
# errors are drawn around 0.6 (not 0) so the weights stay positive
err = numpy.random.normal(0.6, rms_err, N)
#err = err*0 + 1
# The fit
fitobj = kmpfit.Fitter(residuals=my_residuals, deriv=my_derivs, data=(x, y, err))
try:
    fitobj.fit(params0=p0)
except Exception as mes:
    print("Something wrong with fit: ", mes)
    raise SystemExit
print("\n\n======== Results kmpfit with explicit partial derivatives =========")
print("Params: ", fitobj.params)
print("Errors from covariance matrix : ", fitobj.xerror)
print("Uncertainties assuming reduced Chi^2=1: ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Iterations: ", fitobj.niter)
print("Function ev: ", fitobj.nfev)
print("Status: ", fitobj.status)
print("Status Message:", fitobj.message)
print("Covariance:\n", fitobj.covar)
# Re-use my_derivs() but rescale derivatives back again with -err
dervs = my_derivs(fitobj.params, (x,y,err), (True,True,True,True))*-err
dfdp = [dervs[0], dervs[1], dervs[2], dervs[3]]
confprob = 0.95
ydummy, upperband, lowerband = confidence_band(x, dfdp, confprob, fitobj, my_model)
# polygon vertices: lower band left-to-right, then upper band right-to-left
verts_conf = list(zip(x, lowerband)) + list(zip(x[::-1], upperband[::-1]))
predprob = 0.90
ydummy, upperband, lowerband = prediction_band(x, dfdp, predprob, fitobj, my_model,
                                               err=err, abswei=False)
verts_pred = list(zip(x, lowerband)) + list(zip(x[::-1], upperband[::-1]))
# Plot the result
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure()
frame = fig.add_subplot(1,1,1)
X = numpy.linspace(x.min(), x.max(), 100)
frame.errorbar(x, y, yerr=err, fmt='go', alpha=0.7, label="Noisy data")
frame.plot(X, my_model(truepars,X), 'r', label="True data")
frame.plot(X, my_model(fitobj.params,X), 'b', lw=2, label="Fit with kmpfit")
poly = Polygon(verts_conf, closed=True, fc='g', ec='g', alpha=0.3,
               label="CI (%g)"%confprob)
frame.add_patch(poly)
poly = Polygon(verts_pred, closed=True, fc='r', ec='r', alpha=0.3,
               label="PI (%g)"%predprob)
frame.add_patch(poly)
frame.set_xlabel("X")
frame.set_ylabel("Measurement data")
frame.set_title("Confidence- and prediction bands for Gaussian model",
                fontsize=10)
delta = (x.max()-x.min())/10.0
frame.set_xlim(x.min()-delta, x.max()+delta)
frame.grid(True)
# Check prediction intervals
# NOTE: the triple-quoted string below is deliberately dead code, kept as a
# quick way to overplot extra realizations when checking the bands by eye.
"""
for i in range(500):
    y = my_model(truepars, x) + numpy.random.normal(0.0, rms_data, N)
    err = numpy.random.normal(0.0, rms_err, N)
    #frame.plot(x,y,'o')
    frame.errorbar(x, y, yerr=err, fmt='o')
"""
# A nice background for the entire plot
from matplotlib.cm import copper
frame.imshow([[0, 0],[1,1]], interpolation='bicubic', cmap=copper,
             vmin=-0.5, vmax=0.5,
             extent=(frame.get_xlim()[0], frame.get_xlim()[1],
                     frame.get_ylim()[0], frame.get_ylim()[1]),
             alpha=1)
leg = frame.legend(loc=2)
show()
| kapteyn-astro/kapteyn | doc/source/EXAMPLES/kmpfit_example_partialdervs_confidence.py | Python | bsd-3-clause | 10,832 | [
"Gaussian"
] | fa85bad58fec9071023a19ac4abbb7302fa312c6c2204c2942bb83298a63240c |
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
from dynd import nd, ndt, _lowlevel
from ..traversal import visit
class CKernelLifter(object):
    """
    Lift ckernels to their appropriate rank so they always consume the
    full array arguments.

    If the environment defines 'stream-outer' as True, then the
    outermost dimension is skipped, so that the operation can be
    chunked along that dimension.
    """

    def __init__(self, env):
        self.env = env

    def get_arg_type(self, arg):
        """Return the dynd type for `arg`, preferring any type recorded
        in the environment's 'dynd-types' mapping over the arg's own type."""
        dynd_types = self.env['dynd-types']
        if arg in dynd_types:
            return dynd_types[arg]
        else:
            return ndt.type(str(arg.type))

    def op_ckernel(self, op):
        """Rewrite `op` in place, replacing its deferred ckernel with one
        lifted to the full rank of the operation's arguments.

        :raises RuntimeError: for a dict-tagged ckernel with an unknown tag
        """
        op_ndim = len(op.type.shape)
        result_ndim = self.env.get('result-ndim', 0)
        ckernel, args = op.args
        in_types = [self.get_arg_type(arg) for arg in args[1:]]
        out_type = ndt.type(str(args[0].type))
        if isinstance(ckernel, dict):
            tag = ckernel['tag']
            if tag == 'reduction':
                ck = ckernel['ckernel']
                assoc = ckernel['assoc']
                comm = ckernel['comm']
                ident = ckernel['ident']
                ident = None if ident is None else nd.asarray(ident)
                axis = ckernel['axis']
                keepdims = ckernel['keepdims']
                op.args[0] = _lowlevel.lift_reduction_ckernel_deferred(
                    ck, in_types[0],
                    axis=axis, keepdims=keepdims,
                    associative=assoc, commutative=comm,
                    reduction_identity=ident)
            else:
                # bug fix: corrected typo "unnrecognized" in the message
                raise RuntimeError('unrecognized ckernel tag %s' % tag)
        elif op.metadata['rank'] < op_ndim:
            # Replace the leading dimension type with 'strided' in each operand
            # if we're streaming it for processing BLZ
            if self.env.get('stream-outer', False) and result_ndim == op_ndim:
                # TODO: Add dynd tp.subarray(N) function like datashape has
                for i, tp in enumerate(in_types):
                    if tp.ndim == result_ndim:
                        in_types[i] = ndt.make_strided_dim(tp.element_type)
                out_type = ndt.make_strided_dim(out_type.element_type)
            op.args[0] = _lowlevel.lift_ckernel_deferred(ckernel,
                                                         [out_type] + in_types)
def run(func, env):
    """Entry point: lift every ckernel in `func` using environment `env`."""
    lifter = CKernelLifter(env)
    visit(lifter, func)
| talumbau/blaze | blaze/compute/air/frontend/ckernel_lift.py | Python | bsd-3-clause | 2,666 | [
"VisIt"
] | 4ebbf89603261e9493040628f9a0c0118454265c88eeff93c9624175f8e6d998 |
"""Next gen sequence alignments with Bowtie2.
http://bowtie-bio.sourceforge.net/bowtie2/index.shtml
"""
import os
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio import bam, utils
from bcbio.pipeline import datadict as dd
from bcbio.rnaseq import gtf
from bcbio.ngsalign import alignprep, postalign
def _bowtie2_args_from_config(config, curcl):
    """Configurable high level options for bowtie2.

    :param config: bcbio configuration dict
    :param curcl: current command line (list), used to detect duplicated flags
    :return: list of extra bowtie2 arguments (cores, quality format,
        user-specified resource options)
    :raises ValueError: when a resource-specified flag duplicates one
        already present on the command line
    """
    qual_format = config["algorithm"].get("quality_format", "")
    if qual_format.lower() == "illumina":
        qual_flags = ["--phred64-quals"]
    else:
        qual_flags = []
    num_cores = config["algorithm"].get("num_cores", 1)
    core_flags = ["-p", str(num_cores)] if num_cores > 1 else []
    user_opts = config_utils.get_resources("bowtie2", config).get("options", [])
    for flag_opt in (o for o in user_opts if str(o).startswith("-")):
        if flag_opt in curcl:
            # bug fix: the format arguments must be a tuple.  The original
            # applied '%' to flag_opt alone (three specifiers, one value),
            # raising TypeError instead of this ValueError.
            raise ValueError("Duplicate option %s in resources and bcbio commandline: %s %s" %
                             (flag_opt, user_opts, curcl))
    return core_flags + qual_flags + user_opts
def align(fastq_file, pair_file, ref_file, names, align_dir, data,
          extra_args=None):
    """Alignment with bowtie2.

    :param fastq_file: input reads (first of a pair, or single-end)
    :param pair_file: second of a pair, or None/empty for single-end
    :param ref_file: bowtie2 index prefix for the reference
    :param names: read group metadata dict ('rg', 'sample', 'pl', 'pu', 'lb')
    :param align_dir: output directory for the sorted BAM
    :param data: bcbio sample data dict
    :param extra_args: optional additional bowtie2 command line arguments
    :return: path to the coordinate-sorted output BAM
    """
    config = data["config"]
    analysis_config = ANALYSIS.get(data["analysis"].lower())
    assert analysis_config, "Analysis %s is not supported by bowtie2" % (data["analysis"])
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
    if data.get("align_split"):
        # splitting inputs for parallel alignment; stream splits via named pipes
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
    else:
        final_file = None
    # skip work when either the split output or the final combined BAM exists
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file):
            cl = [config_utils.get_program("bowtie2", config)]
            cl += extra_args if extra_args is not None else []
            cl += ["-q",
                   "-x", ref_file]
            # analysis-specific parameters (see module-level ANALYSIS dict)
            cl += analysis_config.get("params", [])
            if pair_file:
                cl += ["-1", fastq_file, "-2", pair_file]
            else:
                cl += ["-U", fastq_file]
            if names and "rg" in names:
                cl += ["--rg-id", names["rg"]]
                for key, tag in [("sample", "SM"), ("pl", "PL"), ("pu", "PU"), ("lb", "LB")]:
                    if names.get(key):
                        cl += ["--rg", "%s:%s" % (tag, names[key])]
            cl += _bowtie2_args_from_config(config, cl)
            cl = [str(i) for i in cl]
            # pipe bowtie2 output straight into BAM conversion/sorting
            cmd = "unset JAVA_HOME && " + " ".join(cl) + " | " + tobam_cl
            do.run(cmd, "Aligning %s and %s with Bowtie2." % (fastq_file, pair_file))
    return out_file
# Optional galaxy location file. Falls back on remap_index_fn if not found
galaxy_location_file = "bowtie2_indices.loc"
def remap_index_fn(ref_file):
    """Map a sequence reference path to the equivalent bowtie2 index prefix.

    Drops the file extension and swaps the '/seq/' directory component
    for '/bowtie2/'.
    """
    base = os.path.splitext(ref_file)[0]
    return base.replace("/seq/", "/bowtie2/")
def filter_multimappers(align_file, data):
    """
    It does not seem like bowtie2 has a corollary to the -m 1 flag in bowtie,
    there are some options that are close but don't do the same thing. Bowtie2
    sets the XS flag for reads mapping in more than one place, so we can just
    filter on that. This will not work for other aligners.

    :param align_file: input SAM or BAM of bowtie2 alignments
    :param data: bcbio sample data dict
    :return: path to the uniquely-mapping BAM (suffix ".unique")
    """
    config = dd.get_config(data)
    # sambamba needs -hS for SAM input, -h for BAM
    type_flag = "" if bam.is_bam(align_file) else "S"
    base, ext = os.path.splitext(align_file)
    out_file = base + ".unique" + ext
    bed_file = dd.get_variant_regions(data) or dd.get_sample_callable(data)
    bed_cmd = '-L {0}'.format(bed_file) if bed_file else " "
    if utils.file_exists(out_file):
        return out_file
    # keep reads without an XS tag (uniquely mapped) that are mapped;
    # for paired input additionally require proper pairs
    base_filter = '-F "[XS] == null and not unmapped {paired_filter}" '
    if bam.is_paired(align_file):
        paired_filter = "and paired and proper_pair"
    else:
        paired_filter = ""
    filter_string = base_filter.format(paired_filter=paired_filter)
    sambamba = config_utils.get_program("sambamba", config)
    num_cores = dd.get_num_cores(data)
    with file_transaction(out_file) as tx_out_file:
        cmd = ('{sambamba} view -h{type_flag} '
               '--nthreads {num_cores} '
               '-f bam {bed_cmd} '
               '{filter_string} '
               '{align_file} '
               '> {tx_out_file}')
        message = "Removing multimapped reads from %s." % align_file
        do.run(cmd.format(**locals()), message)
    bam.index(out_file, config)
    return out_file
ANALYSIS = {"chip-seq": {"params": ["-X", 2000, "--very-sensitive"]},
"variant2": {"params": ["-X", 2000]},
"standard": {"params": ["-X", 2000]},
"rna-seq": {"params": ["--sensitive", "-X", 2000]},
"smallrna-seq": {"params": ["-N", 1, "-k", 1000, "--sensitive", "-X", 200]}}
def index_transcriptome(gtf_file, ref_file, data):
    """
    use a GTF file and a reference FASTA file to index the transcriptome

    :param gtf_file: GTF annotation describing the transcripts
    :param ref_file: genome FASTA the GTF coordinates refer to
    :param data: bcbio sample data dict (provides program configuration)
    :return: bowtie2 index prefix for the transcriptome FASTA
    """
    # extract transcript sequences, then index the resulting FASTA
    gtf_fasta = gtf.gtf_to_fasta(gtf_file, ref_file)
    bowtie2_index = os.path.splitext(gtf_fasta)[0]
    bowtie2_build = config_utils.get_program("bowtie2", data["config"]) + "-build"
    cmd = "{bowtie2_build} --offrate 1 {gtf_fasta} {bowtie2_index}".format(**locals())
    message = "Creating transcriptome index of %s with bowtie2." % (gtf_fasta)
    do.run(cmd, message)
    return bowtie2_index
def align_transcriptome(fastq_file, pair_file, ref_file, data):
    """
    bowtie2 with settings for aligning to the transcriptome for eXpress/RSEM/etc

    :param fastq_file: input reads (first of a pair, or single-end)
    :param pair_file: second of a pair, or None for single-end
    :param ref_file: genome FASTA used to build the transcriptome index
    :param data: bcbio sample data dict
    :return: data dict with the transcriptome BAM path recorded
    """
    work_bam = dd.get_work_bam(data)
    base, ext = os.path.splitext(work_bam)
    out_file = base + ".transcriptome" + ext
    if utils.file_exists(out_file):
        data = dd.set_transcriptome_bam(data, out_file)
        return data
    bowtie2 = config_utils.get_program("bowtie2", data["config"])
    gtf_file = dd.get_gtf_file(data)
    gtf_index = index_transcriptome(gtf_file, ref_file, data)
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    fastq_cmd = "-1 %s" % fastq_file if pair_file else "-U %s" % fastq_file
    pair_cmd = "-2 %s " % pair_file if pair_file else ""
    # report all alignments (-a) with settings suited to downstream
    # transcript quantifiers; name-sorted output is required by eXpress/RSEM
    cmd = ("{bowtie2} -p {num_cores} -a -X 600 --rdg 6,5 --rfg 6,5 --score-min L,-.6,-.4 --no-discordant --no-mixed -x {gtf_index} {fastq_cmd} {pair_cmd} ")
    with file_transaction(data, out_file) as tx_out_file:
        message = "Aligning %s and %s to the transcriptome." % (fastq_file, pair_file)
        cmd += "| " + postalign.sam_to_sortbam_cl(data, tx_out_file, name_sort=True)
        do.run(cmd.format(**locals()), message)
    data = dd.set_transcriptome_bam(data, out_file)
    return data
| vladsaveliev/bcbio-nextgen | bcbio/ngsalign/bowtie2.py | Python | mit | 6,941 | [
"Bowtie",
"Galaxy"
] | a9f611abfe175c844e7fd59dc794c21de8f45c416b47bb3a3ab15a199ab577f1 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module defines some useful design patterns.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class Enum(set):
    """
    Creates an enum out of a set, allowing members to be accessed as
    attributes, e.g. Enum(["a", "b"]).a == "a".
    """

    def __getattr__(self, name):
        if name in self:
            return name
        # include the missing name so attribute failures are diagnosable
        # (the original raised a bare AttributeError with no message)
        raise AttributeError("'%s' is not a member of this Enum" % name)
| sonium0/pymatgen | pymatgen/core/design_patterns.py | Python | mit | 624 | [
"pymatgen"
] | b65caf48e34e6b730b9d5a2dce120dd57a76f2bba1c02c894eb0f2f7fc92d25a |
import os
import glob
import mdtraj as md
from fahmunge import fah
# Map FAH target names to their siegetank project UUIDs.
target_id_map = {"NSD1_HUMAN_D0":"91f51e8a-7b40-4adc-8ca9-38786a9fe654", "SETD2_HUMAN_D0":"aa5325a2-2fec-46ac-a14a-ce1d46328a4c", "NSD2_HUMAN_D0":"23166b8f-dd82-408f-b1e8-b5657c67141c"}
# NOTE: iteritems() makes this script Python 2 only.
for target, target_id in target_id_map.iteritems():
    storage_path = "/home/kyleb/dat/siegetank/"
    output_directory = "/home/kyleb/dat/siegetank_analysis/%s/" % target
    # topology for this target, used to interpret the raw trajectory frames
    trj0 = md.load("/home/kyleb/src/choderalab/FAHNVT/%s/oldRUN0/system.pdb" % target)
    # one subdirectory per stream under the target's storage area
    streams = glob.glob(os.path.join(storage_path, target_id, "*"))
    for stream in streams:
        # first 25 characters of the stream directory name -> output basename
        output_filename = os.path.split(stream)[-1][0:25]
        output_filename = os.path.join(output_directory, output_filename + ".h5")
        fah.concatenate_ocore(stream, trj0, output_filename)
| kyleabeauchamp/fah-projects | code/analysis/concat_siegetank_trajectories.py | Python | gpl-2.0 | 811 | [
"MDTraj"
] | ccfa56ccca826c0b5f10f63aa11c0b87f59302a2778d0985395a77f86513778c |
#!/usr/bin/env python
'A model of the earth and its magnetic field, with earth center as origin.'
from __future__ import division # avoid integer division
import numpy as np
import mayavi
from mayavi.mlab import *
def makeRcoordinates(x, y, z):
    """Return the radius r of (x, y, z) and the components of r-hat."""
    r = np.sqrt(x**2 + y**2 + z**2)
    return r, x / r, y / r, z / r
def calcMdotRhat(m, x_hat, y_hat, z_hat):
    """Return the dot product of the dipole moment m with the unit vector r-hat."""
    return m[0] * x_hat + m[1] * y_hat + m[2] * z_hat
def calcBfield(r, x_hat, y_hat, z_hat, m, mr):
    """Dipole field B = r^-3 * (3*(m . r-hat)*r-hat - m), as array [bx, by, bz]."""
    prefactor = r ** -3
    components = [prefactor * (3 * mr * h - mc)
                  for h, mc in zip((x_hat, y_hat, z_hat), m)]
    return np.array(components)
def particleTrajectory(x0,y0,z0,v0,k,maxIterations):
    '''Integrate and plot a charged-particle trajectory in the dipole field.

    Starting position (x0, y0, z0), starting velocity v0 (3-vector),
    numerical factor k in the Lorentz term k*(v x B), and a maximum number
    of integration steps.  Returns (surface, i): the mayavi points surface
    and the last loop index reached (used by the animation driver).
    '''
    # constants
    m = np.array([-2*np.sin(13./180*np.pi),0,2*np.cos(13./180*np.pi)]) # rot tilt ~23deg, mag tilt ~10deg from rot -> ~13deg from z-axis
    limit = 1+np.max(np.abs([x0,y0,z0])) # max x,y or z coordinate
    dt = 50/720/np.linalg.norm(v0) # from -25 to 25, at 720 pixels, expect resolution ~50/720=v0*dt
    # arrays for particle trajectory
    v = np.zeros((maxIterations, 3))
    x = np.zeros(maxIterations)
    y = np.zeros(maxIterations)
    z = np.zeros(maxIterations)
    # Initial conditions
    v[0] = v0 # initial speed
    x[0] = x0 # initial position
    y[0] = y0
    z[0] = z0
    # Forward-Euler integration of the Lorentz force:
    # F = q (v x B), F = ma, v = v0 + a*dt
    # a = q/m (v x B) = k(vxB)
    # -> v = v0 + k(vxB)dt
    # r = r + v*dt = r + v0*dt + k(vxB)*dt^2
    #print 'Limit:' + `limit` #debug
    for i in range(maxIterations-1):
        r, x_hat, y_hat, z_hat = makeRcoordinates(x[i],y[i],z[i])
        mr = calcMdotRhat(m, x_hat, y_hat, z_hat)
        B = calcBfield(r, x_hat, y_hat, z_hat, m, mr)
        vxB = k * np.cross(v[i],B)
        v[i+1] = v[i] + vxB*dt
        x[i+1] = x[i] + v[i,0]*dt
        y[i+1] = y[i] + v[i,1]*dt
        z[i+1] = z[i] + v[i,2]*dt
        x_max = np.abs(x[i+1])
        y_max = np.abs(y[i+1])
        z_max = np.abs(z[i+1])
        if (x_max > limit or y_max > limit or z_max > limit):
            # dont continue when particle "get lost": truncate the arrays
            # to the points actually computed and stop integrating
            #print 'Breaking with position: ' + `x_max`, `y_max`, `z_max` #debug
            x=x[0:i+1]
            y=y[0:i+1]
            z=z[0:i+1]
            v=v[0:i+1]
            break
        #print 'Iteration numer: ' + `i`
    # plot the trajectory as a mayavi point cloud
    surface = plot3d(x,y,z)
    #visibility
    surface.actor.mapper.scalar_range = np.array([ 0., 1.])
    surface.actor.mapper.scalar_visibility = True
    surface.actor.property.specular_color = (1.0, 0.7250934615091172, 0.0)
    surface.actor.property.diffuse_color = (1.0, 0.7250934615091172, 0.0)
    surface.actor.property.ambient_color = (1.0, 0.7250934615091172, 0.0)
    surface.actor.property.color = (1.0, 0.7250934615091172, 0.0)
    surface.actor.property.point_size = 2.0
    surface.actor.property.representation = 'points'
    # debug
    #print 'First: ' + `x[0]`, `y[0]`, `z[0]`
    #print 'Last: ' + `x[-1]`, `y[-1]`, `z[-1]`
    # energy check: with a pure magnetic force |v| should be conserved,
    # so this percentage measures the integration error
    v_last = np.linalg.norm(v[-1])
    v_first = np.linalg.norm(v[0])
    v_diff = v_first - v_last
    v_diff_percent = v_diff / v_first * 100
    print 'Percentage difference in start/end velocity: ' + `v_diff_percent` + '%'
    return surface, i # also return i, such that we can control animation calculations better
def trajectoryAnimation():
    '''Render numbered PNG frames of particle trajectories for animation.

    For several starting points in the x=25 plane, repeatedly integrates
    the trajectory with an increasing step cutoff and saves one image per
    cutoff, rotating the camera slightly between frames.
    NOTE(review): relies on a module-level `fig` for the camera — confirm
    it is defined before calling this function.
    '''
    x0 = 25
    start = -25 # y0,z0
    v0 = 400/6371*10 # v=400km/s, rEarth=6371km, rEarth is 10 in mgrid.py
    k = 2e2
    it = 6 # number of points in grid
    maxIterations = 10000 # max iterations
    step = 10
    # create pictures to animate particle trajectory
    imageName = 0
    for i in range(-1,2): #several starting points
        y0 = 5*i
        for j in range(2):
            z0 = -25 + 5*j
            for stop in range(100,maxIterations,step):
                r = (x0**2 + y0**2 + z0**2)**0.5
                r_hat = np.array([ x0/r, y0/r, z0/r ])
                v = -r_hat*v0 # direction straight at earth
                surface, num = particleTrajectory(x0,y0,z0,v,k,stop)
                if (num != stop-2):
                    #print 'num, stop: ' + `num`, `stop` #debug
                    break # we do not need to generate more pictures, particle out of bound
                savefig(`imageName` + '.png',size=(720,720))
                imageName+=1
                surface.remove()
                fig.scene.camera.elevation(1)
                fig.scene.camera.orthogonalize_view_up() # http://public.kitware.com/pipermail/vtkusers/2003-July/018794.html
def xTowards():
    '''Plot trajectories for a grid of starting points in the yz-plane at
    x=25, each launched straight toward the origin (Earth).'''
    x0 = 25
    start = -25  # lower bound for y0, z0
    v0 = 400/6371*10  # v=400km/s, rEarth=6371km, rEarth is 10 in mgrid.py
    # NOTE(review): under plain Python 2 division 400/6371 == 0, so v0 may be
    # 0 unless a future-division import exists at the top of this file.
    k = 2e2
    it = 4  # number of points in grid (per axis)
    maxIterations = 10000  # max iterations
    step = 10  # unused in this function
    # plot all trajectories
    for i in range(it):  # several starting points
        for j in range(it):
            # NOTE(review): 50*i/it truncates under Python 2 integer
            # division (e.g. 50*1/4 == 12, not 12.5) — presumably intended.
            y0 = start + 50*i/it
            z0 = start + 50*j/it
            r = (x0**2 + y0**2 + z0**2)**0.5
            r_hat = np.array([ x0/r, y0/r, z0/r ])
            v = -r_hat*v0  # direction straight at earth
            surface, num = particleTrajectory(x0,y0,z0,v,k,maxIterations)
def zTowards():
    '''Plot trajectories like ``xTowards`` but with the start coordinates
    passed to ``particleTrajectory`` as (z0, y0, x0) — apparently to launch
    from a plane along the z-axis. TODO(review): confirm the intended axis
    ordering; the velocity is still computed from (x0, y0, z0).'''
    x0 = 25
    start = -25  # lower bound for y0, z0
    v0 = 400/6371*10  # v=400km/s, rEarth=6371km, rEarth is 10 in mgrid.py
    k = 2e2
    it = 4  # number of points in grid (per axis)
    maxIterations = 10000  # max iterations
    step = 10  # unused in this function
    # plot all trajectories
    for i in range(it):  # several starting points
        for j in range(it):
            y0 = start + 50*i/it
            z0 = start + 50*j/it
            r = (x0**2 + y0**2 + z0**2)**0.5
            r_hat = np.array([ x0/r, y0/r, z0/r ])
            v = -r_hat*v0  # direction straight at earth
            # note the swapped first/third arguments relative to xTowards
            surface, num = particleTrajectory(z0,y0,x0,v,k,maxIterations)
def xStraigth():
    '''Plot trajectories for a grid of starting points in the yz-plane at
    x=25, all launched with velocity (-v0, 0, 0), i.e. straight along -x.
    (Function name keeps the original "Straigth" spelling; renaming would
    break external callers.)'''
    x0 = 25
    start = -25  # lower bound for y0, z0
    v0 = 400/6371*10  # v=400km/s, rEarth=6371km, rEarth is 10 in mgrid.py
    k = 2e2
    it = 4  # number of points in grid (per axis)
    maxIterations = 10000  # max iterations
    step = 10  # unused in this function
    # plot all trajectories
    for i in range(it):  # several starting points
        for j in range(it):
            y0 = start + 50*i/it
            z0 = start + 50*j/it
            v = np.array([-v0,0,0])
            surface, num = particleTrajectory(x0,y0,z0,v,k,maxIterations)
def zStraight():
    '''Plot trajectories launched with velocity (0, 0, -v0), with start
    coordinates passed as (z0, y0, x0) like ``zTowards``. TODO(review):
    confirm the intended axis ordering of the arguments.'''
    x0 = 25
    start = -25  # lower bound for y0, z0
    v0 = 400/6371*10  # v=400km/s, rEarth=6371km, rEarth is 10 in mgrid.py
    k = 2e2
    it = 4  # number of points in grid (per axis)
    maxIterations = 10000  # max iterations
    step = 10  # unused in this function
    # plot all trajectories
    for i in range(it):  # several starting points
        for j in range(it):
            y0 = start + 50*i/it
            z0 = start + 50*j/it
            v = np.array([0,0,-v0])
            surface, num = particleTrajectory(z0,y0,x0,v,k,maxIterations)
# create earth: a sphere of radius 10 (Earth's scaled radius, see mgrid.py)
# built from a spherical-coordinate mesh grid.
earthRadius = 10
theta, phi = np.mgrid[0:np.pi:11j, 0:np.pi*2:21j]
ex = earthRadius * np.sin(theta) * np.cos(phi)
ey = earthRadius * np.sin(theta) * np.sin(phi)
ez = earthRadius * np.cos(theta)
# set up the mayavi figure used by the plotting functions above
fig = figure(size=(720,720))
fig.scene.background = (1,1,1)  # white background
fig.scene.y_plus_view()  # see from Y-axis
fig.scene.camera.elevation(45)
fig.scene.camera.azimuth(10)
fig.scene.show_axes = True
# draw the Earth as a black sphere
earthSurface = mesh(ex, ey, ez, color=(0, 0, 0))
| arve0/TFY4240-Semester-project | src/solar_wind.py | Python | mit | 7,906 | [
"Mayavi"
] | b3330096f54d6cefa5459690b996aff6844c438aca8650ae66fd5fbb88c0473b |
#!/usr/bin/env python
# Contact: Jacob Schreiber
# jacobtribe@soe.ucsc.com
# parsers.py
#
# This program will read in an abf file using read_abf.py and
# pull out the events, saving them as text files.
from __future__ import division, print_function
import sys
from itertools import tee,izip,chain
import re
import PyPore
import time
import numpy as np
try:
from PyQt4 import QtGui as Qt
from PyQt4 import QtCore as Qc
except:
pass
from core import *
import pyximport
pyximport.install( setup_args={'include_dirs':np.get_include()})
from PyPore.cparsers import FastStatSplit
import json
#########################################
# EVENT PARSERS
#########################################
class parser( object ):
    '''
    Base class for all event/state parsers. Provides JSON (de)serialization
    of the parser's configuration and a generic PyQt4 settings GUI. PyQt4 is
    required only to use the GUI, not to construct or run a parser.
    '''
    def __init__( self ):
        pass

    def __repr__( self ):
        ''' Returns a representation of the parser in the form of all arguments. '''
        return self.to_json()

    def to_dict( self ):
        '''
        Return this parser's configuration as a dict, keeping only plain
        numeric attributes and anything that is neither a Qt widget nor a
        lambda (those cannot be serialized), plus the class name.
        '''
        d = { key: val for key, val in self.__dict__.items() if key != 'param_dict'
              if type(val) in (int, float)
              or ( 'Qt' not in repr(val) )
              and 'lambda' not in repr(val) }
        d['name'] = self.__class__.__name__
        return d

    def to_json( self, filename=False ):
        '''
        Serialize the configuration to a JSON string; if ``filename`` is
        given, also write the JSON to that file.
        '''
        _json = json.dumps( self.to_dict(), indent=4, separators=( ',', ' : ' ) )
        if filename:
            # Bug fix: the file was previously opened in read mode ('r'),
            # which raises on write. Open for writing instead.
            with open( filename, 'w' ) as out:
                out.write( _json )
        return _json

    def parse( self, current ):
        ''' Takes in a current segment, and returns a list of segment objects. '''
        return [ Segment( current=current, start=0, duration=current.shape[0]/100000 ) ]

    def GUI( self ):
        '''
        A generic GUI built using PyQt4 based off the arguments presented upon
        initialization. Requires PyQt4 to use, but the parser class does not
        require PyQt4 to run.
        '''
        grid = Qt.QGridLayout()
        param_dict = {}
        for i, (key, val) in enumerate( self.__dict__.items() ):
            param_dict[key] = Qt.QLineEdit()
            param_dict[key].setText( str(val) )
            grid.addWidget( Qt.QLabel(key), i, 0 )
            grid.addWidget( param_dict[key], i, 1 )
        self.param_dict = param_dict
        return grid

    def set_params( self ):
        '''
        Update each parameter presented in the GUI to the value typed into
        its line edit: floats if the text contains a '.', ints if it is all
        digits, strings otherwise. (The previous per-character loop could
        never reach its int branch and re-set the string on every iteration.)
        '''
        try:
            for key, lineEdit in self.param_dict.items():
                val = str( lineEdit.text() )
                if '.' in val:
                    setattr( self, key, float( val ) )
                elif val.isdigit():
                    setattr( self, key, int( val ) )
                else:
                    setattr( self, key, val )
        except:
            pass

    @classmethod
    def from_json( cls, _json ):
        '''
        Rebuild a parser from a JSON string, or from a .json file path.
        The concrete class is looked up by the stored 'name' entry.
        '''
        if _json.endswith(".json"):
            with open( _json, 'r' ) as infile:
                _json = ''.join( line for line in infile )
        d = json.loads( _json )
        name = d['name']
        del d['name']
        return getattr( PyPore.parsers, name )( **d )
class MemoryParse( object ):
    '''
    Reconstructs a previous segmentation from stored split points. Given the
    (start, end) indices of an earlier parse, slicing a raw current trace
    with this parser yields the same segments again — used mainly when
    reloading split points from the database cache.
    '''
    def __init__( self, starts, ends ):
        self.starts = starts
        self.ends = ends

    def parse( self, current ):
        '''Slice ``current`` into Segments at the stored split points.'''
        segments = []
        for begin, stop in zip( self.starts, self.ends ):
            piece = np.array( current[int(begin):int(stop)], copy=True )
            segments.append( Segment( current=piece,
                                      start=begin,
                                      duration=(stop-begin) ) )
        return segments
class lambda_event_parser( parser ):
    '''
    A simple rule-based parser which defines events as a sequential series of
    points below a certain threshold, then filters them on other criteria such
    as total time or minimum current. Rules can be passed in at initiation, or
    set later, but must be lambda functions that take an event object and
    return a boolean.
    '''
    def __init__( self, threshold=90, rules=None ):
        # threshold: raw ionic current below which a region counts as an event
        self.threshold = threshold
        # default rules: long enough, above a minimum current, below threshold
        self.rules = rules or [ lambda event: event.duration > 100000,
                                lambda event: event.min > -0.5,
                                lambda event: event.max < self.threshold ]

    def _lambda_select( self, events ):
        '''
        From all of the events, filter based on whatever set of rules has been
        initiated with. An event is kept only if every rule returns True.
        '''
        return [ event for event in events if np.all( [ rule( event ) for rule in self.rules ] ) ]

    def parse( self, current ):
        '''
        Perform a large capture of events by creating a boolean mask for when
        the current is below a threshold, then detecting the edges in those
        masks, and using the edges to partition the sample. The events are
        then filtered before being returned.
        '''
        mask = np.where( current < self.threshold, 1, 0 )  # Find where the current is below a threshold, replace with 1's
        mask = np.abs( np.diff( mask ) )                   # Find the edges, marking them with a 1, by derivative
        tics = np.concatenate( ( [0], np.where( mask == 1 )[0]+1, [current.shape[0]] ) )
        del mask
        # NOTE(review): the comprehension variable deliberately shadows the
        # ``current`` argument; ``duration=current.shape[0]`` therefore refers
        # to each piece's length. Also, ``copy=True`` is passed to Segment
        # itself — presumably ``np.array(current, copy=True)`` was intended;
        # confirm against Segment's constructor.
        events = [ Segment( current=np.array( current ), copy=True,
                            start=tics[i],
                            duration=current.shape[0] ) for i, current in enumerate( np.split( current, tics[1:-1] ) ) ]
        return [ event for event in self._lambda_select( events ) ]

    def GUI( self ):
        '''
        Override the default GUI for use in the Abada GUI, allowing for
        customization of the rules and threshold via the GUI.
        '''
        threshDefault, timeDefault = "90", "1"
        maxCurrentDefault, minCurrentDefault = threshDefault, "-0.5"
        grid = Qt.QGridLayout()
        threshLabel = Qt.QLabel( "Maximum Current" )
        threshLabel.setToolTip( "Raw ionic current threshold, which, if dropped below, indicates an event." )
        grid.addWidget( threshLabel, 0, 0 )
        self.threshInput = Qt.QLineEdit()
        self.threshInput.setText( threshDefault )
        grid.addWidget( self.threshInput, 0, 2, 1, 1 )
        minCurrentLabel = Qt.QLabel( "Minimum Current (pA):" )
        minCurrentLabel.setToolTip( "This sets a filter requiring all ionic current in an event be above this amount." )
        grid.addWidget( minCurrentLabel, 1, 0 )
        self.minCurrentInput = Qt.QLineEdit()
        self.minCurrentInput.setText( minCurrentDefault )
        grid.addWidget( self.minCurrentInput, 1, 2, 1, 1 )
        timeLabel = Qt.QLabel( "Time:" )
        timeLabel.setToolTip( "This sets a filter requiring all events are of a certain length." )
        grid.addWidget( timeLabel, 3, 0 )
        # direction combo box: '<' or '>' comparison for the duration rule
        self.timeDirectionInput = Qt.QComboBox()
        self.timeDirectionInput.addItem( ">" )
        self.timeDirectionInput.addItem( "<" )
        grid.addWidget( self.timeDirectionInput, 3, 1 )
        self.timeInput = Qt.QLineEdit()
        self.timeInput.setText( timeDefault )
        grid.addWidget( self.timeInput, 3, 2, 1, 1 )
        return grid

    def set_params( self ):
        '''
        Read in the data from the GUI and use it to customize the rules or
        threshold of the parser. Rebuilds the rule list from scratch.
        '''
        self.rules = []
        self.threshold = float( self.threshInput.text() )
        self.rules.append( lambda event: event.max < self.threshold )
        if self.minCurrentInput.text() != '':
            self.rules.append( lambda event: event.min > float( self.minCurrentInput.text() ) )
        if self.timeInput.text() != '':
            if str( self.timeDirectionInput.currentText() ) == '<':
                self.rules.append( lambda event: event.duration < float( self.timeInput.text() ) )
            elif str( self.timeDirectionInput.currentText() ) == '>':
                self.rules.append( lambda event: event.duration > float( self.timeInput.text() ) )
        if self.rules == []:
            self.rules = None
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # Duplicate the iterator, advance the second copy one step, then walk
    # both in lockstep to produce overlapping neighbor pairs.
    left, right = tee(iterable)
    next(right, None)
    return izip(left, right)
class StatSplit( parser ):
    """
    DEPRECATED: USE SPEEDYSTATSPLIT.
    Recursive maximum-likelihood segmenter: repeatedly splits the current
    trace at the breakpoint that most reduces the (log-)variance of the two
    halves, modeling each segment either as a constant ("stepwise") or a
    straight line ("slanted") plus Gaussian noise.
    """
    def __init__(self, min_width=1000, max_width=1000000,
            min_gain_per_sample=0.03,
            window_width=10000,
            use_log=True,
            splitter="stepwise"):
        """
        create a segmenter with specified minimum and maximum segment lengths.
        (Default for max_width is 100*min_width)
        min_gain_per_sample is the minimum reduction in variance for a split to be done;
        it is multiplied by window_width to get min_gain.
        If use_log, then minimize the log of variances,
        otherwise minimize the variance.
        splitter is "stepwise", "slanted", or a splitter function.
        """
        self.min_width = max( min_width, 1 ) # Avoid divide by 0
        self.max_width = max_width or 100*min_width
        self.min_gain_per_sample = min_gain_per_sample
        self.window_width = window_width or 10*min_width
        assert self.max_width >= self.min_width
        assert self.window_width >= 2*self.min_width
        self.use_log = use_log
        self.splitter = splitter

    def parse(self, current, start=0, end=-1):
        """
        segments current[start:end], where current is a numpy array.
        Returns a list of Segment objects covering [start:end], each with
        min_width <= duration <= max_width.
        """
        # normalize start and end to be normal subscripts
        n = len(current)
        if start < 0: start += n+1
        if end < 0: end += n+1
        if start > n: start = n
        if end > n: end = n
        # Resolve the splitter only while it is still a string name. The
        # previous if/else clobbered a user-supplied splitter function, and
        # reset "slanted" to stepwise on any second call to parse().
        if self.splitter == "slanted":
            self.splitter = self._best_split_slanted
        elif not callable(self.splitter):
            self.splitter = self._best_split_stepwise
        self.current = current
        self.cum = np.cumsum( current )
        self.cum2 = np.cumsum( np.multiply( current, current ) )
        if self.splitter != self._best_split_stepwise:
            # For covariance computation, need cumulative sum(current*time),
            # where time is subscript of current array.
            # Needs to be kept in double precision (or higher), since time steps of 1 can
            # be small relative to total array length.
            self.cum_ct = np.cumsum(np.multiply(current, np.linspace(0,end,num=end,endpoint=False)))
        breakpoints = self._segment_cumulative(start, end)
        # paired is pairs of breakpoints (start,a1), (a1,a2), ..., (an,end)
        paired = [ p for p in pairwise(chain([start],breakpoints,[end])) ]
        assert len(paired)==len(breakpoints)+1
        # Both splitter styles produce the same Segment objects; the per-pair
        # means/variances and linear-regression endpoints that used to be
        # computed here were never consumed, so they have been removed.
        return [ Segment( current=current[s:e],
                          start=s,
                          duration=(e-s) ) for s, e in paired ]

    def _mean_c(self, start, end):
        """mean value of current for segment start:end
        (uses self.cum a numpy array that is the cumulative sum of
        a current trace (that is, self.cum[i] = sum(self.current[0:i+1])
        or self.cum=np.cumsum(self.current) ).
        """
        if start==end: return 0
        if start==0: return self.cum[end-1]/end
        return (self.cum[end-1]-self.cum[start-1])/(end-start)

    def _mean_c2(self, start, end):
        """mean value of current**2 for segment start:end
        (uses self.cum2, a numpy array that is the cumulative sum of
        the square of the current)
        """
        if start==end: return 0
        if start==0: return self.cum2[end-1]/end
        return (self.cum2[end-1]-self.cum2[start-1])/(end-start)

    def _var_c(self, start, end):
        """variance of current for segment start:end
        (uses self.cum2, a numpy array that is the cumulative sum of
        the square of the current)
        """
        if start==end: return 0
        if start==0: return self.cum2[end-1]/end - (self.cum[end-1]/end)**2
        return (self.cum2[end-1]-self.cum2[start-1])/(end-start) \
            - ((self.cum[end-1]-self.cum[start-1])/(end-start))**2

    def _mean_ct(self, start, end):
        """mean value of current[t]*t for segment start:end
        (uses self.cum_ct, a numpy array that is the cumulative sum of
        the current[t]*t
        """
        if start==end: return 0
        if start==0: return self.cum_ct[end-1]/end
        return (self.cum_ct[end-1]-self.cum_ct[start-1])/(end-start)

    def _mean_t(self, start, end):
        """mean value of start, ..., end-1"""
        return start + (end-start-1)/2

    def _mean_t2(self, start, end):
        """mean value of start**2, ..., (end-1)**2 """
        return (2*end**2 + end*(2*start-3) + 2*start**2-3*start+1)/6.

    def _lr(self, start, end):
        """does a linear regression on self.current, for segment start:end.
        Returns (alpha, beta, var),
        where current[i] =approx alpha+beta*i
        and var is the mean square residual
        """
        xy_bar = self._mean_ct(start,end)
        y_bar = self._mean_c(start,end)
        x_bar = self._mean_t(start,end)
        x2_bar = self._mean_t2(start,end)
        beta = (xy_bar - x_bar*y_bar)/(x2_bar - x_bar**2)
        alpha = y_bar - beta*x_bar
        y2_bar = self._mean_c2(start,end)
        var = y2_bar - 2*alpha*y_bar - 2*beta*xy_bar + alpha**2 + 2*alpha*beta*x_bar + beta**2*x2_bar
        return (alpha, beta, var)

    def _best_split_stepwise(self, start, end):
        """splits self.cum[start:end] (0<=start<end<=len(self.current)).
        Breakpoint is chosen to maximize the probability of the two segments
        modeled as two Gaussians.
        Returns (x, decrease in (log)variance as a result of splitting)
        so that segments are seg1=[start:x], seg2=[x:end]
        with min_width <= x-start and min_width <= end-x
        (If no such x, returns None.)
        Note decrease in log variance is proportional to
        log p1(seg1) + log p2(seg2) - log pall(seg1+seg2))
        so that this is a maximum-likelihood estimator of splitting point
        """
        if end-start < 2*self.min_width:
            return None
        var_summed = (end-start)*(self._var_c(start,end) if not self.use_log
                                  else np.log(self._var_c(start,end)))
        max_gain = self.min_gain_per_sample*self.window_width
        x = None
        for i in xrange(start+self.min_width, end+1-self.min_width):
            low_var_summed = (i-start)*( self._var_c(start,i) if not self.use_log
                                         else np.log(self._var_c(start,i)))
            high_var_summed = (end-i)*( self._var_c(i,end) if not self.use_log
                                        else np.log(self._var_c(i,end)))
            gain = var_summed - (low_var_summed+high_var_summed)
            if gain > max_gain:
                max_gain = gain
                x = i
        if x is None:
            return None
        return (x, max_gain)

    def _best_split_slanted(self, start, end):
        """
        splits self.cum[start:end] (0<=start<end<=len(self.current)).
        Breakpoint is chosen to maximize the probability of the two segments
        modeled as two straight-line segments plus Gaussian noise.
        Returns (x, (log)variance decrease as a result of splitting)
        so that segments are seg1=[start:x], seg2=[x:end]
        with min_width <= x-start and min_width <= end-x
        (If no such x, returns None.)
        """
        if end-start < 2*self.min_width:
            return None
        # Bug fix: this method previously called the undefined name `log`,
        # raising NameError at runtime; np.log matches the stepwise splitter.
        var_summed = (end-start)*( self._lr(start,end)[2] if not self.use_log
                                   else np.log(self._lr(start,end)[2]))
        max_gain = self.min_gain_per_sample*self.window_width
        x = None
        for i in xrange(start+self.min_width, end+1-self.min_width):
            low_var_summed = (i-start)*(self._lr(start,i)[2] if not self.use_log
                                        else np.log(self._lr(start,i)[2]))
            high_var_summed = (end-i)*(self._lr(i,end)[2] if not self.use_log
                                       else np.log(self._lr(i,end)[2]))
            gain = var_summed - (low_var_summed+high_var_summed)
            if gain > max_gain:
                max_gain = gain
                x = i
        if x is None:
            return None
        return (x, max_gain)

    # PROBLEM: this recursive splitting can have O(n^2) behavior,
    # if each split only removes min_width from one end, because
    # the self.splitter routines take time proportional to the length of the
    # segment being split. Keeping window_width small helps, since behavior is
    # O( window_width/min_width * (end-start) )
    def _segment_cumulative(self, start, end):
        """segments cumulative sum of current and current**2 (in self.cum and self.cum2)
        returns [a1, a2, ..., an]
        so that segments are [start:a1], [a1:a2], ... [an:end]
        with min_width <= ai - a_{i-1} <= max_width
        (a0=start a_{n+1}=end)
        """
        # scan in overlapping windows to find a splitting point
        split_pair = None
        pseudostart = start
        for pseudostart in xrange(start, end-2*self.min_width, self.window_width//2 ):
            if pseudostart > start + self.max_width:
                # scanned a long way with no splits, add a fake one at max_width
                split_at = min(start+self.max_width, end-self.min_width)
                return [split_at] + self._segment_cumulative(split_at, end)
            # look for a splitting point
            pseudoend = min(end, pseudostart+self.window_width)
            split_pair = self.splitter(pseudostart, pseudoend)
            if split_pair is not None: break
        if split_pair is None:
            if end-start <= self.max_width:
                # we've split as finely as we can, subdivide only if end-start>max_width
                return []
            split_at = min(start+self.max_width, end-self.min_width)
        else:
            split_at, gain = split_pair
        # splitting point found, recursively try each subpart
        return self._segment_cumulative(start, split_at) \
            + [split_at] \
            + self._segment_cumulative(split_at, end)
class SpeedyStatSplit( parser ):
    '''
    See cparsers.pyx FastStatSplit for full documentation. This is just a
    wrapper for the cython implementation to add a GUI.
    '''
    def __init__( self, min_width=100, max_width=1000000, window_width=10000,
            min_gain_per_sample=None, false_positive_rate=None,
            prior_segments_per_second=None, sampling_freq=1.e5, cutoff_freq=None ):
        self.min_width = min_width
        self.max_width = max_width
        self.min_gain_per_sample = min_gain_per_sample
        self.window_width = window_width
        self.prior_segments_per_second = prior_segments_per_second
        self.false_positive_rate = false_positive_rate
        self.sampling_freq = sampling_freq
        self.cutoff_freq = cutoff_freq

    def parse( self, current ):
        '''Segment ``current`` with the cython FastStatSplit implementation.'''
        parser = FastStatSplit( self.min_width, self.max_width,
            self.window_width, self.min_gain_per_sample, self.false_positive_rate,
            self.prior_segments_per_second, self.sampling_freq, self.cutoff_freq )
        return parser.parse( current )

    def best_single_split( self, current ):
        '''Return the single best split point for ``current``.'''
        # Consistency fix: pass cutoff_freq like parse() does; it was
        # previously omitted from this constructor call only.
        parser = FastStatSplit( self.min_width, self.max_width,
            self.window_width, self.min_gain_per_sample, self.false_positive_rate,
            self.prior_segments_per_second, self.sampling_freq, self.cutoff_freq )
        return parser.best_single_split( current )

    def GUI( self ):
        '''Settings grid for the Abada GUI.'''
        grid = Qt.QGridLayout()
        grid.addWidget( Qt.QLabel( "Minimum Width (samples): "), 0, 0, 1, 3)
        grid.addWidget( Qt.QLabel( "Maximum Width (samples): " ), 1, 0, 1, 3 )
        grid.addWidget( Qt.QLabel( "Window Width (samples): " ), 2, 0, 1, 3 )
        grid.addWidget( Qt.QLabel( "Minimum Gain / Sample: " ), 3, 0, 1, 3 )
        self.minWidth = Qt.QLineEdit()
        self.minWidth.setText('1000')
        self.maxWidth = Qt.QLineEdit()
        self.maxWidth.setText('1000000')
        self.windowWidth = Qt.QLineEdit()
        self.windowWidth.setText('10000')
        self.minGain = Qt.QLineEdit()
        self.minGain.setText('0.05')
        grid.addWidget( self.minWidth, 0, 3 )
        grid.addWidget( self.maxWidth, 1, 3 )
        grid.addWidget( self.windowWidth, 2, 3 )
        grid.addWidget( self.minGain, 3, 3 )
        return grid

    def set_params( self ):
        '''Read the line edits back into the parser's numeric parameters.'''
        try:
            self.min_width = int(self.minWidth.text())
            self.max_width = int(self.maxWidth.text())
            self.window_width = int(self.windowWidth.text())
            self.min_gain_per_sample = float(self.minGain.text())
        except:
            pass
#########################################
# STATE PARSERS
#########################################
class snakebase_parser( parser ):
    '''
    A simple parser based on dividing when the peak-to-peak amplitude of a
    wave exceeds a certain threshold.
    '''
    def __init__( self, threshold=1.5 ):
        self.threshold = threshold

    def parse( self, current ):
        '''Split ``current`` into Segments at threshold-crossing waves.'''
        # Take the derivative of the current first
        diff = np.abs( np.diff( current ) )
        # Find the places where the derivative is low
        tics = np.concatenate( ( [0], np.where( diff < 1e-3 )[0], [ diff.shape[0] ] ) )
        # For pieces between these tics, make each point the cumulative sum of
        # that piece and put it together piecewise
        cumsum = np.concatenate( ( [ np.cumsum( diff[ tics[i] : tics[i+1] ] ) for i in xrange( tics.shape[0]-1 ) ] ) )
        # Find the edges where the cumulative sum passes a threshold
        # NOTE(review): split_points is computed but never used below; the
        # return uses only ``tics`` — confirm whether this was the intent.
        split_points = np.where( np.abs( np.diff( np.where( cumsum > self.threshold, 1, 0 ) ) ) == 1 )[0] + 1
        # Return segments which do pass the threshold
        return [ Segment( current = current[ tics[i]: tics[i+1] ], start = tics[i] ) for i in xrange( 1, tics.shape[0] - 1, 2 ) ]

    def GUI( self ):
        '''Settings grid with a single threshold input.'''
        threshDefault = "1.5"
        grid = Qt.QGridLayout()
        grid.setVerticalSpacing(0)
        grid.addWidget( Qt.QLabel( "Threshold" ), 0, 0 )
        self.threshInput = Qt.QLineEdit()
        self.threshInput.setToolTip("Peak to peak amplitude threshold, which if gone above, indicates a state transition.")
        self.threshInput.setText( threshDefault )
        grid.addWidget( self.threshInput, 0, 1 )
        # Bug fix: a stray addWidget(self.mergerThreshInput) referenced an
        # attribute that is never created and raised AttributeError.
        return grid

    def set_params( self ):
        '''Read the threshold back from the GUI.'''
        self.threshold = float( self.threshInput.text() )
class FilterDerivativeSegmenter( parser ):
    '''
    This parser will segment an event using a filter-derivative method. It
    will first apply a bessel filter at a certain cutoff to the current, then
    it will take the derivative of that, and segment when the derivative
    passes a threshold.
    '''
    def __init__( self, low_threshold=1, high_threshold=2, cutoff_freq=1000.,
            sampling_freq=1.e5 ):
        self.low_threshold = low_threshold
        self.high_threshold = high_threshold
        self.cutoff_freq = cutoff_freq
        self.sampling_freq = sampling_freq

    def parse( self, current ):
        '''
        Apply the filter-derivative method to filter the ionic current.
        '''
        # Filter the current using a first order Bessel filter twice, once in
        # each direction to preserve phase
        from scipy import signal
        nyquist = self.sampling_freq / 2.
        b, a = signal.bessel( 1, self.cutoff_freq / nyquist, btype='low', analog=0, output='ba' )
        filtered_current = signal.filtfilt( b, a, np.array( current ).copy() )
        # Take the derivative
        deriv = np.abs( np.diff( filtered_current ) )
        # Find the edges of the blocks which pass the lower threshold
        blocks = np.where( deriv > self.low_threshold, 1, 0 )
        block_edges = np.abs( np.diff( blocks ) )
        tics = np.where( block_edges == 1 )[0] + 1
        # Split points are points in each block which pass the high
        # threshold, with a maximum of one per block
        split_points = [0]
        # Bug fix: `izip` is the module-level itertools import; the original
        # referenced the undefined name `it.izip`.
        for start, end in izip( tics[:-1:2], tics[1::2] ): # For all pairs of edges for a block..
            segment = deriv[ start:end ] # Save all derivatives in that block to a segment
            # Bug fix: compare the maximum derivative VALUE against the high
            # threshold; np.argmax returned an index, not a value.
            if np.max( segment ) > self.high_threshold: # If the maximum derivative in that block is above a threshold..
                split_points = np.concatenate( ( split_points, [ start, end ] ) ) # Save the edges of the segment
        # Now you have the edges of all transitions saved, and so the states
        # are the current between these transitions
        tics = np.concatenate( ( split_points, [ current.shape[0] ] ) )
        tics = map( int, tics )
        return [ Segment( current=current[ tics[i]:tics[i+1] ], start=tics[i] )
            for i in xrange( 0, len(tics)-1, 2 ) ]

    def GUI( self ):
        '''Settings grid with low/high derivative thresholds.'''
        lowThreshDefault = "1e-2"
        highThreshDefault = "1e-1"
        grid = Qt.QGridLayout()
        grid.addWidget( Qt.QLabel( "Low-pass Threshold: " ), 0, 0 )
        grid.addWidget( Qt.QLabel( "High-pass Threshold: " ), 1, 0 )
        self.lowThreshInput = Qt.QLineEdit()
        self.lowThreshInput.setText( lowThreshDefault )
        self.lowThreshInput.setToolTip( "The lower threshold, of which one maximum is found." )
        self.highThreshInput = Qt.QLineEdit()
        self.highThreshInput.setText( highThreshDefault )
        self.highThreshInput.setToolTip( "The higher threshold, of which the maximum must be above." )
        grid.addWidget( self.lowThreshInput, 0, 1 )
        grid.addWidget( self.highThreshInput, 1, 1 )
        return grid

    def set_params( self ):
        '''Read the thresholds back from the GUI.'''
        # Bug fix: these previously wrote low_thresh/high_thresh, leaving the
        # attributes parse() actually reads (low_threshold/high_threshold)
        # unchanged.
        self.low_threshold = float( self.lowThreshInput.text() )
        self.high_threshold = float( self.highThreshInput.text() )
| jmschrei/PyPore | PyPore/parsers.py | Python | mit | 29,489 | [
"Gaussian"
] | 79df1da2139ed790db957c7e11650dd7e38e8e65e0590ce80abb8270f7f15c0c |
# Authors: CommPy contributors
# License: BSD 3-Clause
"""
=============================================
Pulse Shaping Filters (:mod:`commpy.filters`)
=============================================
.. autosummary::
:toctree: generated/
rcosfilter -- Raised Cosine (RC) Filter.
rrcosfilter -- Root Raised Cosine (RRC) Filter.
gaussianfilter -- Gaussian Filter.
rectfilter -- Rectangular Filter.
"""
import numpy as np
__all__=['rcosfilter', 'rrcosfilter', 'gaussianfilter', 'rectfilter']
def rcosfilter(N, alpha, Ts, Fs):
    """
    Generates a raised cosine (RC) filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    alpha : float
        Roll off factor (Valid values are [0, 1]).
    Ts : float
        Symbol period in seconds.
    Fs : float
        Sampling Rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray (float)
        Array containing the time indices, in seconds, for the impulse response.
    h_rc : 1-D ndarray (float)
        Impulse response of the raised cosine filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    h_rc = np.zeros(N, dtype=float)

    for n in range(N):
        t = (n - N / 2) * T_delta
        if t == 0.0:
            # Peak of the filter at t = 0.
            h_rc[n] = 1.0
        elif alpha != 0 and abs(t) == Ts / (2 * alpha):
            # Singularity of the closed-form expression: both signs of t
            # share the same limiting value.
            h_rc[n] = (np.pi / 4) * (np.sin(np.pi * t / Ts) / (np.pi * t / Ts))
        else:
            sinc_part = np.sin(np.pi * t / Ts) / (np.pi * t / Ts)
            frac = (2 * alpha * t) / Ts
            h_rc[n] = sinc_part * (np.cos(np.pi * alpha * t / Ts) / (1 - frac * frac))

    return time_idx, h_rc
def rrcosfilter(N, alpha, Ts, Fs):
    """
    Generates a root raised cosine (RRC) filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    alpha : float
        Roll off factor (Valid values are [0, 1]).
    Ts : float
        Symbol period in seconds.
    Fs : float
        Sampling Rate in Hz.

    Returns
    ---------
    time_idx : 1-D ndarray of floats
        Array containing the time indices, in seconds, for
        the impulse response.
    h_rrc : 1-D ndarray of floats
        Impulse response of the root raised cosine filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    h_rrc = np.zeros(N, dtype=float)

    for n in range(N):
        t = (n - N / 2) * T_delta
        if t == 0.0:
            # Value at the center tap, t = 0.
            h_rrc[n] = 1.0 - alpha + (4 * alpha / np.pi)
        elif alpha != 0 and abs(t) == Ts / (4 * alpha):
            # Removable singularity: identical limit for t = +/- Ts/(4*alpha).
            h_rrc[n] = (alpha / np.sqrt(2)) * (((1 + 2 / np.pi) *
                (np.sin(np.pi / (4 * alpha)))) + ((1 - 2 / np.pi) * (np.cos(np.pi / (4 * alpha)))))
        else:
            numer = np.sin(np.pi * t * (1 - alpha) / Ts) + \
                4 * alpha * (t / Ts) * np.cos(np.pi * t * (1 + alpha) / Ts)
            frac = 4 * alpha * t / Ts
            denom = np.pi * t * (1 - frac * frac) / Ts
            h_rrc[n] = numer / denom

    return time_idx, h_rrc
def gaussianfilter(N, alpha, Ts, Fs):
    """
    Generates a gaussian filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    alpha : float
        Roll off factor (Valid values are [0, 1]).
    Ts : float
        Symbol period in seconds. (Not used by the gaussian shape itself;
        kept for signature consistency with the other filters.)
    Fs : float
        Sampling Rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray of floats
        Array containing the time indices for the impulse response.
    h_gaussian : 1-D ndarray of floats
        Impulse response of the gaussian filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    # h(t) = (sqrt(pi)/alpha) * exp(-(pi*t/alpha)^2), evaluated vectorized.
    scaled = np.pi * time_idx / alpha
    h_gaussian = (np.sqrt(np.pi) / alpha) * np.exp(-(scaled * scaled))
    return time_idx, h_gaussian
def rectfilter(N, Ts, Fs):
    """
    Generates a rectangular filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.
    Ts : float
        Symbol period in seconds. (Unused; kept for signature consistency.)
    Fs : float
        Sampling Rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray of floats
        Array containing the time indices for the impulse response.
    h_rect : 1-D ndarray of floats
        Impulse response of the rectangular filter (all ones).
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    h_rect = np.ones(N)
    return time_idx, h_rect
| veeresht/CommPy | commpy/filters.py | Python | bsd-3-clause | 4,711 | [
"Gaussian"
] | a88e43d1a050590400f352f75478676477c2acb6714a4d5d85ba3e2d9603ba50 |
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys, os
root = os.path.dirname(os.path.realpath(__file__))
# => Driver Code <= #
if __name__ == '__main__':
    # > Working Dirname < #
    # Destination defaults to the current directory; at most one positional
    # argument (the target directory) is accepted.
    if len(sys.argv) == 1:
        dirname = '.'
    elif len(sys.argv) == 2:
        dirname = sys.argv[1]
    else:
        raise Exception('Usage: fsapt.py [dirname]')
    # > Copy Files < #
    # Copy the bundled *pymol scripts with glob/shutil instead of shelling
    # out to `cp`: paths containing spaces or shell metacharacters are
    # handled safely, and a failed copy raises instead of being silently
    # ignored (os.system's return code was discarded).
    import glob
    import shutil
    for fpymol in glob.glob(os.path.join(root, 'pymol2', '*pymol')):
        shutil.copy(fpymol, dirname)
| ashutoshvt/psi4 | psi4/share/psi4/fsapt/copy_pymol2.py | Python | lgpl-3.0 | 1,339 | [
"Psi4",
"PyMOL"
] | 6e6954fafb70e315f3f5fd1e968190ff850e2da1a0542a0b98e101fff220cfc2 |
# -*- coding: utf-8 -*-
#
# hl_api_models.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for model handling
"""
from ..ll_api import *
from .hl_api_helper import *
# Public API of this module; star-imports pick up exactly these names.
# Keep the list sorted alphabetically.
__all__ = [
    'ConnectionRules',
    'CopyModel',
    'GetDefaults',
    'Models',
    'SetDefaults',
]
@check_stack
def Models(mtype="all", sel=None):
    """Return a tuple of model names, sorted by name.

    All available models are neurons, devices and synapses.

    Parameters
    ----------
    mtype : str, optional
        Use ``mtype='nodes'`` to only see neuron and device models,
        or ``mtype='synapses'`` to only see synapse models; the default
        ``'all'`` returns both.
    sel : str, optional
        String used to filter the result list and only return models
        containing it.

    Returns
    -------
    tuple
        Available model names, sorted alphabetically.

    Raises
    ------
    ValueError
        If `mtype` is not one of ``'all'``, ``'nodes'`` or ``'synapses'``.

    Notes
    -----
    - Synapse model names ending with ``'_hpc'`` provide minimal memory
      requirements by using thread-local target neuron IDs and fixing
      the ``'rport'`` to 0.
    - Synapse model names ending with ``'_lbl'`` allow to assign an individual
      integer label (``'synapse_label'``) to created synapses at the cost
      of increased memory requirements.

    KEYWORDS: models
    """
    if mtype not in ("all", "nodes", "synapses"):
        raise ValueError("type has to be one of 'all', 'nodes' or 'synapses'")
    # Collect the keys of each SLI dictionary the caller asked for.
    names = []
    for kind, dictname in (("nodes", "modeldict"),
                           ("synapses", "synapsedict")):
        if mtype in ("all", kind):
            sr(dictname)
            names.extend(spp().keys())
    if sel is not None:
        names = [name for name in names if name.find(sel) >= 0]
    return tuple(sorted(names))
@check_stack
def ConnectionRules():
    """Return a tuple of all available connection rules, sorted by name.

    Returns
    -------
    tuple
        Names of the available connection rules.

    KEYWORDS: models
    """
    sr('connruledict')
    return tuple(sorted(spp().keys()))
@check_stack
def SetDefaults(model, params, val=None):
    """Set the default parameter values of the given model.

    New default values are used for all subsequently created instances
    of the model.

    Parameters
    ----------
    model : str
        Name of the model
    params : str or dict
        Dictionary of new default parameter values
    val : str, optional
        If given, `params` has to be the name of a model property.

    KEYWORDS: models
    """
    # A (property-name, value) pair is folded into a one-entry dictionary.
    # NOTE(review): when `val` is given but `params` is not a literal,
    # `val` is silently ignored and `params` is pushed unchanged — this
    # mirrors the historical behavior.
    if val is not None and is_literal(params):
        params = {params: val}
    sps(params)
    sr('/{0} exch SetDefaults'.format(model))
@check_stack
def GetDefaults(model, keys=None, output=''):
    """Return default parameters of the given model, specified by a string.

    Parameters
    ----------
    model : str
        Name of the model
    keys : str or list, optional
        String or a list of strings naming model properties. `GetDefaults` then
        returns a single value or a list of values belonging to the keys
        given.
    output : str, optional
        Whether the returned data should be in JSON format
        (``output='json'``). Default is '', which returns plain Python data.

    Returns
    -------
    dict
        A dictionary of default parameters.
    type
        If keys is a string, the corresponding default parameter is returned.
    list
        If keys is a list of strings, a list of corresponding default
        parameters is returned.
    str :
        If `output` is `json`, returns parameters in JSON format.

    Raises
    ------
    TypeError
        If `keys` is neither a string nor an iterable of strings.

    Notes
    -----
    **Example**

    .. code-block:: python

        >>> nest.GetDefaults('iaf_psc_alpha', 'V_m')
        -70.0
        >>> nest.GetDefaults('iaf_psc_alpha', ['V_m', 'V_th'])
        (-70.0, -55.0)

    KEYWORDS: models
    """
    # Build the SLI command that fetches either the full defaults
    # dictionary, a single property, or a list of properties.
    if keys is None:
        cmd = "/{0} GetDefaults".format(model)
    elif is_literal(keys):
        cmd = '/{0} GetDefaults /{1} get'.format(model, keys)
    elif is_iterable(keys):
        keys_str = " ".join("/{0}".format(x) for x in keys)
        cmd = "/{0} GetDefaults [ {1} ] {{ 1 index exch get }}"\
            .format(model, keys_str) + " Map exch pop"
    else:
        raise TypeError("keys should be either a string or an iterable")
    sr(cmd)
    result = spp()
    if output == 'json':
        result = to_json(result)
    return result
@check_stack
def CopyModel(existing, new, params=None):
    """Create a new model by copying an existing one.

    Parameters
    ----------
    existing : str
        Name of existing model
    new : str
        Name of the copied model
    params : dict, optional
        Default parameters assigned to the copy. Not provided parameters are
        taken from the existing model.

    KEYWORDS: models
    """
    model_deprecation_warning(existing)
    if params is None:
        # Plain copy: all defaults are inherited unchanged.
        sr("/%s /%s CopyModel" % (existing, new))
    else:
        # Push the overriding defaults first, then rotate them into place
        # behind the two model literals on the SLI stack.
        sps(params)
        sr("/%s /%s 3 2 roll CopyModel" % (existing, new))
| terhorstd/nest-simulator | pynest/nest/lib/hl_api_models.py | Python | gpl-2.0 | 5,656 | [
"NEURON"
] | 074c06c8b7d6c31a67b3e73fe3d1f17ec29b5dea26da182fc3d61f0bd48c7b16 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Languaeg modeling experiments in mtf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.models import mtf_transformer
from tensor2tensor.models.research import moe
from tensor2tensor.utils import registry
@registry.register_hparams
def xmoe_dense_4k():
  """Series of architectural experiments on cheap language models.
  For all of these architectures, we run on languagemodel_lm1b8k_packed
  for 32000 steps.
  All log-perplexities are per-token - multiply by 1.298 for per-word
  Results:
  model params(M) einsum alltoall mxu-util log-ppl
  xmoe_dense_4k 30 3.0e12 0 45% 3.31
  xmoe_dense_8k 46 4.7e12 0 49% 3.24
  xmoe_dense_64k 282 2.8e13 0 3.06
  xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
  xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
  xmoe_2d 282 5.3e12 7.6e8 34% 3.06
  Trained at 4x the batch size:
  xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
  Note: configurations and code are likely to change without notice.
  Returns:
    a hparams
  """
  hparams = mtf_transformer.mtf_transformer_base_lm()
  # No regularization: these short runs do not overfit.
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  # The following hparams are constant across all these experiments.
  hparams.batch_size = 128
  hparams.d_model = 512
  hparams.d_kv = 128
  hparams.num_heads = 4
  hparams.decoder_layers = ["att", "drd"] * 4
  hparams.shared_embedding_and_softmax_weights = False
  hparams.learning_rate_schedule = "rsqrt_decay"
  # We will vary the following parameters related to the ffn/moe layers.
  hparams.d_ff = 4096
  hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
  hparams.mesh_shape = "batch:8"
  return hparams
@registry.register_hparams
def xmoe_dense_8k():
  """xmoe_dense_4k with the feed-forward hidden size doubled to 8192."""
  hparams = xmoe_dense_4k()
  hparams.d_ff = 8192
  return hparams
@registry.register_hparams
def xmoe_dense_64k():
  """Very wide layer- run on 4x4."""
  hparams = xmoe_dense_4k()
  hparams.d_ff = 65536
  # The wide d_ff dimension is split over the model mesh axis.
  hparams.mesh_shape = "model:4,batch:8"
  return hparams
@registry.register_hparams
def xmoe_top_2():
  """Mixture of experts (16 experts)."""
  hparams = xmoe_dense_4k()
  moe.set_default_moe_hparams(hparams)
  hparams.mesh_shape = "all:8"
  hparams.layout = "batch:all;experts:all"
  return hparams
@registry.register_hparams
def xmoe_top_2_c15():
  """Mixture of experts with training capacity factor 1.5."""
  hparams = xmoe_top_2()
  hparams.moe_capacity_factor_train = 1.5
  return hparams
@registry.register_hparams
def xmoe_2d():
  """Two-dimensional hierarchical mixture of 16 experts."""
  hparams = xmoe_top_2()
  hparams.decoder_layers = ["att", "hmoe"] * 4
  hparams.mesh_shape = "b0:2;b1:4"
  hparams.outer_batch_size = 4
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  hparams.moe_num_experts = [4, 4]
  return hparams
@registry.register_hparams
def xmoe_2d_debug():
  """For debugging.
  Running this model on TPU without the hack of casting to bfloat16 for
  alltoall results in nan on the first step.
  TODO(noam): debug
  Returns:
    a hparams
  """
  hparams = xmoe_2d()
  hparams.decoder_layers = ["hmoe"] * 1
  hparams.activation_dtype = "float32"
  return hparams
@registry.register_hparams
def xmoe_2d_c15():
  """xmoe_2d with training capacity factor raised to 1.5."""
  hparams = xmoe_2d()
  hparams.moe_capacity_factor_train = 1.5
  return hparams
@registry.register_hparams
def xmoe_2d_x64():
  """Two-dimensional hierarchical mixture of 64 experts."""
  hparams = xmoe_2d()
  # hparams.mesh_shape = "b0:4;b1:8"
  hparams.outer_batch_size = 4
  hparams.moe_num_experts = [8, 8]
  return hparams
@registry.register_hparams
def xmoe2_dense(sz):
  """Series of architectural experiments on language modeling.
  Larger models than the ones above.
  All models are trained on sequences of 1024 tokens.
  We assume infinite training data, so no dropout necessary.
  We process 2^36 tokens in training = 524288 steps at batch size 128
  TODO(noam): find a large enough dataset for these experiments.
  You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
  (1 epoch = ~46000 steps) so training will cover about 11 epochs.
  Note: configurations and code are likely to change without notice.
  Run on TPU 4x4 for 524288 steps unless otherwise indicated.
  Args:
    sz: an integer
  Returns:
    a hparams
  """
  hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
  # Infinite-data assumption: disable all dropout.
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.max_length = 1024
  hparams.batch_size = 128
  hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
  hparams.learning_rate_decay_steps = 65536
  hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
  hparams.mesh_shape = "batch:32"
  return hparams
@registry.register_hparams
def xmoe2_dense_0():
  """Dense baseline, size 0 (see xmoe2_dense)."""
  return xmoe2_dense(0)
@registry.register_hparams
def xmoe2_dense_1():
  """Dense baseline, size 1 (see xmoe2_dense)."""
  return xmoe2_dense(1)
@registry.register_hparams
def xmoe2_dense_2():
  """Dense baseline, size 2 (see xmoe2_dense)."""
  return xmoe2_dense(2)
@registry.register_hparams
def xmoe2_dense_3():
  """Dense baseline, size 3 (see xmoe2_dense)."""
  return xmoe2_dense(3)
@registry.register_hparams
def xmoe2_v1():
  """Model incorporating mixture-of-experts and local-attention.
  ~6B parameters
  32 experts in 3 hierarchichal moe layers.
  Returns:
    a hparams
  """
  hparams = xmoe2_dense(0)
  moe.set_default_moe_hparams(hparams)
  # Repeating 8-layer block; the trailing [:-1] drops the final hmoe layer.
  hparams.decoder_layers = (
      ["local_att", "local_att", "drd",
       "att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1]
  hparams.d_ff = 2048
  hparams.d_kv = 128
  hparams.moe_hidden_size = 32768
  hparams.mesh_shape = "b0:4;b1:8"
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  hparams.outer_batch_size = 4
  hparams.moe_num_experts = [8, 4]
  hparams.num_heads = 4
  return hparams
@registry.register_hparams
def xmoe2_v1_x128():
  """128 experts, ~25B params - Train for 131072 steps on 8x8."""
  hparams = xmoe2_v1()
  hparams.moe_num_experts = [16, 8]
  hparams.outer_batch_size = 8
  hparams.mesh_shape = "b0:8;b1:16"
  hparams.batch_size = 512
  hparams.learning_rate_decay_steps = 16384
  return hparams
@registry.register_hparams
def xmoe2_tiny():
  """Test on local cpu."""
  hparams = xmoe2_v1()
  hparams.decoder_layers = [
      "local_att", "att", "compressed_att", "drd", "hmoe"]
  hparams.d_model = 128
  hparams.moe_hidden_size = 512
  # outer_batch_size=0 disables the hierarchical outer batch dimension.
  hparams.outer_batch_size = 0
  hparams.batch_size = 2
  hparams.mesh_shape = ""
  hparams.activation_dtype = "float32"
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k():
  """With sequence length 4096."""
  hparams = xmoe2_v1()
  hparams.batch_size = 32
  hparams.max_length = 4096
  hparams.split_to_length = 4096
  hparams.reshape_logits_hack = True
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k_local_only():
  """With sequence length 4096; every global-attention layer made local."""
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "local_att" if l == "att" else l for l in hparams.decoder_layers]
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k_global_only():
  """With sequence length 4096; every local-attention layer made global."""
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "att" if l == "local_att" else l for l in hparams.decoder_layers]
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k_compressed_c4():
  """With compressed attention (compression factor 4)."""
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "compressed_att" if l == "att" else l for l in hparams.decoder_layers]
  hparams.compression_factor = 4
  return hparams
@registry.register_hparams
def xmoe2_v1_l4k_compressed_c8():
  """With compressed attention (compression factor 8)."""
  hparams = xmoe2_v1_l4k_compressed_c4()
  hparams.compression_factor = 8
  return hparams
@registry.register_hparams
def wiki_2x2_base():
  """Set of architectural experiments - language model on wikipedia on a 2x2.
  1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
  Returns:
    a hparams
  """
  hparams = mtf_transformer.mtf_transformer_base_lm()
  hparams.shared_embedding_and_softmax_weights = False
  # no dropout - dataset is big enough to avoid overfitting.
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.max_length = 1024
  # 4 sequences per core
  hparams.batch_size = 32
  # We don't use linear decay in these experiments, since we don't want
  # a sharp jump in quality at the end of the training schedule.
  # You can insert this once you find the right architecture.
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.mesh_shape = "all:8"
  hparams.layout = "batch:all;experts:all"
  # parameters for mixture-of-experts
  moe.set_default_moe_hparams(hparams)
  hparams.moe_num_experts = 16
  hparams.moe_hidden_size = 8192
  # Layer stack and dimensions shared by all wiki_2x2_* variants.
  hparams.decoder_layers = ["att", "drd"] * 6
  hparams.d_model = 1024
  hparams.d_ff = 2048
  hparams.d_kv = 128
  hparams.num_heads = 4
  return hparams
@registry.register_hparams
def wiki_2x2_v1():
  """wiki_2x2_base with interleaved local attention and flat MoE layers."""
  hparams = wiki_2x2_base()
  hparams.decoder_layers = (
      ["local_att", "local_att", "drd",
       "att", "drd", "local_att", "local_att", "moe"] * 4)[:-1]
  return hparams
@registry.register_hparams
def wiki_2x2_local():
  """wiki_2x2_base with all attention layers made local."""
  hparams = wiki_2x2_base()
  hparams.decoder_layers = ["local_att", "drd"] * 6
  return hparams
@registry.register_hparams
def denoise_m15():
  """Denoising experiment: mask 15% of input tokens during training."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
  return hparams
@registry.register_hparams
def denoise_m30():
  """More masking during training (30%)."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
  return hparams
@registry.register_hparams
def denoise_dense_2_m30():
  """More masking during training (30%), on the larger dense-2 model."""
  hparams = xmoe2_dense_2()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
  return hparams
@registry.register_hparams
def denoise_z15():
  """Replace tokens instead of masking."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
  hparams.noising_use_eval_during_train = 0.25
  return hparams
@registry.register_hparams
def denoise_t15():
  """Noise up with dropout and a little transformer."""
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  # The noise itself is produced by a small 4-layer transformer.
  hparams.noising_spec_train = {
      "type": "transformer",
      "overrides": {
          "noising_spec_train": {"type": "mask", "prob": 0.15},
          "noising_use_eval_during_train": 0.0,
          "decoder_layers": ["att", "drd"] * 4,
          "num_heads": 4,
          "d_model": 512,
          "d_ff": 2048,
      }
  }
  return hparams
@registry.register_hparams
def denoise_v1_m15():
  """Denoising experiment on the xmoe2_v1 architecture (15% masking)."""
  hparams = xmoe2_v1()
  # no local attention
  # TODO(noam): non-masked version of local-attention
  hparams.decoder_layers = [
      "att" if l == "local_att" else l for l in hparams.decoder_layers]
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
  return hparams
@registry.register_hparams
def denoise_v1_m30():
  """More masking during training (30%)."""
  hparams = denoise_v1_m15()
  hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
  return hparams
@registry.register_hparams
def denoise_v1_m50():
  """More masking during training (50%)."""
  hparams = denoise_v1_m15()
  hparams.noising_spec_train = {"type": "mask", "prob": 0.5}
  return hparams
@registry.register_hparams
def denoise_v1_z15():
  """Replace tokens instead of masking."""
  hparams = denoise_v1_m15()
  hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
  return hparams
@registry.register_hparams
def denoise_v1_t15():
  """Noise up with dropout and a little transformer."""
  hparams = denoise_v1_m15()
  hparams.noising_spec_train = {
      "type": "transformer",
      "overrides": {
          "noising_spec_train": {"type": "mask", "prob": 0.15},
          "noising_use_eval_during_train": 0.0,
          "decoder_layers": ["att", "drd"] * 4,
          "num_heads": 4,
          "d_model": 512,
          "d_ff": 2048,
      }
  }
  return hparams
| mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/models/research/moe_experiments.py | Python | apache-2.0 | 13,050 | [
"MOE"
] | 8349d6dcd19dc0beb27cbdf0e4ec736d775b473a1a6610b92f8c80acd8f4916e |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose,
assert_equal, assert_, assert_array_less)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import signal, fftpack
# (window name, extra positional args) pairs exercised by the generic
# symmetry/length checks in test_windowfunc_basics below.
window_funcs = [
    ('boxcar', ()),
    ('triang', ()),
    ('parzen', ()),
    ('bohman', ()),
    ('blackman', ()),
    ('nuttall', ()),
    ('blackmanharris', ()),
    ('flattop', ()),
    ('bartlett', ()),
    ('hanning', ()),
    ('barthann', ()),
    ('hamming', ()),
    ('kaiser', (1,)),
    ('gaussian', (0.5,)),
    ('general_gaussian', (1.5, 2)),
    ('chebwin', (1,)),
    ('slepian', (2,)),
    ('cosine', ()),
    ('hann', ()),
    ('exponential', ()),
    ('tukey', (0.5,)),
]
class TestBartHann(object):
    """Golden-value regression tests for the modified Bartlett-Hann window."""
    def test_basic(self):
        # Symmetric, default and periodic forms against reference values.
        assert_allclose(signal.barthann(6, sym=True),
                        [0, 0.35857354213752, 0.8794264578624801,
                         0.8794264578624801, 0.3585735421375199, 0])
        assert_allclose(signal.barthann(7),
                        [0, 0.27, 0.73, 1.0, 0.73, 0.27, 0])
        assert_allclose(signal.barthann(6, False),
                        [0, 0.27, 0.73, 1.0, 0.73, 0.27])
class TestBartlett(object):
    """Golden-value regression tests for the Bartlett window."""
    def test_basic(self):
        assert_allclose(signal.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
        assert_allclose(signal.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
        assert_allclose(signal.bartlett(6, False),
                        [0, 1/3, 2/3, 1.0, 2/3, 1/3])
class TestBlackman(object):
    """Golden-value regression tests for the Blackman window."""
    def test_basic(self):
        assert_allclose(signal.blackman(6, sym=False),
                        [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)
        assert_allclose(signal.blackman(7, sym=False),
                        [0, 0.09045342435412804, 0.4591829575459636,
                         0.9203636180999081, 0.9203636180999081,
                         0.4591829575459636, 0.09045342435412804], atol=1e-8)
        assert_allclose(signal.blackman(6),
                        [0, 0.2007701432625305, 0.8492298567374694,
                         0.8492298567374694, 0.2007701432625305, 0],
                        atol=1e-14)
        assert_allclose(signal.blackman(7, True),
                        [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14)
class TestBlackmanHarris(object):
    """Golden-value regression tests for the 4-term Blackman-Harris window."""
    def test_basic(self):
        assert_allclose(signal.blackmanharris(6, False),
                        [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
        assert_allclose(signal.blackmanharris(7, sym=False),
                        [6.0e-05, 0.03339172347815117, 0.332833504298565,
                         0.8893697722232837, 0.8893697722232838,
                         0.3328335042985652, 0.03339172347815122])
        assert_allclose(signal.blackmanharris(6),
                        [6.0e-05, 0.1030114893456638, 0.7938335106543362,
                         0.7938335106543364, 0.1030114893456638, 6.0e-05])
        assert_allclose(signal.blackmanharris(7, sym=True),
                        [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,
                         6.0e-05])
class TestBohman(object):
    """Golden-value regression tests for the Bohman window."""
    def test_basic(self):
        assert_allclose(signal.bohman(6),
                        [0, 0.1791238937062839, 0.8343114522576858,
                         0.8343114522576858, 0.1791238937062838, 0])
        assert_allclose(signal.bohman(7, sym=True),
                        [0, 0.1089977810442293, 0.6089977810442293, 1.0,
                         0.6089977810442295, 0.1089977810442293, 0])
        assert_allclose(signal.bohman(6, False),
                        [0, 0.1089977810442293, 0.6089977810442293, 1.0,
                         0.6089977810442295, 0.1089977810442293])
class TestBoxcar(object):
    """Golden-value regression tests for the boxcar (rectangular) window."""
    def test_basic(self):
        assert_allclose(signal.boxcar(6), [1, 1, 1, 1, 1, 1])
        assert_allclose(signal.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
        assert_allclose(signal.boxcar(6, False), [1, 1, 1, 1, 1, 1])
# Expected output of chebwin(53, at=-40): odd-length Dolph-Chebyshev window.
# Shared by TestChebWin and TestGetWindow.
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
                       0.198891, 0.235450, 0.274846, 0.316836,
                       0.361119, 0.407338, 0.455079, 0.503883,
                       0.553248, 0.602637, 0.651489, 0.699227,
                       0.745266, 0.789028, 0.829947, 0.867485,
                       0.901138, 0.930448, 0.955010, 0.974482,
                       0.988591, 0.997138, 1.000000, 0.997138,
                       0.988591, 0.974482, 0.955010, 0.930448,
                       0.901138, 0.867485, 0.829947, 0.789028,
                       0.745266, 0.699227, 0.651489, 0.602637,
                       0.553248, 0.503883, 0.455079, 0.407338,
                       0.361119, 0.316836, 0.274846, 0.235450,
                       0.198891, 0.165348, 0.134941, 0.107729,
                       0.200938])
# Expected output of chebwin(54, at=40): even-length variant.
cheb_even_true = array([0.203894, 0.107279, 0.133904,
                        0.163608, 0.196338, 0.231986,
                        0.270385, 0.311313, 0.354493,
                        0.399594, 0.446233, 0.493983,
                        0.542378, 0.590916, 0.639071,
                        0.686302, 0.732055, 0.775783,
                        0.816944, 0.855021, 0.889525,
                        0.920006, 0.946060, 0.967339,
                        0.983557, 0.994494, 1.000000,
                        1.000000, 0.994494, 0.983557,
                        0.967339, 0.946060, 0.920006,
                        0.889525, 0.855021, 0.816944,
                        0.775783, 0.732055, 0.686302,
                        0.639071, 0.590916, 0.542378,
                        0.493983, 0.446233, 0.399594,
                        0.354493, 0.311313, 0.270385,
                        0.231986, 0.196338, 0.163608,
                        0.133904, 0.107279, 0.203894])
class TestChebWin(object):
    """Tests for the Dolph-Chebyshev window at several attenuation levels."""
    def test_basic(self):
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            assert_allclose(signal.chebwin(6, 100),
                            [0.1046401879356917, 0.5075781475823447, 1.0, 1.0,
                             0.5075781475823447, 0.1046401879356917])
            assert_allclose(signal.chebwin(7, 100),
                            [0.05650405062850233, 0.316608530648474,
                             0.7601208123539079, 1.0, 0.7601208123539079,
                             0.316608530648474, 0.05650405062850233])
            assert_allclose(signal.chebwin(6, 10),
                            [1.0, 0.6071201674458373, 0.6808391469897297,
                             0.6808391469897297, 0.6071201674458373, 1.0])
            assert_allclose(signal.chebwin(7, 10),
                            [1.0, 0.5190521247588651, 0.5864059018130382,
                             0.6101519801307441, 0.5864059018130382,
                             0.5190521247588651, 1.0])
            assert_allclose(signal.chebwin(6, 10, False),
                            [1.0, 0.5190521247588651, 0.5864059018130382,
                             0.6101519801307441, 0.5864059018130382,
                             0.5190521247588651])
    def test_cheb_odd_high_attenuation(self):
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            cheb_odd = signal.chebwin(53, at=-40)
        assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
    def test_cheb_even_high_attenuation(self):
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            cheb_even = signal.chebwin(54, at=40)
        assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
    def test_cheb_odd_low_attenuation(self):
        cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
                                      0.610151, 0.586405, 0.519052,
                                      1.000000])
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            cheb_odd = signal.chebwin(7, at=10)
        assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
    def test_cheb_even_low_attenuation(self):
        cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
                                       0.541338, 0.541338, 0.51027,
                                       0.451924, 1.000000])
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            cheb_even = signal.chebwin(8, at=-10)
        assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
# Expected signal.exponential outputs keyed by its positional arguments
# (M, center, tau, sym); ``None`` marks parameter combinations that must
# raise ValueError (an explicit center with a symmetric window).
exponential_data = {
    (4, None, 0.2, False):
        array([4.53999297624848542e-05,
               6.73794699908546700e-03, 1.00000000000000000e+00,
               6.73794699908546700e-03]),
    (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
                                 0.0820849986238988, 0.00055308437014783]),
    (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
                                  0.36787944117144233]),
    (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
                                 0.60653065971263342, 0.22313016014842982]),
    (4, 2, 0.2, False):
        array([4.53999297624848542e-05, 6.73794699908546700e-03,
               1.00000000000000000e+00, 6.73794699908546700e-03]),
    (4, 2, 0.2, True): None,
    (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
                               0.36787944117144233]),
    (4, 2, 1.0, True): None,
    (5, None, 0.2, True):
        array([4.53999297624848542e-05,
               6.73794699908546700e-03, 1.00000000000000000e+00,
               6.73794699908546700e-03, 4.53999297624848542e-05]),
    (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
                                 0.36787944117144233, 0.1353352832366127]),
    (5, 2, 0.2, True): None,
    (5, 2, 1.0, True): None
}
def test_exponential():
    """Check signal.exponential against golden values and invalid inputs."""
    for args, expected in exponential_data.items():
        if expected is None:
            # Invalid parameter combination: must raise.
            assert_raises(ValueError, signal.exponential, *args)
        else:
            assert_allclose(signal.exponential(*args), expected, rtol=1e-14)
class TestFlatTop(object):
    """Golden-value regression tests for the flat top window."""
    def test_basic(self):
        assert_allclose(signal.flattop(6, sym=False),
                        [-0.000421051, -0.051263156, 0.19821053, 1.0,
                         0.19821053, -0.051263156])
        assert_allclose(signal.flattop(7, sym=False),
                        [-0.000421051, -0.03684078115492348,
                         0.01070371671615342, 0.7808739149387698,
                         0.7808739149387698, 0.01070371671615342,
                         -0.03684078115492348])
        assert_allclose(signal.flattop(6),
                        [-0.000421051, -0.0677142520762119, 0.6068721525762117,
                         0.6068721525762117, -0.0677142520762119,
                         -0.000421051])
        assert_allclose(signal.flattop(7, True),
                        [-0.000421051, -0.051263156, 0.19821053, 1.0,
                         0.19821053, -0.051263156, -0.000421051])
class TestGaussian(object):
    """Golden-value regression tests for the Gaussian window."""
    def test_basic(self):
        assert_allclose(signal.gaussian(6, 1.0),
                        [0.04393693362340742, 0.3246524673583497,
                         0.8824969025845955, 0.8824969025845955,
                         0.3246524673583497, 0.04393693362340742])
        assert_allclose(signal.gaussian(7, 1.2),
                        [0.04393693362340742, 0.2493522087772962,
                         0.7066482778577162, 1.0, 0.7066482778577162,
                         0.2493522087772962, 0.04393693362340742])
        assert_allclose(signal.gaussian(7, 3),
                        [0.6065306597126334, 0.8007374029168081,
                         0.9459594689067654, 1.0, 0.9459594689067654,
                         0.8007374029168081, 0.6065306597126334])
        assert_allclose(signal.gaussian(6, 3, False),
                        [0.6065306597126334, 0.8007374029168081,
                         0.9459594689067654, 1.0, 0.9459594689067654,
                         0.8007374029168081])
class TestHamming(object):
    """Golden-value regression tests for the Hamming window."""
    def test_basic(self):
        assert_allclose(signal.hamming(6, False),
                        [0.08, 0.31, 0.77, 1.0, 0.77, 0.31])
        assert_allclose(signal.hamming(7, sym=False),
                        [0.08, 0.2531946911449826, 0.6423596296199047,
                         0.9544456792351128, 0.9544456792351128,
                         0.6423596296199047, 0.2531946911449826])
        assert_allclose(signal.hamming(6),
                        [0.08, 0.3978521825875242, 0.9121478174124757,
                         0.9121478174124757, 0.3978521825875242, 0.08])
        assert_allclose(signal.hamming(7, sym=True),
                        [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])
class TestHann(object):
    """Golden-value regression tests for the Hann window."""
    def test_basic(self):
        assert_allclose(signal.hann(6, sym=False),
                        [0, 0.25, 0.75, 1.0, 0.75, 0.25])
        assert_allclose(signal.hann(7, sym=False),
                        [0, 0.1882550990706332, 0.6112604669781572,
                         0.9504844339512095, 0.9504844339512095,
                         0.6112604669781572, 0.1882550990706332])
        assert_allclose(signal.hann(6, True),
                        [0, 0.3454915028125263, 0.9045084971874737,
                         0.9045084971874737, 0.3454915028125263, 0])
        assert_allclose(signal.hann(7),
                        [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])
class TestKaiser(object):
    """Golden-value regression tests for the Kaiser window."""
    def test_basic(self):
        assert_allclose(signal.kaiser(6, 0.5),
                        [0.9403061933191572, 0.9782962393705389,
                         0.9975765035372042, 0.9975765035372042,
                         0.9782962393705389, 0.9403061933191572])
        assert_allclose(signal.kaiser(7, 0.5),
                        [0.9403061933191572, 0.9732402256999829,
                         0.9932754654413773, 1.0, 0.9932754654413773,
                         0.9732402256999829, 0.9403061933191572])
        assert_allclose(signal.kaiser(6, 2.7),
                        [0.2603047507678832, 0.6648106293528054,
                         0.9582099802511439, 0.9582099802511439,
                         0.6648106293528054, 0.2603047507678832])
        assert_allclose(signal.kaiser(7, 2.7),
                        [0.2603047507678832, 0.5985765418119844,
                         0.8868495172060835, 1.0, 0.8868495172060835,
                         0.5985765418119844, 0.2603047507678832])
        assert_allclose(signal.kaiser(6, 2.7, False),
                        [0.2603047507678832, 0.5985765418119844,
                         0.8868495172060835, 1.0, 0.8868495172060835,
                         0.5985765418119844])
class TestNuttall(object):
    """Golden-value regression tests for the Nuttall window."""
    def test_basic(self):
        assert_allclose(signal.nuttall(6, sym=False),
                        [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
                         0.0613345])
        assert_allclose(signal.nuttall(7, sym=False),
                        [0.0003628, 0.03777576895352025, 0.3427276199688195,
                         0.8918518610776603, 0.8918518610776603,
                         0.3427276199688196, 0.0377757689535203])
        assert_allclose(signal.nuttall(6),
                        [0.0003628, 0.1105152530498718, 0.7982580969501282,
                         0.7982580969501283, 0.1105152530498719, 0.0003628])
        assert_allclose(signal.nuttall(7, True),
                        [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
                         0.0613345, 0.0003628])
class TestParzen(object):
    """Golden-value regression tests for the Parzen window."""
    def test_basic(self):
        assert_allclose(signal.parzen(6),
                        [0.009259259259259254, 0.25, 0.8611111111111112,
                         0.8611111111111112, 0.25, 0.009259259259259254])
        assert_allclose(signal.parzen(7, sym=True),
                        [0.00583090379008747, 0.1574344023323616,
                         0.6501457725947521, 1.0, 0.6501457725947521,
                         0.1574344023323616, 0.00583090379008747])
        assert_allclose(signal.parzen(6, False),
                        [0.00583090379008747, 0.1574344023323616,
                         0.6501457725947521, 1.0, 0.6501457725947521,
                         0.1574344023323616])
class TestTriang(object):
    """Golden-value regression tests for the triangular window."""
    def test_basic(self):
        assert_allclose(signal.triang(6, True),
                        [1/6, 1/2, 5/6, 5/6, 1/2, 1/6])
        assert_allclose(signal.triang(7),
                        [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4])
        assert_allclose(signal.triang(6, sym=False),
                        [1/4, 1/2, 3/4, 1, 3/4, 1/2])
# Expected signal.tukey outputs keyed by its positional arguments
# (M[, alpha[, sym]]); a value of ``None`` would mark an invalid call.
tukey_data = {
    (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
    (4, 0.9, True): array([0.0, 0.84312081893436686,
                           0.84312081893436686, 0.0]),
    (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
    (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
    (4, 0.9, False): array([0.0, 0.58682408883346526,
                            1.0, 0.58682408883346526]),
    (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
    (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
    (5, 0.8, True): array([0.0, 0.69134171618254492,
                           1.0, 0.69134171618254492, 0.0]),
    (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
    (6, 0): [1, 1, 1, 1, 1, 1],
    (7, 0): [1, 1, 1, 1, 1, 1, 1],
    (6, .25): [0, 1, 1, 1, 1, 0],
    (7, .25): [0, 1, 1, 1, 1, 1, 0],
    (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0],
    (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0],
    (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0],
    (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0,
               0.9698463103929542, 0.4131759111665347, 0],
    (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737,
             0.3454915028125263, 0],
    (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],
}
class TestTukey(object):
    """Tests for the Tukey (tapered cosine) window."""
    def test_basic(self):
        # Test against hardcoded data
        for k, v in tukey_data.items():
            if v is None:
                assert_raises(ValueError, signal.tukey, *k)
            else:
                win = signal.tukey(*k)
                assert_allclose(win, v, rtol=1e-14)
    def test_extremes(self):
        # Test extremes of alpha correspond to boxcar and hann
        tuk0 = signal.tukey(100, 0)
        box0 = signal.boxcar(100)
        assert_array_almost_equal(tuk0, box0)
        tuk1 = signal.tukey(100, 1)
        han1 = signal.hann(100)
        assert_array_almost_equal(tuk1, han1)
class TestGetWindow(object):
def test_boxcar(self):
w = signal.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
# window is a tuple of len 1
w = signal.get_window(('boxcar',), 16)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
w = signal.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
w = signal.get_window(('chebwin', 40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_kaiser_float(self):
win1 = signal.get_window(7.2, 64)
win2 = signal.kaiser(64, 7.2, False)
assert_allclose(win1, win2)
def test_invalid_inputs(self):
# Window is not a float, tuple, or string
assert_raises(ValueError, signal.get_window, set('hann'), 8)
# Unknown window type error
assert_raises(ValueError, signal.get_window, 'broken', 4)
def test_array_as_window(self):
# github issue 3603
osfactor = 128
sig = np.arange(128)
win = signal.get_window(('kaiser', 8.0), osfactor // 2)
assert_raises(ValueError, signal.resample,
(sig, len(sig) * osfactor), {'window': win})
def test_windowfunc_basics():
    # Shared sanity checks applied to every window function in window_funcs.
    for window_name, params in window_funcs:
        window = getattr(signal, window_name)
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            # A symmetric window of length n+1 with its last point dropped
            # equals the periodic (DFT-even) window of length n.
            for n_sym, n_per in ((8, 7), (9, 8)):
                assert_array_almost_equal(window(n_sym, *params, sym=True)[:-1],
                                          window(n_per, *params, sym=False))
            # Functions run and the output length matches the request.
            for n in (6, 7):
                for sym in (True, False):
                    assert_equal(len(window(n, *params, sym=sym)), n)
            # Non-integer and negative lengths are rejected.
            assert_raises(ValueError, window, 5.5, *params)
            assert_raises(ValueError, window, -7, *params)
            # Degenerate lengths 0 and 1 in both sym modes.
            for sym in (True, False):
                assert_array_equal(window(0, *params, sym=sym), [])
                assert_array_equal(window(1, *params, sym=sym), [1])
            # Results are float arrays for all of the above sizes.
            for n in (0, 1, 6):
                for sym in (True, False):
                    assert_(window(n, *params, sym=sym).dtype == 'float')
            # No window value should exceed 1 (small numerical tolerance).
            for n in (10, 9):
                for sym in (True, False):
                    assert_array_less(window(n, *params, sym=sym), 1.01)
            # DFT-even windows must have a purely real spectrum, for both
            # even and odd lengths.
            for n in (10, 11):
                assert_allclose(fftpack.fft(window(n, *params, sym=False)).imag,
                                0, atol=1e-14)
def test_needs_params():
for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss', 'dpss',
'chebwin', 'cheb', 'exponential', 'poisson', 'tukey',
'tuk']:
assert_raises(ValueError, signal.get_window, winstr, 7)
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/signal/tests/test_windows.py | Python | mit | 23,428 | [
"Gaussian"
] | 3eb89047f6e0f6802b30760d82d01ab85d0e39332d791aa346ef4b289a59d72b |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from Nwchem.
2015/09/21 - Xin Chen (chenxin13@mails.tsinghua.edu.cn):
NwOutput will read new kinds of data:
1. normal hessian matrix. ["hessian"]
2. projected hessian matrix. ["projected_hessian"]
3. normal frequencies. ["normal_frequencies"]
For backward compatibility, the key for accessing the projected frequencies
is still 'frequencies'.
2015/10/12 - Xin Chen
NwOutput will read new kinds of data:
1. forces. ["forces"]
"""
import os
import re
import warnings
from string import Template
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen.analysis.excitation import ExcitationSpectrum
from pymatgen.core.structure import Molecule, Structure
from pymatgen.core.units import Energy, FloatWithUnit
# Optional set of basis-set filenames available locally.  Populated from the
# NWCHEM_BASIS_LIBRARY environment variable (a directory path) when it is
# set; NwTask uses it to warn about basis sets that are not installed.
NWCHEM_BASIS_LIBRARY = None
if os.environ.get("NWCHEM_BASIS_LIBRARY"):
    NWCHEM_BASIS_LIBRARY = set(os.listdir(os.environ["NWCHEM_BASIS_LIBRARY"]))
class NwTask(MSONable):
    """
    Base task for Nwchem.
    """

    # Supported theory keywords, mapped to short human-readable descriptions.
    theories = {
        "g3gn": "some description",
        "scf": "Hartree-Fock",
        "dft": "DFT",
        "esp": "ESP",
        "sodft": "Spin-Orbit DFT",
        "mp2": "MP2 using a semi-direct algorithm",
        "direct_mp2": "MP2 using a full-direct algorithm",
        "rimp2": "MP2 using the RI approximation",
        "ccsd": "Coupled-cluster single and double excitations",
        "ccsd(t)": "Coupled-cluster linearized triples approximation",
        "ccsd+t(ccsd)": "Fourth order triples contribution",
        "mcscf": "Multiconfiguration SCF",
        "selci": "Selected CI with perturbation correction",
        "md": "Classical molecular dynamics simulation",
        "pspw": "Pseudopotential plane-wave DFT for molecules and insulating solids using NWPW",
        "band": "Pseudopotential plane-wave DFT for solids using NWPW",
        "tce": "Tensor Contraction Engine",
        "tddft": "Time Dependent DFT",
    }

    # Supported task operations, mapped to short descriptions.
    operations = {
        "energy": "Evaluate the single point energy.",
        "gradient": "Evaluate the derivative of the energy with respect to nuclear coordinates.",
        "optimize": "Minimize the energy by varying the molecular structure.",
        "saddle": "Conduct a search for a transition state (or saddle point).",
        "hessian": "Compute second derivatives.",
        "frequencies": "Compute second derivatives and print out an analysis of molecular vibrations.",
        "freq": "Same as frequencies.",
        "vscf": "Compute anharmonic contributions to the vibrational modes.",
        "property": "Calculate the properties for the wave function.",
        "dynamics": "Perform classical molecular dynamics.",
        "thermodynamics": "Perform multi-configuration thermodynamic integration using classical MD.",
        "": "dummy",
    }

    def __init__(
        self,
        charge,
        spin_multiplicity,
        basis_set,
        basis_set_option="cartesian",
        title=None,
        theory="dft",
        operation="optimize",
        theory_directives=None,
        alternate_directives=None,
    ):
        """
        Very flexible arguments to support many types of potential setups.
        Users should use more friendly static methods unless they need the
        flexibility.

        Args:
            charge: Charge of the molecule. If None, charge on molecule is
                used. Defaults to None. This allows the input file to be set a
                charge independently from the molecule itself.
            spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
                which means that the spin multiplicity is set to 1 if the
                molecule has no unpaired electrons and to 2 if there are
                unpaired electrons.
            basis_set: The basis set used for the task as a dict. E.g.,
                {"C": "6-311++G**", "H": "6-31++G**"}.
            basis_set_option: cartesian (default) | spherical,
            title: Title for the task. Defaults to None, which means a title
                based on the theory and operation of the task is
                autogenerated.
            theory: The theory used for the task. Defaults to "dft".
            operation: The operation for the task. Defaults to "optimize".
            theory_directives: A dict of theory directives. For example,
                if you are running dft calculations, you may specify the
                exchange correlation functional using {"xc": "b3lyp"}.
            alternate_directives: A dict of alternate directives. For
                example, to perform cosmo calculations and dielectric
                constant of 78, you'd supply {'cosmo': {"dielectric": 78}}.

        Raises:
            NwInputError: if theory or operation is not a recognized keyword.
        """
        # Basic checks.  Membership is tested directly against the class-level
        # dicts (no need for .keys()).
        if theory.lower() not in NwTask.theories:
            raise NwInputError(f"Invalid theory {theory}")
        if operation.lower() not in NwTask.operations:
            raise NwInputError(f"Invalid operation {operation}")
        self.charge = charge
        self.spin_multiplicity = spin_multiplicity
        self.title = title if title is not None else f"{theory} {operation}"
        self.theory = theory
        self.basis_set = basis_set or {}
        if NWCHEM_BASIS_LIBRARY is not None:
            for b in set(self.basis_set.values()):
                # NWChem's library filenames spell "*" as "s", e.g.
                # 6-31++g** is stored as 6-31++gss.
                if re.sub(r"\*", "s", b.lower()) not in NWCHEM_BASIS_LIBRARY:
                    # BUG FIX: message previously read "not in in ...".
                    warnings.warn(f"Basis set {b} not in NWCHEM_BASIS_LIBRARY")
        self.basis_set_option = basis_set_option
        self.operation = operation
        self.theory_directives = theory_directives or {}
        self.alternate_directives = alternate_directives or {}

    def __str__(self):
        # Per-element basis lines, sorted by element symbol for reproducible
        # output.  Each line already carries its single-space indent.
        bset_spec = []
        for el, bset in sorted(self.basis_set.items(), key=lambda x: x[0]):
            bset_spec.append(f' {el} library "{bset}"')
        theory_spec = []
        if self.theory_directives:
            theory_spec.append(f"{self.theory}")
            for k in sorted(self.theory_directives.keys()):
                theory_spec.append(f" {k} {self.theory_directives[k]}")
            theory_spec.append("end")
        for k in sorted(self.alternate_directives.keys()):
            theory_spec.append(k)
            for k2 in sorted(self.alternate_directives[k].keys()):
                theory_spec.append(f" {k2} {self.alternate_directives[k][k2]}")
            theory_spec.append("end")
        t = Template(
            """title "$title"
charge $charge
basis $basis_set_option
$bset_spec
end
$theory_spec
"""
        )
        output = t.substitute(
            title=self.title,
            charge=int(self.charge),
            spinmult=self.spin_multiplicity,
            basis_set_option=self.basis_set_option,
            bset_spec="\n".join(bset_spec),
            theory_spec="\n".join(theory_spec),
            theory=self.theory,
        )
        if self.operation is not None:
            output += f"task {self.theory} {self.operation}"
        return output

    def as_dict(self):
        """
        Returns: MSONable dict.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "charge": self.charge,
            "spin_multiplicity": self.spin_multiplicity,
            "title": self.title,
            "theory": self.theory,
            "operation": self.operation,
            "basis_set": self.basis_set,
            "basis_set_option": self.basis_set_option,
            "theory_directives": self.theory_directives,
            "alternate_directives": self.alternate_directives,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            NwTask
        """
        # Use cls(...) rather than a hard-coded NwTask(...) so subclasses
        # round-trip through as_dict/from_dict correctly.
        return cls(
            charge=d["charge"],
            spin_multiplicity=d["spin_multiplicity"],
            title=d["title"],
            theory=d["theory"],
            operation=d["operation"],
            basis_set=d["basis_set"],
            basis_set_option=d["basis_set_option"],
            theory_directives=d["theory_directives"],
            alternate_directives=d["alternate_directives"],
        )

    @classmethod
    def from_molecule(
        cls,
        mol,
        theory,
        charge=None,
        spin_multiplicity=None,
        basis_set="6-31g",
        basis_set_option="cartesian",
        title=None,
        operation="optimize",
        theory_directives=None,
        alternate_directives=None,
    ):
        """
        Very flexible arguments to support many types of potential setups.
        Users should use more friendly static methods unless they need the
        flexibility.

        Args:
            mol: Input molecule
            charge: Charge of the molecule. If None, charge on molecule is
                used. Defaults to None. This allows the input file to be set a
                charge independently from the molecule itself.
            spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
                which means that the spin multiplicity is set to 1 if the
                molecule has no unpaired electrons and to 2 if there are
                unpaired electrons.
            basis_set: The basis set to be used as string or a dict. E.g.,
                {"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
                same basis set is used for all elements.
            basis_set_option: cartesian (default) | spherical,
            title: Title for the task. Defaults to None, which means a title
                based on the theory and operation of the task is
                autogenerated.
            theory: The theory used for the task. Defaults to "dft".
            operation: The operation for the task. Defaults to "optimize".
            theory_directives: A dict of theory directives. For example,
                if you are running dft calculations, you may specify the
                exchange correlation functional using {"xc": "b3lyp"}.
            alternate_directives: A dict of alternate directives. For
                example, to perform cosmo calculations with DFT, you'd supply
                {'cosmo': "cosmo"}.
        """
        title = title if title is not None else "{} {} {}".format(re.sub(r"\s", "", mol.formula), theory, operation)
        charge = charge if charge is not None else mol.charge
        # Electron count after applying any user-specified charge offset.
        nelectrons = -charge + mol.charge + mol.nelectrons  # pylint: disable=E1130
        if spin_multiplicity is not None:
            # multiplicity = 2S + 1, so nelectrons + multiplicity must be odd.
            if (nelectrons + spin_multiplicity) % 2 != 1:
                raise ValueError(
                    f"Charge of {charge} and spin multiplicity of {spin_multiplicity} is"
                    " not possible for this molecule"
                )
        elif charge == mol.charge:
            spin_multiplicity = mol.spin_multiplicity
        else:
            spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
        elements = set(mol.composition.get_el_amt_dict())
        if isinstance(basis_set, str):
            # Same basis set for every element in the molecule.
            basis_set = {el: basis_set for el in elements}
        return cls(
            charge,
            spin_multiplicity,
            basis_set,
            basis_set_option=basis_set_option,
            title=title,
            theory=theory,
            operation=operation,
            theory_directives=theory_directives,
            alternate_directives=alternate_directives,
        )

    @classmethod
    def dft_task(cls, mol, xc="b3lyp", **kwargs):
        """
        A class method for quickly creating DFT tasks with optional
        cosmo parameter .

        Args:
            mol: Input molecule
            xc: Exchange correlation to use.
            kwargs: Any of the other kwargs supported by NwTask. Note the
                theory is always "dft" for a dft task.
        """
        t = cls.from_molecule(mol, theory="dft", **kwargs)
        t.theory_directives.update({"xc": xc, "mult": t.spin_multiplicity})
        return t

    @classmethod
    def esp_task(cls, mol, **kwargs):
        """
        A class method for quickly creating ESP tasks with RESP
        charge fitting.

        Args:
            mol: Input molecule
            kwargs: Any of the other kwargs supported by NwTask. Note the
                theory is always "dft" for a dft task.
        """
        return cls.from_molecule(mol, theory="esp", **kwargs)
class NwInput(MSONable):
    """
    An object representing a Nwchem input file, which is essentially a list
    of tasks on a particular molecule.
    """

    def __init__(
        self,
        mol,
        tasks,
        directives=None,
        geometry_options=("units", "angstroms"),
        symmetry_options=None,
        memory_options=None,
    ):
        """
        Args:
            mol: Input molecule. If molecule is a single string, it is used as a
                direct input to the geometry section of the NWChem input
                file.
            tasks: List of NwTasks.
            directives: List of root level directives as tuple. E.g.,
                [("start", "water"), ("print", "high")]
            geometry_options: Additional list of options to be supplied to the
                geometry. E.g., ["units", "angstroms", "noautoz"]. Defaults to
                ("units", "angstroms").
            symmetry_options: Addition list of option to be supplied to the
                symmetry. E.g. ["c1"] to turn off the symmetry
            memory_options: Memory controlling options. str.
                E.g "total 1000 mb stack 400 mb"
        """
        self._mol = mol
        self.directives = directives if directives is not None else []
        self.tasks = tasks
        self.geometry_options = geometry_options
        self.symmetry_options = symmetry_options
        self.memory_options = memory_options

    @property
    def molecule(self):
        """
        Returns molecule associated with this NwInput.
        """
        return self._mol

    def __str__(self):
        # Assemble the input file section by section: memory, root-level
        # directives, geometry (with optional symmetry), then one block per
        # task.
        o = []
        if self.memory_options:
            o.append("memory " + self.memory_options)
        for d in self.directives:
            o.append(f"{d[0]} {d[1]}")
        o.append("geometry " + " ".join(self.geometry_options))
        if self.symmetry_options:
            o.append(" symmetry " + " ".join(self.symmetry_options))
        for site in self._mol:
            o.append(f" {site.specie.symbol} {site.x} {site.y} {site.z}")
        o.append("end\n")
        for t in self.tasks:
            o.append(str(t))
            o.append("")
        return "\n".join(o)

    def write_file(self, filename):
        """
        Write this input to a file.

        Args:
            filename (str): Filename
        """
        with zopen(filename, "w") as f:
            f.write(str(self))

    def as_dict(self):
        """
        Returns: MSONable dict
        """
        return {
            "mol": self._mol.as_dict(),
            "tasks": [t.as_dict() for t in self.tasks],
            "directives": [list(t) for t in self.directives],
            "geometry_options": list(self.geometry_options),
            "symmetry_options": self.symmetry_options,
            "memory_options": self.memory_options,
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            NwInput
        """
        return cls(
            Molecule.from_dict(d["mol"]),
            tasks=[NwTask.from_dict(dt) for dt in d["tasks"]],
            directives=[tuple(li) for li in d["directives"]],
            geometry_options=d["geometry_options"],
            symmetry_options=d["symmetry_options"],
            memory_options=d["memory_options"],
        )

    @classmethod
    def from_string(cls, string_input):
        """
        Read an NwInput from a string. Currently tested to work with
        files generated from this class itself.

        Args:
            string_input: string_input to parse.

        Returns:
            NwInput object
        """
        directives = []
        tasks = []
        charge = None
        spin_multiplicity = None
        title = None
        basis_set = None
        basis_set_option = None
        theory_directives = {}
        geom_options = None
        symmetry_options = None
        memory_options = None
        # Consume the input line by line, dispatching on the first token.
        lines = string_input.strip().split("\n")
        while len(lines) > 0:
            l = lines.pop(0).strip()
            if l == "":
                continue
            toks = l.split()
            if toks[0].lower() == "geometry":
                geom_options = toks[1:]
                l = lines.pop(0).strip()
                toks = l.split()
                if toks[0].lower() == "symmetry":
                    symmetry_options = toks[1:]
                    l = lines.pop(0).strip()
                # Parse geometry: "symbol x y z" lines until "end".
                species = []
                coords = []
                while l.lower() != "end":
                    toks = l.split()
                    species.append(toks[0])
                    coords.append([float(i) for i in toks[1:]])
                    l = lines.pop(0).strip()
                mol = Molecule(species, coords)
            elif toks[0].lower() == "charge":
                charge = int(toks[1])
            elif toks[0].lower() == "title":
                title = l[5:].strip().strip('"')
            elif toks[0].lower() == "basis":
                # Parse basis sets: 'element library "name"' lines until "end".
                l = lines.pop(0).strip()
                basis_set = {}
                while l.lower() != "end":
                    toks = l.split()
                    basis_set[toks[0]] = toks[-1].strip('"')
                    l = lines.pop(0).strip()
            elif toks[0].lower() in NwTask.theories:
                # read the basis_set_option
                if len(toks) > 1:
                    basis_set_option = toks[1]
                # Parse theory directives, tracking "mult" for the spin
                # multiplicity of subsequent tasks.
                theory = toks[0].lower()
                l = lines.pop(0).strip()
                theory_directives[theory] = {}
                while l.lower() != "end":
                    toks = l.split()
                    theory_directives[theory][toks[0]] = toks[-1]
                    if toks[0] == "mult":
                        spin_multiplicity = float(toks[1])
                    l = lines.pop(0).strip()
            elif toks[0].lower() == "task":
                # A "task" line closes over everything parsed so far.
                tasks.append(
                    NwTask(
                        charge=charge,
                        spin_multiplicity=spin_multiplicity,
                        title=title,
                        theory=toks[1],
                        operation=toks[2],
                        basis_set=basis_set,
                        basis_set_option=basis_set_option,
                        theory_directives=theory_directives.get(toks[1]),
                    )
                )
            elif toks[0].lower() == "memory":
                memory_options = " ".join(toks[1:])
            else:
                # Anything unrecognized is kept as a root-level directive.
                directives.append(l.strip().split())
        return cls(
            mol,
            tasks=tasks,
            directives=directives,
            geometry_options=geom_options,
            symmetry_options=symmetry_options,
            memory_options=memory_options,
        )

    @classmethod
    def from_file(cls, filename):
        """
        Read an NwInput from a file. Currently tested to work with
        files generated from this class itself.

        Args:
            filename: Filename to parse.

        Returns:
            NwInput object
        """
        with zopen(filename) as f:
            return cls.from_string(f.read())
class NwInputError(Exception):
    """Error class for NwInput."""
class NwOutput:
    """
    A Nwchem output file parser. Very basic for now - supports only dft and
    only parses energies and geometries. Please note that Nwchem typically
    outputs energies in either au or kJ/mol. All energies are converted to
    eV in the parser.
    """

    def __init__(self, filename):
        """
        Args:
            filename: Filename to read.
        """
        self.filename = filename
        with zopen(filename) as f:
            data = f.read()
        # Each job in a multi-task run is delimited by the "NWChem Input
        # Module" banner; the final chunk is dropped if it is only the
        # citation footer.
        chunks = re.split(r"NWChem Input Module", data)
        if re.search(r"CITATION", chunks[-1]):
            chunks.pop()
        preamble = chunks.pop(0)
        # Full raw text is kept for parse_tddft(), which scans it directly.
        self.raw = data
        self.job_info = self._parse_preamble(preamble)
        self.data = [self._parse_job(c) for c in chunks]

    def parse_tddft(self):
        """
        Parses TDDFT roots. Adapted from nw_spectrum.py script.

        Returns:
            {
                "singlet": [
                    {
                        "energy": float,
                        "osc_strength: float
                    }
                ],
                "triplet": [
                    {
                        "energy": float
                    }
                ]
            }
        """
        start_tag = "Convergence criterion met"
        end_tag = "Excited state energy"
        singlet_tag = "singlet excited"
        triplet_tag = "triplet excited"
        state = "singlet"
        inside = False  # true when we are inside output block
        lines = self.raw.split("\n")
        roots = {"singlet": [], "triplet": []}
        while lines:
            line = lines.pop(0).strip()
            if start_tag in line:
                inside = True
            elif end_tag in line:
                inside = False
            elif singlet_tag in line:
                state = "singlet"
            elif triplet_tag in line:
                state = "triplet"
            elif inside and "Root" in line and "eV" in line:
                # Second-to-last token on a "Root" line is the energy in eV.
                toks = line.split()
                roots[state].append({"energy": float(toks[-2])})
            elif inside and "Dipole Oscillator Strength" in line:
                # Oscillator strength belongs to the most recent root.
                osc = float(line.split()[-1])
                roots[state][-1]["osc_strength"] = osc
        return roots

    def get_excitation_spectrum(self, width=0.1, npoints=2000):
        """
        Generate an excitation spectra from the singlet roots of TDDFT
        calculations.

        Args:
            width (float): Width for Gaussian smearing.
            npoints (int): Number of energy points. More points => smoother
                curve.

        Returns:
            (ExcitationSpectrum) which can be plotted using
            pymatgen.vis.plotters.SpectrumPlotter.
        """
        roots = self.parse_tddft()
        data = roots["singlet"]
        en = np.array([d["energy"] for d in data])
        osc = np.array([d["osc_strength"] for d in data])
        # Pad the energy grid beyond the first/last root so the broadened
        # peaks are not clipped.
        epad = 20.0 * width
        emin = en[0] - epad
        emax = en[-1] + epad
        de = (emax - emin) / npoints
        # Use width of at least two grid points
        if width < 2 * de:
            width = 2 * de
        energies = [emin + ie * de for ie in range(npoints)]
        # Lorentzian broadening of each root, truncated beyond `cutoff`.
        cutoff = 20.0 * width
        gamma = 0.5 * width
        gamma_sqrd = gamma * gamma
        de = (energies[-1] - energies[0]) / (len(energies) - 1)
        prefac = gamma / np.pi * de
        x = []
        y = []
        for energy in energies:
            xx0 = energy - en
            stot = osc / (xx0 * xx0 + gamma_sqrd)
            t = np.sum(stot[np.abs(xx0) <= cutoff])
            x.append(energy)
            y.append(t * prefac)
        return ExcitationSpectrum(x, y)

    @staticmethod
    def _parse_preamble(preamble):
        # The preamble is a series of "key = value" lines; collect them into
        # a flat job-info dict.
        info = {}
        for l in preamble.split("\n"):
            toks = l.split("=")
            if len(toks) > 1:
                info[toks[0].strip()] = toks[-1].strip()
        return info

    def __iter__(self):
        return self.data.__iter__()

    def __getitem__(self, ind):
        return self.data[ind]

    def __len__(self):
        return len(self.data)

    @staticmethod
    def _parse_job(output):
        # Line-oriented state machine over one job's output.  Each parse_*
        # flag marks the section currently being consumed; the regexes below
        # recognize the section headers and data rows.
        energy_patt = re.compile(r"Total \w+ energy\s+=\s+([.\-\d]+)")
        energy_gas_patt = re.compile(r"gas phase energy\s+=\s+([.\-\d]+)")
        energy_sol_patt = re.compile(r"sol phase energy\s+=\s+([.\-\d]+)")
        coord_patt = re.compile(r"\d+\s+(\w+)\s+[.\-\d]+\s+([.\-\d]+)\s+" r"([.\-\d]+)\s+([.\-\d]+)")
        lat_vector_patt = re.compile(r"a[123]=<\s+([.\-\d]+)\s+" r"([.\-\d]+)\s+([.\-\d]+)\s+>")
        corrections_patt = re.compile(r"([\w\-]+ correction to \w+)\s+=" r"\s+([.\-\d]+)")
        preamble_patt = re.compile(
            r"(No. of atoms|No. of electrons" r"|SCF calculation type|Charge|Spin " r"multiplicity)\s*:\s*(\S+)"
        )
        force_patt = re.compile(r"\s+(\d+)\s+(\w+)" + 6 * r"\s+([0-9\.\-]+)")
        time_patt = re.compile(r"\s+ Task \s+ times \s+ cpu: \s+ ([.\d]+)s .+ ", re.VERBOSE)
        # Substrings that signal a failed job, mapped to a short error label.
        error_defs = {
            "calculations not reaching convergence": "Bad convergence",
            "Calculation failed to converge": "Bad convergence",
            "geom_binvr: #indep variables incorrect": "autoz error",
            "dft optimize failed": "Geometry optimization failed",
        }

        def fort2py(x):
            # Convert Fortran double-precision exponents (1.0D-3) to Python.
            return x.replace("D", "e")

        def isfloatstring(s):
            # True when the token contains no decimal point (i.e. is NOT a
            # float literal) - used to skip non-numeric hessian rows.
            return s.find(".") == -1

        parse_hess = False
        parse_proj_hess = False
        hessian = None
        projected_hessian = None
        parse_force = False
        all_forces = []
        forces = []
        data = {}
        energies = []
        frequencies = None
        normal_frequencies = None
        corrections = {}
        molecules = []
        structures = []
        species = []
        coords = []
        lattice = []
        errors = []
        basis_set = {}
        bset_header = []
        parse_geom = False
        parse_freq = False
        parse_bset = False
        parse_projected_freq = False
        job_type = ""
        parse_time = False
        # Task CPU time; stays 0 if no timing line is found, otherwise the
        # matched string from time_patt.
        time = 0
        for l in output.split("\n"):
            # pylint: disable=E1136
            for e, v in error_defs.items():
                if l.find(e) != -1:
                    errors.append(v)
            if parse_time:
                m = time_patt.search(l)
                if m:
                    time = m.group(1)
                    parse_time = False
            if parse_geom:
                # "Atomic Mass" marks the end of the geometry table; flush
                # the accumulated sites into a Structure (periodic) or
                # Molecule (no lattice vectors seen).
                if l.strip() == "Atomic Mass":
                    if lattice:
                        structures.append(Structure(lattice, species, coords, coords_are_cartesian=True))
                    else:
                        molecules.append(Molecule(species, coords))
                    species = []
                    coords = []
                    lattice = []
                    parse_geom = False
                else:
                    m = coord_patt.search(l)
                    if m:
                        species.append(m.group(1).capitalize())
                        coords.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
                    m = lat_vector_patt.search(l)
                    if m:
                        lattice.append([float(m.group(1)), float(m.group(2)), float(m.group(3))])
            if parse_force:
                m = force_patt.search(l)
                if m:
                    # Groups 0-1 are index/symbol, 2-4 coordinates, 5-7 the
                    # force vector components.
                    forces.extend(map(float, m.groups()[5:]))
                elif len(forces) > 0:
                    # First non-matching line after data ends this gradient
                    # block.
                    all_forces.append(forces)
                    forces = []
                    parse_force = False
            elif parse_freq:
                if len(l.strip()) == 0:
                    # Blank line before any displacements means the block has
                    # not started yet; blank line after means it is finished.
                    if len(normal_frequencies[-1][1]) == 0:
                        continue
                    parse_freq = False
                else:
                    # Each row carries one displacement component for the
                    # last num_vibs modes.
                    vibs = [float(vib) for vib in l.strip().split()[1:]]
                    num_vibs = len(vibs)
                    for mode, dis in zip(normal_frequencies[-num_vibs:], vibs):
                        mode[1].append(dis)
            elif parse_projected_freq:
                if len(l.strip()) == 0:
                    if len(frequencies[-1][1]) == 0:
                        continue
                    parse_projected_freq = False
                else:
                    vibs = [float(vib) for vib in l.strip().split()[1:]]
                    num_vibs = len(vibs)
                    for mode, dis in zip(frequencies[-num_vibs:], vibs):
                        mode[1].append(dis)
            elif parse_bset:
                if l.strip() == "":
                    parse_bset = False
                else:
                    toks = l.split()
                    if toks[0] != "Tag" and not re.match(r"-+", toks[0]):
                        basis_set[toks[0]] = dict(zip(bset_header[1:], toks[1:]))
                    elif toks[0] == "Tag":
                        # Header row; column 4 is a spacer that is dropped.
                        bset_header = toks
                        bset_header.pop(4)
                        bset_header = [h.lower() for h in bset_header]
            elif parse_hess:
                if l.strip() == "":
                    continue
                if len(hessian) > 0 and l.find("----------") != -1:
                    parse_hess = False
                    continue
                toks = l.strip().split()
                if len(toks) > 1:
                    try:
                        row = int(toks[0])
                    except Exception:
                        continue
                    if isfloatstring(toks[1]):
                        continue
                    vals = [float(fort2py(x)) for x in toks[1:]]
                    # The hessian is printed in column blocks; append a new
                    # row or extend an existing one accordingly.
                    if len(hessian) < row:
                        hessian.append(vals)
                    else:
                        hessian[row - 1].extend(vals)
            elif parse_proj_hess:
                if l.strip() == "":
                    continue
                # The projected hessian is square with the same dimension as
                # the normal hessian parsed just before it.
                nat3 = len(hessian)
                toks = l.strip().split()
                if len(toks) > 1:
                    try:
                        row = int(toks[0])
                    except Exception:
                        continue
                    if isfloatstring(toks[1]):
                        continue
                    vals = [float(fort2py(x)) for x in toks[1:]]
                    if len(projected_hessian) < row:
                        projected_hessian.append(vals)
                    else:
                        projected_hessian[row - 1].extend(vals)
                    if len(projected_hessian[-1]) == nat3:
                        parse_proj_hess = False
            else:
                # Not inside any section: look for energies, preamble info
                # and section headers that switch on one of the parse flags.
                m = energy_patt.search(l)
                if m:
                    energies.append(Energy(m.group(1), "Ha").to("eV"))
                    parse_time = True
                    continue
                m = energy_gas_patt.search(l)
                if m:
                    # COSMO run: replace the scalar SCF energy with a dict of
                    # the cosmo/gas/sol phase energies.
                    cosmo_scf_energy = energies[-1]
                    energies[-1] = {}
                    energies[-1].update({"cosmo scf": cosmo_scf_energy})
                    energies[-1].update({"gas phase": Energy(m.group(1), "Ha").to("eV")})
                m = energy_sol_patt.search(l)
                if m:
                    energies[-1].update({"sol phase": Energy(m.group(1), "Ha").to("eV")})
                m = preamble_patt.search(l)
                if m:
                    try:
                        val = int(m.group(2))
                    except ValueError:
                        val = m.group(2)
                    # E.g. "No. of atoms" -> "natoms".
                    k = m.group(1).replace("No. of ", "n").replace(" ", "_")
                    data[k.lower()] = val
                elif l.find('Geometry "geometry"') != -1:
                    parse_geom = True
                elif l.find('Summary of "ao basis"') != -1:
                    parse_bset = True
                elif l.find("P.Frequency") != -1:
                    # Projected frequencies; header row also carries the
                    # frequency values themselves.
                    parse_projected_freq = True
                    if frequencies is None:
                        frequencies = []
                    toks = l.strip().split()[1:]
                    frequencies.extend([(float(freq), []) for freq in toks])
                elif l.find("Frequency") != -1:
                    toks = l.strip().split()
                    if len(toks) > 1 and toks[0] == "Frequency":
                        parse_freq = True
                        if normal_frequencies is None:
                            normal_frequencies = []
                        normal_frequencies.extend([(float(freq), []) for freq in l.strip().split()[1:]])
                elif l.find("MASS-WEIGHTED NUCLEAR HESSIAN") != -1:
                    parse_hess = True
                    if not hessian:
                        hessian = []
                elif l.find("MASS-WEIGHTED PROJECTED HESSIAN") != -1:
                    parse_proj_hess = True
                    if not projected_hessian:
                        projected_hessian = []
                elif l.find("atom coordinates gradient") != -1:
                    parse_force = True
                elif job_type == "" and l.strip().startswith("NWChem"):
                    # First "NWChem ..." banner names the module that ran.
                    job_type = l.strip()
                    if job_type == "NWChem DFT Module" and "COSMO solvation results" in output:
                        job_type += " COSMO"
                else:
                    m = corrections_patt.search(l)
                    if m:
                        corrections[m.group(1)] = FloatWithUnit(m.group(2), "kJ mol^-1").to("eV atom^-1")
        # Regroup the flat displacement lists into (x, y, z) triplets per
        # atom for each vibrational mode.
        if frequencies:
            for freq, mode in frequencies:
                mode[:] = zip(*[iter(mode)] * 3)
        if normal_frequencies:
            for freq, mode in normal_frequencies:
                mode[:] = zip(*[iter(mode)] * 3)
        # Only the lower triangle is printed; mirror it to make the hessians
        # full square matrices.
        if hessian:
            n = len(hessian)
            for i in range(n):
                for j in range(i + 1, n):
                    hessian[i].append(hessian[j][i])
        if projected_hessian:
            n = len(projected_hessian)
            for i in range(n):
                for j in range(i + 1, n):
                    projected_hessian[i].append(projected_hessian[j][i])
        data.update(
            {
                "job_type": job_type,
                "energies": energies,
                "corrections": corrections,
                "molecules": molecules,
                "structures": structures,
                "basis_set": basis_set,
                "errors": errors,
                "has_error": len(errors) > 0,
                "frequencies": frequencies,
                "normal_frequencies": normal_frequencies,
                "hessian": hessian,
                "projected_hessian": projected_hessian,
                "forces": all_forces,
                "task_time": time,
            }
        )
        return data
| materialsproject/pymatgen | pymatgen/io/nwchem.py | Python | mit | 35,100 | [
"Gaussian",
"NWChem",
"pymatgen"
] | 1ecd5815e8ec68d22de2915c7fda9794307d9f5481bdc6dafdfd16bddbf41347 |
#!/usr/bin/env python
"""Convert a BLAST XML file to tabular output.
Takes three command line options, input BLAST XML filename, output tabular
BLAST filename, output format (std for standard 12 columns, or ext for the
extended 24 columns offered in the BLAST+ wrappers).
The 12 columns output are 'qseqid sseqid pident length mismatch gapopen qstart
qend sstart send evalue bitscore' or 'std' at the BLAST+ command line, which
mean:
====== ========= ============================================
Column NCBI name Description
------ --------- --------------------------------------------
1 qseqid Query Seq-id (ID of your sequence)
2 sseqid Subject Seq-id (ID of the database hit)
3 pident Percentage of identical matches
4 length Alignment length
5 mismatch Number of mismatches
6 gapopen Number of gap openings
7 qstart Start of alignment in query
8 qend End of alignment in query
9 sstart Start of alignment in subject (database hit)
10 send End of alignment in subject (database hit)
11 evalue Expectation value (E-value)
12 bitscore Bit score
====== ========= ============================================
The additional columns offered in the Galaxy BLAST+ wrappers are:
====== ============= ===========================================
Column NCBI name Description
------ ------------- -------------------------------------------
13 sallseqid All subject Seq-id(s), separated by ';'
14 score Raw score
15 nident Number of identical matches
16 positive Number of positive-scoring matches
17 gaps Total number of gaps
18 ppos Percentage of positive-scoring matches
19 qframe Query frame
20 sframe Subject frame
21 qseq Aligned part of query sequence
22 sseq Aligned part of subject sequence
23 qlen Query sequence length
24 slen Subject sequence length
25 salltitles All subject titles, separated by '<>'
====== ============= ===========================================
Most of these fields are given explicitly in the XML file, others some like
the percentage identity and the number of gap openings must be calculated.
Be aware that the sequence in the extended tabular output or XML direct from
BLAST+ may or may not use XXXX masking on regions of low complexity. This
can throw off the calculation of percentage identity and gap openings.
[In fact, both BLAST 2.2.24+ and 2.2.25+ have a subtle bug in this regard,
with these numbers changing depending on whether or not the low complexity
filter is used.]
This script attempts to produce identical output to what BLAST+ would have done.
However, check this with "diff -b ..." since BLAST+ sometimes includes an extra
space character (probably a bug).
"""
import sys
import re
import os
from optparse import OptionParser
# Emit the script version and exit immediately when asked.
if "-v" in sys.argv or "--version" in sys.argv:
    print "v0.1.01"
    sys.exit(0)
# Locate an ElementTree implementation: the C-accelerated stdlib version on
# Python >= 2.5 (falling back to the pure-Python one), otherwise the
# third-party elementtree egg shipped with Galaxy.
if sys.version_info[:2] >= ( 2, 5 ):
    try:
        from xml.etree import cElementTree as ElementTree
    except ImportError:
        from xml.etree import ElementTree as ElementTree
else:
    from galaxy import eggs
    import pkg_resources; pkg_resources.require( "elementtree" )
    from elementtree import ElementTree
def stop_err(msg):
    """Write *msg* to stderr and abort the script with exit status 1."""
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)
# Guard against the pre-OptionParser command-line API (format keyword as the
# third positional argument); point users at the new flag-based interface.
if len(sys.argv) == 4 and sys.argv[3] in ["std", "x22", "ext"]:
    #False positive if user really has a BLAST XML file called 'std' or 'ext'...
    stop_err("""ERROR: The script API has changed, sorry.
Instead of the old style:
$ python blastxml_to_tabular.py input.xml output.tabular std
Please use:
$ python blastxml_to_tabular.py -o output.tabular -c std input.xml
For more information, use:
$ python blastxml_to_tabular.py -h
""")
usage = """usage: %prog [options] blastxml[,...]
Convert one (or more) BLAST XML files into a single tabular file.
The columns option can be 'std' (standard 12 columns), 'ext'
(extended 25 columns), or a list of BLAST+ column names like
'qseqid,sseqid,pident' (space or comma separated).
"""
parser = OptionParser(usage=usage)
parser.add_option('-o', '--output', dest='output', default=None, help='output filename (defaults to stdout)', metavar="FILE")
parser.add_option("-c", "--columns", dest="columns", default='std', help="[std|ext|col1,col2,...] standard 12 columns, extended 25 columns, or list of column names")
(options, args) = parser.parse_args()
# All 25 BLAST+ tabular column names, in canonical output order.
colnames = 'qseqid,sseqid,pident,length,mismatch,gapopen,qstart,qend,sstart,send,evalue,bitscore,sallseqid,score,nident,positive,gaps,ppos,qframe,sframe,qseq,sseq,qlen,slen,salltitles'.split(',')
if len(args) < 1:
    stop_err("ERROR: No BLASTXML input files given; run with --help to see options.")
# Decide which columns to emit: the std/ext presets, or a validated
# user-supplied subset of colnames.
out_fmt = options.columns
if out_fmt == "std":
    extended = False
    cols = None
elif out_fmt == "x22":
    stop_err("Format argument x22 has been replaced with ext (extended 25 columns)")
elif out_fmt == "ext":
    extended = True
    cols = None
else:
    cols = out_fmt.replace(" ", ",").split(",") #Allow space or comma separated
    #Remove any blank entries due to trailing comma,
    #or annoying "None" dummy value from Galaxy if no columns
    cols = [c for c in cols if c and c != "None"]
    extra = set(cols).difference(colnames)
    if extra:
        stop_err("These are not recognised column names: %s" % ",".join(sorted(extra)))
    del extra
    assert set(colnames).issuperset(cols), cols
    if not cols:
        stop_err("No columns selected!")
    # Extended parsing is only required when a requested column lies beyond
    # the standard twelve.
    extended = max(colnames.index(c) for c in cols) >= 12 #Do we need any higher columns?
del out_fmt
# Fail fast if any of the input files is missing.
for in_file in args:
    if not os.path.isfile(in_file):
        stop_err("Input BLAST XML file not found: %s" % in_file)
# BLAST+ substitutes placeholder identifiers (Query_1, Subject_1, ...) when
# the FASTA records had no explicit IDs; these patterns detect that case so
# the real description can be used instead.
re_default_query_id = re.compile("^Query_\d+$")
assert re_default_query_id.match("Query_101")
assert not re_default_query_id.match("Query_101a")
assert not re_default_query_id.match("MyQuery_101")
re_default_subject_id = re.compile("^Subject_\d+$")
assert re_default_subject_id.match("Subject_1")
assert not re_default_subject_id.match("Subject_")
assert not re_default_subject_id.match("Subject_12a")
assert not re_default_subject_id.match("TheSubject_1")
def convert(blastxml_filename, output_handle):
    """Parse one BLAST XML file and append tabular rows to output_handle.

    Reads module-level settings (extended, cols, colnames and the
    re_default_*_id patterns) set up by the option parsing above.

    Fixes: the original body ignored both parameters, parsing the
    module-level ``in_file`` and writing to the module-level ``outfile``;
    it also used the Python-2-only ``context.next()`` and bare ``except:``.
    """
    blast_program = None
    # get an iterable (iterparse streams the file, keeping memory bounded)
    try:
        context = ElementTree.iterparse(blastxml_filename, events=("start", "end"))
    except Exception:
        stop_err("Invalid data format.")
    # turn it into an iterator
    context = iter(context)
    # get the root element (needed below to free parsed subtrees)
    try:
        event, root = next(context)
    except Exception:
        stop_err( "Invalid data format." )
    for event, elem in context:
        if event == "end" and elem.tag == "BlastOutput_program":
            blast_program = elem.text
        # for every <Iteration> tag
        if event == "end" and elem.tag == "Iteration":
            #Expecting either this, from BLAST 2.2.25+ using FASTA vs FASTA
            # <Iteration_query-ID>sp|Q9BS26|ERP44_HUMAN</Iteration_query-ID>
            # <Iteration_query-def>Endoplasmic reticulum resident protein 44 OS=Homo sapiens GN=ERP44 PE=1 SV=1</Iteration_query-def>
            # <Iteration_query-len>406</Iteration_query-len>
            # <Iteration_hits></Iteration_hits>
            #
            #Or, from BLAST 2.2.24+ run online
            # <Iteration_query-ID>Query_1</Iteration_query-ID>
            # <Iteration_query-def>Sample</Iteration_query-def>
            # <Iteration_query-len>516</Iteration_query-len>
            # <Iteration_hits>...
            qseqid = elem.findtext("Iteration_query-ID")
            if re_default_query_id.match(qseqid):
                #Place holder ID, take the first word of the query definition
                qseqid = elem.findtext("Iteration_query-def").split(None,1)[0]
            qlen = int(elem.findtext("Iteration_query-len"))
            # for every <Hit> within <Iteration>
            for hit in elem.findall("Iteration_hits/Hit"):
                #Expecting either this,
                # <Hit_id>gi|3024260|sp|P56514.1|OPSD_BUFBU</Hit_id>
                # <Hit_def>RecName: Full=Rhodopsin</Hit_def>
                # <Hit_accession>P56514</Hit_accession>
                #or,
                # <Hit_id>Subject_1</Hit_id>
                # <Hit_def>gi|57163783|ref|NP_001009242.1| rhodopsin [Felis catus]</Hit_def>
                # <Hit_accession>Subject_1</Hit_accession>
                #
                #apparently depending on the parse_deflines switch
                #
                #Or, with a local database not using -parse_seqids can get this,
                # <Hit_id>gnl|BL_ORD_ID|2</Hit_id>
                # <Hit_def>chrIII gi|240255695|ref|NC_003074.8| Arabidopsis thaliana chromosome 3, complete sequence</Hit_def>
                # <Hit_accession>2</Hit_accession>
                sseqid = hit.findtext("Hit_id").split(None,1)[0]
                hit_def = sseqid + " " + hit.findtext("Hit_def")
                if re_default_subject_id.match(sseqid) \
                and sseqid == hit.findtext("Hit_accession"):
                    #Place holder ID, take the first word of the subject definition
                    hit_def = hit.findtext("Hit_def")
                    sseqid = hit_def.split(None,1)[0]
                if sseqid.startswith("gnl|BL_ORD_ID|") \
                and sseqid == "gnl|BL_ORD_ID|" + hit.findtext("Hit_accession"):
                    #Alternative place holder ID, again take the first word of hit_def
                    hit_def = hit.findtext("Hit_def")
                    sseqid = hit_def.split(None,1)[0]
                # for every <Hsp> within <Hit>
                for hsp in hit.findall("Hit_hsps/Hsp"):
                    nident = hsp.findtext("Hsp_identity")
                    length = hsp.findtext("Hsp_align-len")
                    pident = "%0.2f" % (100*float(nident)/float(length))
                    q_seq = hsp.findtext("Hsp_qseq")
                    h_seq = hsp.findtext("Hsp_hseq")
                    m_seq = hsp.findtext("Hsp_midline")
                    assert len(q_seq) == len(h_seq) == len(m_seq) == int(length)
                    # gap openings = number of gap runs in either sequence
                    gapopen = str(len(q_seq.replace('-', ' ').split())-1 + \
                                  len(h_seq.replace('-', ' ').split())-1)
                    mismatch = m_seq.count(' ') + m_seq.count('+') \
                             - q_seq.count('-') - h_seq.count('-')
                    #TODO - Remove this alternative mismatch calculation and test
                    #once satisifed there are no problems
                    expected_mismatch = len(q_seq) \
                                      - sum(1 for q,h in zip(q_seq, h_seq) \
                                            if q == h or q == "-" or h == "-")
                    xx = sum(1 for q,h in zip(q_seq, h_seq) if q=="X" and h=="X")
                    if not (expected_mismatch - q_seq.count("X") <= int(mismatch) <= expected_mismatch + xx):
                        stop_err("%s vs %s mismatches, expected %i <= %i <= %i" \
                                 % (qseqid, sseqid, expected_mismatch - q_seq.count("X"),
                                    int(mismatch), expected_mismatch))
                    #TODO - Remove this alternative identity calculation and test
                    #once satisifed there are no problems
                    expected_identity = sum(1 for q,h in zip(q_seq, h_seq) if q == h)
                    if not (expected_identity - xx <= int(nident) <= expected_identity + q_seq.count("X")):
                        stop_err("%s vs %s identities, expected %i <= %i <= %i" \
                                 % (qseqid, sseqid, expected_identity, int(nident),
                                    expected_identity + q_seq.count("X")))
                    evalue = hsp.findtext("Hsp_evalue")
                    if evalue == "0":
                        evalue = "0.0"
                    else:
                        evalue = "%0.0e" % float(evalue)
                    bitscore = float(hsp.findtext("Hsp_bit-score"))
                    if bitscore < 100:
                        #Seems to show one decimal place for lower scores
                        bitscore = "%0.1f" % bitscore
                    else:
                        #Note BLAST does not round to nearest int, it truncates
                        bitscore = "%i" % bitscore
                    values = [qseqid,
                              sseqid,
                              pident,
                              length, #hsp.findtext("Hsp_align-len")
                              str(mismatch),
                              gapopen,
                              hsp.findtext("Hsp_query-from"), #qstart,
                              hsp.findtext("Hsp_query-to"), #qend,
                              hsp.findtext("Hsp_hit-from"), #sstart,
                              hsp.findtext("Hsp_hit-to"), #send,
                              evalue, #hsp.findtext("Hsp_evalue") in scientific notation
                              bitscore, #hsp.findtext("Hsp_bit-score") rounded
                              ]
                    if extended:
                        try:
                            sallseqid = ";".join(name.split(None,1)[0] for name in hit_def.split(" >"))
                            salltitles = "<>".join(name.split(None,1)[1] for name in hit_def.split(" >"))
                        except IndexError as e:
                            stop_err("Problem splitting multuple hits?\n%r\n--> %s" % (hit_def, e))
                        positive = hsp.findtext("Hsp_positive")
                        ppos = "%0.2f" % (100*float(positive)/float(length))
                        qframe = hsp.findtext("Hsp_query-frame")
                        sframe = hsp.findtext("Hsp_hit-frame")
                        if blast_program == "blastp":
                            #Probably a bug in BLASTP that they use 0 or 1 depending on format
                            if qframe == "0": qframe = "1"
                            if sframe == "0": sframe = "1"
                        slen = int(hit.findtext("Hit_len"))
                        values.extend([sallseqid,
                                       hsp.findtext("Hsp_score"), #score,
                                       nident,
                                       positive,
                                       hsp.findtext("Hsp_gaps"), #gaps,
                                       ppos,
                                       qframe,
                                       sframe,
                                       #NOTE - for blastp, XML shows original seq, tabular uses XXX masking
                                       q_seq,
                                       h_seq,
                                       str(qlen),
                                       str(slen),
                                       salltitles,
                                       ])
                    if cols:
                        #Only a subset of the columns are needed
                        values = [values[colnames.index(c)] for c in cols]
                    output_handle.write("\t".join(values) + "\n")
            # prevents ElementTree from growing large datastructure
            root.clear()
            elem.clear()
# Open the requested output file, or fall back to stdout.
if options.output:
    outfile = open(options.output, "w")
else:
    outfile = sys.stdout
# Convert each input BLAST XML file in turn, appending rows to the
# same output handle so multiple inputs yield one combined table.
for in_file in args:
    blast_program = None
    convert(in_file, outfile)
# Only close handles we opened ourselves.
if options.output:
    outfile.close()
else:
    #Using stdout
    pass
| naumenko-sa/bioscripts | blast/blast.xml2flat1.py | Python | mit | 15,755 | [
"BLAST",
"Galaxy"
] | 4e6d47d9a0b3293f9e239cba6e758d3287ce9990a15164592a1a18b2e2a4f176 |
from sympy import *
import sympy.polynomials
import random
class NonSquareMatrixException(Exception):
    """Raised when a square-matrix-only operation is applied to a non-square matrix."""
    pass
class ShapeError(ValueError):
    """Wrong matrix shape (e.g. incompatible dimensions for multiplication)."""
    pass
class Matrix(object):
def __init__(self, *args):
"""
Matrix can be constructed with values or a rule.
>>> from sympy import *
>>> Matrix( (1,2+I), (3,4) ) #doctest:+NORMALIZE_WHITESPACE
1 2 + I
3 4
>>> Matrix(2, 2, lambda i,j: (i+1)*j ) #doctest:+NORMALIZE_WHITESPACE
0 1
0 2
Note: in SymPy we count indices from 0. The rule however counts from 1.
"""
if len(args) == 3 and callable(args[2]):
operation = args[2]
assert isinstance(args[0], int) and isinstance(args[1], int)
self.lines = args[0]
self.cols = args[1]
self.mat = []
for i in range(self.lines):
for j in range(self.cols):
self.mat.append(Basic.sympify(operation(i, j)))
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], (list, tuple)):
self.lines=args[0]
self.cols=args[1]
mat = args[2]
self.mat=[]
for j in range(self.lines):
for i in range(self.cols):
self.mat.append(Basic.sympify(mat[j*self.cols+i]))
else:
if len(args) == 1:
mat = args[0]
else:
mat = args
if not isinstance(mat[0], (list, tuple)):
# make each element a singleton
mat = [ [element] for element in mat ]
self.lines=len(mat)
self.cols=len(mat[0])
self.mat=[]
for j in range(self.lines):
assert len(mat[j])==self.cols
for i in range(self.cols):
self.mat.append(Basic.sympify(mat[j][i]))
def key2ij(self,key):
"""Converts key=(4,6) to 4,6 and ensures the key is correct."""
if not (isinstance(key,(list, tuple)) and len(key) == 2):
raise TypeError("wrong syntax: a[%s]. Use a[i,j] or a[(i,j)]"
%repr(key))
i,j=key
if not (i>=0 and i<self.lines and j>=0 and j < self.cols):
print self.lines, " ", self.cols
raise IndexError("Index out of range: a[%s]"%repr(key))
return i,j
def __getattr__(self,name):
"""
>>> from sympy import *
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
1 2 + I
3 4
>>> m.T #doctest: +NORMALIZE_WHITESPACE
1 3
2 + I 4
>>> m.H #doctest: +NORMALIZE_WHITESPACE
1 3
2 - I 4
"""
if name == "T":
#transposition
out = Matrix(self.cols,self.lines, lambda i,j: self[j,i])
return out
if name == "C":
#by-element conjugation
out = Matrix(self.lines,self.cols,
lambda i,j: self[i,j].conjugate())
return out
if name == "H":
#hermite conjugation
out = self.T.C
return out
if name == "D":
#dirac conjugation
from sympy.physics.matrices import mgamma
out = self.H * mgamma(0)
return out
raise AttributeError("'%s' object has no attribute '%s'"%
(self.__class__.__name__, name))
def __getitem__(self,key):
"""
>>> from sympy import *
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
1 2 + I
3 4
>>> m[1,0]
3
>>> m.H[1,0]
2 - I
"""
# row-wise decomposition of matrix
if isinstance(key, slice) or isinstance(key, int):
return self.mat[key]
# proper 2-index access
assert len(key) == 2
if isinstance(key[0], int) and isinstance(key[1], int):
i,j=self.key2ij(key)
return self.mat[i*self.cols+j]
elif isinstance(key[0], slice) or isinstance(key[1], slice):
return self.submatrix(key)
else:
raise IndexError("Index out of range: a[%s]"%repr(key))
def __setitem__(self,key,value):
"""
>>> from sympy import *
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
1 2 + I
3 4
>>> m[1,0]=9
>>> m #doctest: +NORMALIZE_WHITESPACE
1 2 + I
9 4
"""
assert len(key) == 2
if isinstance(key[0], slice) or isinstance(key[1], slice):
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
if isinstance(value, (list, tuple)):
self.copyin_list(key, value)
else:
i,j=self.key2ij(key)
self.mat[i*self.cols + j] = Basic.sympify(value)
def copyin_matrix(self, key, value):
rlo, rhi = self.slice2bounds(key[0], self.lines)
clo, chi = self.slice2bounds(key[1], self.cols)
assert value.lines == rhi - rlo and value.cols == chi - clo
for i in range(value.lines):
for j in range(value.cols):
self[i+rlo, j+clo] = Basic.sympify(value[i,j])
def copyin_list(self, key, value):
assert isinstance(value, (list, tuple))
self.copyin_matrix(key, Matrix(value))
def hash(self):
"""Compute a hash every time, because the matrix elements
could change."""
return hash(self.__str__() )
@property
def shape(self):
return (self.lines, self.cols)
def __rmul__(self,a):
assert not isinstance(a,Matrix)
r=self.zeronm(self.lines,self.cols)
for i in range(self.lines):
for j in range(self.cols):
r[i,j]=a*self[i,j]
return r
def expand(self):
out = self[:,:]
out[:,:] = Matrix(self.lines, self.cols, lambda i,j: self[i,j].expand())
return out
def combine(self):
out = self[:,:]
out[:,:] = Matrix(self.lines, self.cols, lambda i,j: self[i,j].combine())
return out
def subs(self,a,b):
out = self[:,:]
out[:,:] = Matrix(self.lines, self.cols, lambda i,j: self[i,j].subs(a,b))
return out
def __sub__(self,a):
return self + (-a)
def __mul__(self,a):
if isinstance(a,Matrix):
return self.multiply(a)
out = self[:,:]
out[:,:] = Matrix(self.lines, self.cols, lambda i,j: self[i,j]*a)
return out
def __pow__(self, num):
if not self.is_square:
raise NonSquareMatrixException()
if isinstance(num, int) or isinstance(num, Integer):
n = int(num)
if n < 0:
return self.inv() ** -n # A**-2 = (A**-1)**2
a = eye(self.cols)
while n:
if n % 2:
a = a * self
n -= 1
self = self * self
n = n // 2
return a
raise NotImplementedError('Can only rise to the power of an integer for now')
def __add__(self,a):
return self.add(a)
def __div__(self,a):
return self * (Rational(1)/a)
def multiply(self,b):
"""Returns self*b """
def dotprod(a,b,i,j):
if a.cols != b.lines:
raise ShapeError()
r=0
for x in range(a.cols):
r+=a[i,x]*b[x,j]
return r.expand() # .expand() is a test
return Matrix(self.lines,b.cols, lambda i,j: dotprod(self,b,i,j))
def add(self,b):
"""Returns self+b """
assert self.lines == b.lines
assert self.cols == b.cols
out = self[:,:]
out[:,:] = Matrix(self.lines, self.cols, lambda i,j: self[i,j]+b[i,j])
return out
def __neg__(self):
return -1*self
def __eq__(self,a):
if not isinstance(a, (Matrix, Basic)):
a = Basic.sympify(a)
return self.hash() == a.hash()
def __ne__(self,a):
if not isinstance(a, (Matrix, Basic)):
a = Basic.sympify(a)
return self.hash() != a.hash()
def __repr__(self):
return str(self)
def inv(self, method="GE"):
assert self.cols==self.lines
# gaussian by default, also can be done by LU
if method == "GE":
return self.inverse_GE()
elif method == "LU":
return self.inverse_LU()
else:
raise "Inversion method unrecognized"
def __str__(self):
s="";
for i in range(self.lines):
for j in range(self.cols):
s+="%s "%repr(self[i,j]);
if i != self.lines - 1:
s+="\n"
return s
def __mathml__(self):
mml = ""
for i in range(self.lines):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i,j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def row(self, i, f):
"""Elementary row operation using functor"""
for j in range(0, self.cols):
self[i, j] = f(self[i, j], j)
def col(self, j, f):
"""Elementary column operation using functor"""
for i in range(0, self.lines):
self[i, j] = f(self[i, j], i)
def row_swap(self, i, j):
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def col_swap(self, i, j):
for k in range(0, self.lines):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_del(self, i):
self.mat = self.mat[:i*self.cols] + self.mat[(i+1)*self.cols:]
self.lines -= 1
def col_del(self, i):
"""
>>> import sympy
>>> M = sympy.matrices.eye(3)
>>> M.col_del(1)
>>> M #doctest: +NORMALIZE_WHITESPACE
1 0
0 0
0 1
"""
for j in range(self.lines-1, -1, -1):
del self.mat[i+j*self.cols]
self.cols -= 1
def row_join(self, rhs):
# concatenates two matrices along self's last and rhs's first col
assert self.lines == rhs.lines
newmat = self.zeronm(self.lines, self.cols + rhs.cols)
newmat[:,:self.cols] = self[:,:]
newmat[:,self.cols:] = rhs
return newmat
def col_join(self, bott):
assert self.cols == bott.cols
newmat = self.zeronm(self.lines+bott.lines, self.cols)
newmat[:self.lines,:] = self[:,:]
newmat[self.lines:,:] = bott
return newmat
def trace(self):
assert self.cols == self.lines
trace = 0
for i in range(self.cols):
trace += self[i,i]
return trace
def submatrix(self, keys):
"""
>>> from sympy import *
>>> m = Matrix(4,4,lambda i,j: i+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
0 1 2 3
1 2 3 4
2 3 4 5
3 4 5 6
>>> m[0:1, 1] #doctest: +NORMALIZE_WHITESPACE
1
>>> m[0:2, 0:1] #doctest: +NORMALIZE_WHITESPACE
0
1
>>> m[2:4, 2:4] #doctest: +NORMALIZE_WHITESPACE
4 5
5 6
"""
assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
rlo, rhi = self.slice2bounds(keys[0], self.lines)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
return Matrix(rhi-rlo, chi-clo, lambda i,j: self[i+rlo, j+clo])
def slice2bounds(self, key, defmax):
"""
Takes slice or number and returns (min,max) for iteration
Takes a default maxval to deal with the slice ':' which is (none, none)
"""
if isinstance(key, slice):
lo, hi = 0, defmax
if key.start != None:
if key.start >= 0:
lo = key.start
else:
lo = defmax+key.start
if key.stop != None:
if key.stop >= 0:
hi = key.stop
else:
hi = defmax+key.stop
return lo, hi
elif isinstance(key, int):
if key >= 0:
return key, key+1
else:
return defmax+key, defmax+key+1
else:
raise IndexError("Improper index type")
def applyfunc(self, f):
"""
>>> from sympy import *
>>> m = Matrix(2,2,lambda i,j: i*2+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
0 1
2 3
>>> m.applyfunc(lambda i: 2*i) #doctest: +NORMALIZE_WHITESPACE
0 2
4 6
"""
assert callable(f)
out = self[:,:]
for i in range(self.lines):
for j in range(self.cols):
out[i,j] = f(self[i,j])
return out
def reshape(self, _rows, _cols):
"""
>>> from sympy import *
>>> m = Matrix(2,3,lambda i,j: 1)
>>> m #doctest: +NORMALIZE_WHITESPACE
1 1 1
1 1 1
>>> m.reshape(1,6) #doctest: +NORMALIZE_WHITESPACE
1 1 1 1 1 1
>>> m.reshape(3,2) #doctest: +NORMALIZE_WHITESPACE
1 1
1 1
1 1
"""
if self.lines*self.cols != _rows*_cols:
print "Invalid reshape parameters %d %d" % (_rows, _cols)
return Matrix(_rows, _cols, lambda i,j: self.mat[i*_cols + j])
def print_nonzero (self, symb="X"):
"""
Shows location of non-zero entries for fast shape lookup
>>> from sympy import *
>>> m = Matrix(2,3,lambda i,j: i*3+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
0 1 2
3 4 5
>>> m.print_nonzero() #doctest: +NORMALIZE_WHITESPACE
[ XX]
[XXX]
>>> m = matrices.eye(4)
>>> m.print_nonzero("x") #doctest: +NORMALIZE_WHITESPACE
[x ]
[ x ]
[ x ]
[ x]
"""
s="";
for i in range(self.lines):
s+="["
for j in range(self.cols):
if self[i,j] == 0:
s+=" "
else:
s+= symb+""
s+="]\n"
print s
def LUsolve(self, rhs):
assert rhs.lines == self.lines
A, perm = self.LUdecomposition_Simple()
n = self.lines
b = rhs.permuteFwd(perm)
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
# backward substitution
for i in range(n-1,-1,-1):
for j in range(i+1, n):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
b.row(i, lambda x,k: x / A[i,i])
return b
def LUdecomposition(self):
combined, p = self.LUdecomposition_Simple()
L = self.zero(self.lines)
U = self.zero(self.lines)
for i in range(self.lines):
for j in range(self.lines):
if i > j:
L[i,j] = combined[i,j]
else:
if i == j:
L[i,i] = 1
U[i,j] = combined[i,j]
return L, U, p
def LUdecomposition_Simple(self):
# returns A compused of L,U (L's diag entries are 1) and
# p which is the list of the row swaps (in order)
assert self.lines == self.cols
n = self.lines
A = self[:,:]
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
pivot = -1
for i in range(j,n):
for k in range(j):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and A[i,j] != 0:
pivot = i
if pivot < 0:
raise "Error: non-invertible matrix passed to LUdecomposition_Simple()"
if pivot != j: # row must be swapped
A.row_swap(pivot,j)
p.append([pivot,j])
assert A[j,j] != 0
scale = 1 / A[j,j]
for i in range(j+1,n):
A[i,j] = A[i,j] * scale
return A, p
def LUdecompositionFF(self):
# returns 4 matrices P, L, D, U such that PA = L D**-1 U
# from the paper "fraction-free matrix factors..." by Zhou and Jeffrey
n, m = self.lines, self.cols
U, L, P = self[:,:], eye(n), eye(n)
DD = zero(n) # store it smarter since it's just diagonal
oldpivot = 1
for k in range(n-1):
if U[k,k] == 0:
kpivot = k+1
Notfound = True
while kpivot < n and Notfound:
if U[kpivot, k] != 0:
Notfound = False
else:
kpivot = kpivot + 1
if kpivot == n+1:
raise "Matrix is not full rank"
else:
swap = U[k, k:]
U[k,k:] = U[kpivot,k:]
U[kpivot, k:] = swap
swap = P[k, k:]
P[k, k:] = P[kpivot, k:]
P[kpivot, k:] = swap
assert U[k, k] != 0
L[k,k] = U[k,k]
DD[k,k] = oldpivot * U[k,k]
assert DD[k,k] != 0
Ukk = U[k,k]
for i in range(k+1, n):
L[i,k] = U[i,k]
Uik = U[i,k]
for j in range(k+1, m):
U[i,j] = (Ukk * U[i,j] - U[k,j]*Uik) / oldpivot
U[i,k] = 0
oldpivot = U[k,k]
DD[n-1,n-1] = oldpivot
return P, L, DD, U
def cofactorMatrix(self):
out = self[:,:]
out[:,:] = Matrix(self.lines, self.cols, lambda i,j: self.cofactor(i,j))
return out
def minorEntry(self, i, j):
assert 0 <= i < self.lines and 0 <= j < self.cols
return self.minorMatrix(i,j).det()
def minorMatrix(self, i, j):
assert 0 <= i < self.lines and 0 <= j < self.cols
return self.delRowCol(i,j)
def cofactor(self, i, j):
if (i+j) % 2 == 0:
return self.minorEntry(i,j)
else:
return -1 * self.minorEntry(i,j)
def jacobian(self, varlist):
# self is a vector of expression representing functions f_i(x_1, ..., x_n)
# varlist is the set of x_i's in order
assert self.lines == 1
m = self.cols
if isinstance(varlist, Matrix):
assert varlist.lines == 1
n = varlist.cols
elif isinstance(varlist, (list, tuple)):
n = len(varlist)
assert n > 0 # need to diff by something
J = self.zeronm(m,n) # maintain subclass type
for i in range(m):
if isinstance(self[i], (float, int)):
continue # constant function, jacobian row is zero
try:
tmp = self[i].diff(varlist[0]) # check differentiability
J[i,0] = tmp
except AttributeError:
raise "Function %d is not differentiable" % i
for j in range(1,n):
J[i,j] = self[i].diff(varlist[j])
return J
def QRdecomposition(self):
# TODO: still doesn't work for large expressions, there's a bug in an eval somewhere
# return Q*R where Q is orthogonal and R is upper triangular
# assume full-rank square, for now
assert self.lines == self.cols
n = self.lines
Q, R = self.zero(n), self.zero(n)
for j in range(n): # for each column vector
tmp = self[:,j] # take original v
for i in range(j):
# subtract the project of self on new vector
tmp -= Q[:,i] * self[:,j].dot(Q[:,i])
tmp.expand()
# normalize it
R[j,j] = tmp.norm()
Q[:,j] = tmp / R[j,j]
assert Q[:,j].norm() == 1
for i in range(j):
R[i,j] = Q[:,i].dot(self[:,j])
return Q,R
# Utility functions
def simplify(self):
for i in range(self.lines):
for j in range(self.cols):
try:
self[i,j] = self[i,j].simplify()
except:
pass
def expand(self):
for i in range(self.lines):
for j in range(self.cols):
self[i,j] = self[i,j].expand()
#def evaluate(self): # no more eval() so should be removed
# for i in range(self.lines):
# for j in range(self.cols):
# self[i,j] = self[i,j].eval()
def cross(self, b):
assert isinstance(b, (list, tuple, Matrix))
if not (self.lines == 1 and self.cols == 3 or \
self.lines == 3 and self.cols == 1 ) and \
(b.lines == 1 and b.cols == 3 or \
b.lines == 3 and b.cols == 1):
raise "Dimensions incorrect for cross product"
else:
return Matrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
assert isinstance(b, (list, tuple, Matrix))
if isinstance(b, (list, tuple)):
m = len(b)
else:
m = b.lines * b.cols
assert self.cols*self.lines == m
prod = 0
for i in range(m):
prod += self[i] * b[i]
return prod
def norm(self):
assert self.lines == 1 or self.cols == 1
out = Basic.sympify(0)
for i in range(self.lines * self.cols):
out += self[i]*self[i]
return out**Basic.Half()
def normalized(self):
assert self.lines == 1 or self.cols == 1
norm = self.norm()
out = self[:,:].applyfunc(lambda i: i / norm)
return out
def project(self, v):
# project ONTO v
return v * (self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
copy = self[:,:]
for i in range(len(perm)-1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
copy = self[:,:]
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def delRowCol(self, i, j):
#used only for cofactors, makes a copy
M = self[:,:]
M.row_del(i)
M.col_del(j)
return M
def zeronm(self, n, m):
# used so that certain functions above can use this
# then only this func need be overloaded in subclasses
return Matrix(n,m,lambda i,j:0)
def zero(self, n):
return Matrix(n,n,lambda i,j:0)
def eye(self, n):
tmp = self.zero(n)
for i in range(tmp.lines):
tmp[i,i] = 1
return tmp
@property
def is_square(self):
return self.lines == self.cols
def is_upper(self):
for i in range(self.cols):
for j in range(self.lines):
if i > j and self[i,j] != 0:
return False
return True
def is_lower(self):
for i in range(self.cols):
for j in range(self.lines):
if i < j and self[i, j] != 0:
return False
return True
def is_symbolic(self):
for i in range(self.cols):
for j in range(self.lines):
if not self[i,j].atoms(type=Symbol):
return True
return False
def clone(self):
return Matrix(self.lines, self.cols, lambda i, j: self[i, j])
def det(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal numer of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF).
"""
if not self.is_square:
raise NonSquareMatrixException()
M, n = self[:,:], self.lines
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return Rational(0)
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
M[i, j] = D / M[k-1, k-1]
else:
M[i, j] = D
det = sign * M[n-1, n-1]
return det.expand()
def inverse_LU(self):
return self.LUsolve(self.eye(self.lines))
def inverse_GE(self):
assert self.lines == self.cols
assert self.det() != 0
big = self.row_join(self.eye(self.lines))
red = big.rref()
return red[0][:,big.lines:]
    def rref(self):
        """Return (R, pivotlist): the reduced row-echelon form of self
        and the column indices of the pivot (non-free) variables.
        """
        # take any matrix and return reduced row-ech form and indices of pivot vars
        # TODO: rewrite inverse_GE to use this
        pivots, r = 0, self[:,:] # pivot: index of next row to contain a pivot
        pivotlist = [] # indices of pivot variables (non-free)
        for i in range(r.cols):
            if pivots == r.lines:
                break
            if r[pivots,i] == 0:
                # search below the pivot row for a non-zero entry in column i
                for k in range(pivots, r.lines):
                    if r[k,i] != 0:
                        break
                # NOTE(review): k == lines-1 both when the last row holds a
                # usable non-zero pivot AND when no pivot was found, so a
                # valid pivot in the last row is skipped here -- confirm.
                if k == r.lines - 1:
                    continue
                r.row_swap(pivots,k)
            # normalize the pivot row so the pivot entry becomes 1
            scale = r[pivots,i]
            r.row(pivots, lambda x, _: x/scale)
            # eliminate column i from every other row
            for j in range(0, r.lines):
                if j == pivots:
                    continue
                scale = r[j,i]
                r.row(j, lambda x, k: x - r[pivots,k]*scale)
            pivotlist.append(i)
            pivots += 1
        return r, pivotlist
def nullspace(self):
# Returns list of vectors (Matrix objects) that span nullspace of self
assert self.cols >= self.lines
reduced, pivots = self.rref()
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeronm(self.cols,1))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i,0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i+1, self.cols):
line = pivots.index(i)
if reduced[line, j] != 0:
assert j not in pivots
basis[basiskey.index(j)][i,0] = -1 * reduced[line, j]
return basis
def charpoly(self, var):
assert self.lines == self.cols
if isinstance(var, Symbol):
x = var
else:
raise "Input variable to charpoly not valid"
copy = self[:,:]
for i in range(self.lines):
copy[i,i] -= x
return copy.det()
def eigenvals(self, var=None):
""" Calls polynomials's roots(), doesn't support coeff type right now """
# returns list of pairs (eigenval, multiplicty)
if var == None:
var = Symbol('x')
p = self.charpoly(var)
num, den = p.as_numer_denom()
if den != 1:
divop = sympy.polynomials.div(num, den)
assert divop[1] == 0
p = divop[0]
rl = sympy.polynomials.roots(p, var)
assert len(rl) == self.lines
outlist = []
def f(num):
def g(n):
if num == n:
return True
else:
return False
return g
while rl != []:
n = len(filter(f(rl[0]), rl))
outlist.append([rl[0], n])
for i in range(n):
rl.remove(rl[0])
return outlist
def eigenvects(self):
# return list of triples (eigenval, multiplicty, basis)
out, vlist = [], self.eigenvals()
for i in range(len(vlist)):
tmp = self - eye(self.lines)*vlist[i][0]
basis = tmp.nullspace()
# check if basis is right size, don't do it if symbolic - too many solutions
if not tmp.is_symbolic():
assert len(basis) == vlist[i][1]
vlist[i].append(basis)
out.append(vlist[i])
return out
def zero(n):
    """Return an n x n zero Matrix."""
    return zeronm(n,n)
def zeronm(n,m):
    """Return an n x m zero Matrix; n and m must be positive."""
    assert n>0
    assert m>0
    return Matrix(n,m, lambda i,j: 0)
def one(n):
    """Return the n x n identity Matrix (same result as eye)."""
    out = zero(n)
    for d in range(n):
        out[d, d] = 1
    return out
def eye(n):
    """Return the n x n identity Matrix; n must be positive."""
    assert n>0
    ident = zeronm(n, n)
    for d in range(n):
        ident[d, d] = 1
    return ident
def randMatrix(r,c,min=0,max=99,seed=[]):
    """Return an r x c Matrix of uniform random integers in [min, max].

    With no seed the RNG is seeded from a system source (time);
    passing a seed makes the result reproducible.
    NOTE(review): ``min``/``max`` shadow the builtins and ``seed=[]``
    is a mutable default used as a sentinel -- keep in mind before
    changing the public signature.
    """
    if seed == []:
        random.seed() # no seed given: seed from system time
    else:
        random.seed(seed)
    return Matrix(r,c,lambda i,j: random.randint(min,max))
def hessian(f, varlist):
    """Return the m x m Hessian matrix of expression f.

    varlist is the ordered collection of variables (a 1 x m row Matrix,
    list or tuple).  Only the upper triangle is differentiated; the
    lower triangle is filled in by symmetry.

    Fixes: the original raised plain strings (invalid in modern Python),
    and its except clause formatted "%d" with an undefined name ``i``,
    which would itself raise NameError.
    """
    # f is the expression representing a function f, return regular matrix
    if isinstance(varlist, (list, tuple)):
        m = len(varlist)
    elif isinstance(varlist, Matrix):
        m = varlist.cols
        assert varlist.lines == 1
    else:
        raise TypeError("Improper variable list in hessian function")
    assert m > 0 # need at least one variable
    try:
        f.diff(varlist[0]) # check differentiability
    except AttributeError:
        raise TypeError("Function %s is not differentiable" % repr(f))
    out = zero(m)
    for i in range(m):
        for j in range(i,m):
            out[i,j] = f.diff(varlist[i]).diff(varlist[j])
    # mirror the upper triangle into the lower one (Hessian is symmetric)
    for i in range(m):
        for j in range(i):
            out[i,j] = out[j,i]
    return out
def GramSchmidt(vlist, orthog=False):
    """Gram-Schmidt orthogonalization of the vectors in vlist.

    Returns a list of mutually orthogonal vectors spanning the same
    space; when orthog is True they are also normalized.  Raises
    ValueError when the input vectors are linearly dependent (the
    original raised a plain string, invalid in modern Python).
    """
    out = []
    m = len(vlist)
    for i in range(m):
        tmp = vlist[i]
        for j in range(i):
            # subtract the projection onto each basis vector found so far
            tmp -= vlist[i].project(out[j])
        # NOTE(review): this zero test assumes length-3 row vectors -- confirm
        if tmp == Matrix([[0,0,0]]):
            raise ValueError("GramSchmidt: vector set not linearly independent")
        out.append(tmp)
    if orthog:
        for i in range(len(out)):
            out[i] = out[i].normalized()
    return out
def wronskian(functions, var):
    """Return the Wronskian determinant of the given functions w.r.t. var."""
    # sympify in place, as before (the caller's list is mutated)
    for index, fn in enumerate(functions):
        functions[index] = Basic.sympify(fn)
    order = len(functions)
    W = Matrix(order, order, lambda i, j: functions[i].diff(var, j))
    return W.det()
def casoratian(seqs, n, zero=True):
    """Given linear difference operator L of order 'k' and homogeneous
       equation Ly = 0 we want to compute kernel of L, which is a set
       of 'k' sequences: a(n), b(n), ... z(n).

       Solutions of L are lineary independent iff their Casoratian,
       denoted as C(a, b, ..., z), do not vanish for n = 0.

       Casoratian is defined by k x k determinant:

                  +  a(n)     b(n)     . . . z(n)     +
                  |  a(n+1)   b(n+1)   . . . z(n+1)   |
                  |    .         .     .        .     |
                  |    .         .       .      .     |
                  |    .         .         .    .     |
                  +  a(n+k-1) b(n+k-1) . . . z(n+k-1) +

       It proves very useful in rsolve_hyper() where it is applied
       to a generating set of a recurrence to factor out lineary
       dependent solutions and return a basis.

       >>> from sympy import *
       >>> n = Symbol('n', integer=True)

       Exponential and factorial are lineary independent:

       >>> casoratian([2**n, factorial(n)], n) != 0
       True

    """
    seqs = [Basic.sympify(s) for s in seqs]
    # evaluate at n = 0..k-1 when zero is True, else leave n symbolic
    if zero:
        entry = lambda i, j: seqs[j].subs(n, i)
    else:
        entry = lambda i, j: seqs[j].subs(n, n+i)
    k = len(seqs)
    return Matrix(k, k, entry).det()
class SMatrix(Matrix):
    """Sparse matrix: entries live in ``self.mat``, a dict mapping
    ``(row, col)`` tuples to non-zero values; zero entries are simply
    absent.  Dimensions are ``self.lines`` x ``self.cols``, matching the
    dense ``Matrix`` interface.  As the original comment below notes, many
    methods are copies of their matrices.py counterparts with ``Matrix``
    replaced by ``SMatrix``.
    """
    def __init__(self, *args):
        """Accept the same constructor forms as ``Matrix``:

        * ``SMatrix(lines, cols, op)`` -- ``op(i, j)`` computes entry (i, j)
        * ``SMatrix(lines, cols, flat_sequence)`` -- row-major entries
        * ``SMatrix(lines, cols, dict)`` -- an existing (i, j) -> value map
        * ``SMatrix(rows...)`` / ``SMatrix([row, ...])`` -- nested rows, or a
          flat sequence which is treated as a column vector

        Only values that sympify to something non-zero are stored.
        """
        if len(args) == 3 and callable(args[2]):
            op = args[2]
            assert isinstance(args[0], int) and isinstance(args[1], int)
            self.lines = args[0]
            self.cols = args[1]
            self.mat = {}
            for i in range(self.lines):
                for j in range(self.cols):
                    value = Basic.sympify(op(i,j))
                    if value != 0:
                        self.mat[(i,j)] = value
        elif len(args)==3 and isinstance(args[0],int) and \
                isinstance(args[1],int) and isinstance(args[2], (list, tuple)):
            self.lines = args[0]
            self.cols = args[1]
            mat = args[2]
            self.mat = {}
            for i in range(self.lines):
                for j in range(self.cols):
                    value = Basic.sympify(mat[i*self.cols+j])
                    if value != 0:
                        self.mat[(i,j)] = value
        elif len(args)==3 and isinstance(args[0],int) and \
                isinstance(args[1],int) and isinstance(args[2], dict):
            self.lines = args[0]
            self.cols = args[1]
            self.mat = {}
            # manual copy, copy.deepcopy() doesn't work
            for key in args[2].keys():
                self.mat[key] = args[2][key]
        else:
            if len(args) == 1:
                mat = args[0]
            else:
                mat = args
            if not isinstance(mat[0], (list, tuple)):
                # a flat sequence of scalars becomes a column vector
                mat = [ [element] for element in mat ]
            self.lines = len(mat)
            self.cols = len(mat[0])
            self.mat = {}
            for i in range(self.lines):
                assert len(mat[i]) == self.cols
                for j in range(self.cols):
                    value = Basic.sympify(mat[i][j])
                    if value != 0:
                        self.mat[(i,j)] = value
    def __getitem__(self, key):
        """Index by flat int/slice (row-major, returning a scalar or list),
        by an ``(i, j)`` pair (returning the entry, 0 if unstored), or by a
        pair containing slices (returning a submatrix).
        """
        if isinstance(key, slice) or isinstance(key, int):
            lo, hi = self.slice2bounds(key, self.lines*self.cols)
            L = []
            for i in range(lo, hi):
                m,n = self.rowdecomp(i)
                if self.mat.has_key((m,n)):
                    L.append(self.mat[(m,n)])
                else:
                    L.append(0)
            # a single-element slice collapses to a scalar
            if len(L) == 1:
                return L[0]
            else:
                return L
        assert len(key) == 2
        if isinstance(key[0], int) and isinstance(key[1], int):
            i,j=self.key2ij(key)
            if self.mat.has_key((i,j)):
                return self.mat[(i,j)]
            else:
                return 0
        elif isinstance(key[0], slice) or isinstance(key[1], slice):
            return self.submatrix(key)
        else:
            raise IndexError("Index out of range: a[%s]"%repr(key))
    def rowdecomp(self, num):
        """Decompose flat row-major index ``num`` into an ``(i, j)`` pair.

        NOTE(review): negative indices pass the assertion but are returned
        as ``(0, num)`` unnormalised -- confirm callers never pass them.
        """
        assert (0 <= num < self.lines * self.cols) or \
               (0 <= -1*num < self.lines * self.cols)
        i, j = 0, num
        while j >= self.cols:
            j -= self.cols
            i += 1
        return i,j
    def __setitem__(self, key, value):
        """Assign entry/submatrix; storing a zero deletes the dict entry."""
        # almost identical to Matrix.__setitem__, need to test for 0
        assert len(key) == 2
        if isinstance(key[0], slice) or isinstance(key[1], slice):
            if isinstance(value, Matrix):
                self.copyin_matrix(key, value)
            if isinstance(value, (list, tuple)):
                self.copyin_list(key, value)
        else:
            i,j=self.key2ij(key)
            testval = Basic.sympify(value)
            if testval != 0:
                self.mat[(i,j)] = testval
            elif self.mat.has_key((i,j)):
                # writing a zero over a stored entry removes it
                del self.mat[(i,j)]
    def __str__(self):
        """Render the full (dense) grid, one row per line, zeros included."""
        s = ""
        for i in range(self.lines):
            for j in range(self.cols):
                if self.mat.has_key((i,j)):
                    s += "%s " % repr(self[i,j])
                else:
                    s += "0 "
            s += "\n"
        return s
    def row_del(self, k):
        """Delete row ``k`` in place, shifting later rows up by one."""
        newD = {}
        for (i,j) in self.mat.keys():
            if i==k:
                pass
            elif i > k:
                newD[i-1,j] = self.mat[i,j]
            else:
                newD[i,j] = self.mat[i,j]
        self.mat = newD
        self.lines -= 1
    def col_del(self, k):
        """Delete column ``k`` in place, shifting later columns left by one."""
        newD = {}
        for (i,j) in self.mat.keys():
            if j==k:
                pass
            elif j > k:
                newD[i,j-1] = self.mat[i,j]
            else:
                newD[i,j] = self.mat[i,j]
        self.mat = newD
        self.cols -= 1
    # from here to end all functions are same as in matrices.py
    # with Matrix replaced with SMatrix
    def copyin_list(self, key, value):
        """Copy a list/tuple into the slice ``key`` via a temporary SMatrix."""
        assert isinstance(value, (list, tuple))
        self.copyin_matrix(key, SMatrix(value))
    def multiply(self,b):
        """Returns self*b; a 1x1 product collapses to a scalar."""
        def dotprod(a,b,i,j):
            # dot product of row i of a with column j of b
            assert a.cols == b.lines
            r=0
            for x in range(a.cols):
                r+=a[i,x]*b[x,j]
            return r
        r = SMatrix(self.lines, b.cols, lambda i,j: dotprod(self,b,i,j))
        if r.lines == 1 and r.cols ==1:
            return r[0,0]
        return r
    def submatrix(self, keys):
        """Return the submatrix selected by a pair of slices (or slice+int)."""
        assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
        rlo, rhi = self.slice2bounds(keys[0], self.lines)
        clo, chi = self.slice2bounds(keys[1], self.cols)
        if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
            raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
        return SMatrix(rhi-rlo, chi-clo, lambda i,j: self[i+rlo, j+clo])
    def reshape(self, _rows, _cols):
        """Return a new ``_rows`` x ``_cols`` SMatrix with the same row-major
        entries.  The element count must be unchanged.
        """
        if self.lines*self.cols != _rows*_cols:
            # BUGFIX: this used to ``print`` a message and then fall through,
            # silently building a malformed matrix; refuse explicitly instead.
            raise ValueError("Invalid reshape parameters %d %d" % (_rows, _cols))
        newD = {}
        for i in range(_rows):
            for j in range(_cols):
                # reinterpret the new flat position against the old shape
                m,n = self.rowdecomp(i*_cols + j)
                if self.mat.has_key((m,n)):
                    newD[(i,j)] = self.mat[(m,n)]
        return SMatrix(_rows, _cols, newD)
    def cross(self, b):
        """Return the 3D cross product ``self x b`` as a 1x3 SMatrix.

        Both operands must be 3-component vectors (1x3 or 3x1).
        """
        assert isinstance(b, (list, tuple, Matrix))
        # BUGFIX: the original condition applied ``not`` only to the first
        # operand group (``not A and B`` parses as ``(not A) and B``), so a
        # badly-shaped ``b`` slipped through; require BOTH to be 3-vectors.
        # Also raise a real exception -- raising a string is a TypeError on
        # Python >= 2.6.
        if not ((self.lines == 1 and self.cols == 3 or \
                 self.lines == 3 and self.cols == 1 ) and \
                (b.lines == 1 and b.cols == 3 or \
                 b.lines == 3 and b.cols == 1)):
            raise ValueError("Dimensions incorrect for cross product")
        else:
            return SMatrix(1,3,((self[1]*b[2] - self[2]*b[1]),
                                (self[2]*b[0] - self[0]*b[2]),
                                (self[0]*b[1] - self[1]*b[0])))
    def zeronm(self,n,m):
        """Return an n x m all-zero SMatrix."""
        return SMatrix(n,m,{})
    def zero(self, n):
        """Return an n x n all-zero SMatrix."""
        return SMatrix(n,n,{})
    def eye(self, n):
        """Return the n x n identity as an SMatrix."""
        tmp = SMatrix(n,n,lambda i,j:0)
        for i in range(tmp.lines):
            tmp[i,i] = 1
        return tmp
| certik/sympy-oldcore | sympy/matrices/matrices.py | Python | bsd-3-clause | 40,228 | [
"DIRAC",
"Gaussian"
] | 9274fa63bf0f2b26ce49da22a41f12c8bee8ae647ce1d543aa518ba4da7989c0 |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-output
# Author : Stuart Paterson
########################################################################
"""
Retrieve output of a Grid pilot
Example:
$ dirac-admin-get-pilot-output https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
$ ls -la
drwxr-xr-x 2 hamar marseill 2048 Feb 21 14:13 pilot_26KCLKBFtxXKHF4_ZrQjkw
"""
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    """Fetch and store the output of each Grid pilot named on the command line.

    Exits with status 0 when every retrieval succeeds, 2 otherwise; all
    failures are reported on stdout before exiting.
    """
    # Registered arguments are picked up by the auto-generated help text.
    Script.registerArgument(["PilotID: Grid ID of the pilot"])
    _, pilotIDs = Script.parseCommandLine(ignoreErrors=True)

    from DIRAC import exit as DIRACExit
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin

    admin = DiracAdmin()
    failures = []

    # First pass: attempt every retrieval, collecting (id, message) failures.
    for pilotID in pilotIDs:
        res = admin.getPilotOutput(pilotID)
        if not res["OK"]:
            failures.append((pilotID, res["Message"]))

    # Second pass: report everything that went wrong.
    for failure in failures:
        print("ERROR %s: %s" % failure)

    DIRACExit(2 if failures else 0)
# Standard CLI entry point: run main() only when executed as a script.
if __name__ == "__main__":
    main()
| DIRACGrid/DIRAC | src/DIRAC/Interfaces/scripts/dirac_admin_get_pilot_output.py | Python | gpl-3.0 | 1,226 | [
"DIRAC"
] | 8192fa3d5ea78a8d388dafac7c2ad2e78b882c20e641d21953d9e3728f5fdae3 |
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
import urllib
#
# SlicerTestRecentFilesTests
#
class SlicerTestRecentFilesTests:
  """Scripted-module wrapper that registers the recent-files self test
  with Slicer's SelfTest discovery mechanism.
  """
  def __init__(self, parent):
    """Fill in module metadata on *parent* and register the test runner."""
    self.parent = parent
    parent.title = "SlicerTestRecentFilesTests"
    parent.categories = ["Testing.TestCases"]
    parent.dependencies = []
    parent.contributors = ["Johan Andruejol (Kitware)"] # replace with "Firstname Lastname (Org)"
    parent.helpText = """
    """
    parent.acknowledgementText = """TODO""" # replace with organization, grant and thanks.
    # Register this test in the SelfTest module's discovery list.  This
    # module may be discovered before SelfTests itself, so create the
    # list on demand.
    if not hasattr(slicer, 'selfTests'):
      slicer.selfTests = {}
    slicer.selfTests['SlicerTestRecentFilesTests'] = self.runTest
  def runTest(self):
    """Instantiate the test case and run all of its test_* methods."""
    SlicerTestRecentFilesTestsTest().runTests()
#
# qSlicerTestRecentFilesTestsTest
#
class SlicerTestRecentFilesTestsTest(unittest.TestCase):
  """Self test for the main window's recently-loaded-files menu.

  Loads a sample volume and checks that its path appears in the recent
  history, that 'Clear History' empties it, and that the
  recentlyLoadedFilesChanged() signal fires for both operations.
  """
  def delayDisplay(self,message,msec=1000):
    """This utility method displays a small dialog and waits.
    This does two things: 1) it lets the event loop catch up
    to the state of the test so that rendering and widget updates
    have all taken place before the test continues and 2) it
    shows the user/developer/tester the state of the test
    so that we'll know when it breaks.
    """
    print(message)
    self.info = qt.QDialog()
    self.infoLayout = qt.QVBoxLayout()
    self.info.setLayout(self.infoLayout)
    self.label = qt.QLabel(message,self.info)
    self.infoLayout.addWidget(self.label)
    # auto-close the dialog after msec so the test keeps running unattended
    qt.QTimer.singleShot(msec, self.info.close)
    self.info.exec_()
  def getTestMethodNames(self):
    """Return names of all callable attributes containing 'test_'."""
    methods = []
    for method in dir(self):
      if (callable(getattr(self, method)) and method.find('test_') != -1):
        methods.append(method)
    return methods
  def setUp(self):
    """ Do whatever is needed to reset the state - typically a scene clear will be enough.
    Also clears the recent-files history and resets the signal counter.
    """
    slicer.mrmlScene.Clear(0)
    deleteHistoryAction = self.deleteRecentFilesHistoryAction()
    deleteHistoryAction.trigger()
    self.recentFilesChangedSignalTriggered = 0
  def tearDown(self):
    # no per-test cleanup needed; setUp() resets all state
    pass
  def runTests(self):
    """Run as few or as many tests as needed here.
    """
    for methodName in self.getTestMethodNames():
      self.runTest(methodName)
  def runTest(self, method):
    """Run a single named test method, bracketed by setUp/tearDown."""
    self.setUp()
    getattr(self, method)()
    self.tearDown()
  def deleteRecentFilesHistoryAction(self):
    """Return the 'Clear History' action of the main window's
    RecentlyLoadedMenu, or None if that action is absent.
    """
    mainWindow = slicer.util.mainWindow()
    self.assertTrue(mainWindow)
    recentlyLoadedMenu = slicer.util.findChildren(mainWindow, 'RecentlyLoadedMenu')[0]
    self.assertTrue(recentlyLoadedMenu)
    for action in recentlyLoadedMenu.actions():
      if action.text == 'Clear History':
        return action
    return None
  def openVolume(self, downloads):
    """Download (into slicer.app.temporaryPath, if missing or empty) and
    load each (url, name, loader) tuple in *downloads*.
    """
    # perform the downloads if needed, then load
    for url,name,loader in downloads:
      filePath = slicer.app.temporaryPath + '/' + name
      if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
        self.delayDisplay('Requesting download %s from %s...\n' % (name, url))
        urllib.urlretrieve(url, filePath)
      if loader:
        self.delayDisplay('Loading %s...\n' % (name,))
        loader(filePath)
    self.delayDisplay('Finished with download and loading\n')
  def onRecentFilesChanged(self):
    # slot counting recentlyLoadedFilesChanged() emissions
    self.recentFilesChangedSignalTriggered = self.recentFilesChangedSignalTriggered + 1
  def test_LoadData(self):
    """Load one volume and verify it lands in the (initially empty)
    recent-files history, firing the change signal exactly once.
    """
    self.delayDisplay('test_TestReturnToWelcome')
    mainWindow = slicer.util.mainWindow()
    self.assertTrue(mainWindow)
    self.assertEqual(len(mainWindow.recentlyLoadedPaths()), 0)
    # NOTE(review): this connect() is never disconnected; if test_LoadData
    # runs more than once in a session the slot may end up connected
    # twice and the count below would double -- verify connect semantics.
    mainWindow.connect('recentlyLoadedFilesChanged()', self.onRecentFilesChanged)
    downloads = (
        ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),
        )
    self.openVolume(downloads)
    self.assertEqual(self.recentFilesChangedSignalTriggered, 1)
    self.assertEqual(len(mainWindow.recentlyLoadedPaths()), 1)
    path = mainWindow.recentlyLoadedPaths()[0]
    self.assertTrue(path.find('FA.nrrd') > 0)
    self.delayDisplay('Test passed!')
  def test_ClearHistory(self):
    """After test_LoadData, trigger 'Clear History' and verify the history
    empties and the change signal count reaches two.
    """
    self.delayDisplay('test_ClearHistory')
    self.test_LoadData()
    deleteHistoryAction = self.deleteRecentFilesHistoryAction()
    deleteHistoryAction.trigger()
    self.assertEqual(self.recentFilesChangedSignalTriggered, 2)
    self.assertEqual(len(slicer.util.mainWindow().recentlyLoadedPaths()), 0)
    self.delayDisplay('Test passed!')
#
# qSlicerTestRecentFilesTestsWidget
#
class SlicerTestRecentFilesTestsWidget():
  """Minimal module GUI: Reload, Reload-and-Test and Run-Tests buttons."""
  def __init__(self, parent = None):
    """Adopt *parent*, or when standalone create a qMRMLWidget, build the
    GUI and show it.
    """
    if not parent:
      self.parent = slicer.qMRMLWidget()
      self.parent.setLayout(qt.QVBoxLayout())
      self.parent.setMRMLScene(slicer.mrmlScene)
    else:
      self.parent = parent
    self.layout = self.parent.layout()
    if not parent:
      self.setup()
      self.parent.show()
    self.moduleName = 'SlicerTestRecentFilesTests'
    self.tester = SlicerTestRecentFilesTestsTest()
  def setup(self):
    """Build the widget's buttons and wire them to their handlers."""
    # Instantiate and connect widgets ...
    # reload button
    # (use this during development, but remove it when delivering
    # your module to users)
    self.reloadButton = qt.QPushButton("Reload")
    self.reloadButton.toolTip = "Reload this module."
    self.reloadButton.name = "Tests Reload"
    self.layout.addWidget(self.reloadButton)
    self.reloadButton.connect('clicked()', self.onReload)
    # reload and test button
    # (use this during development, but remove it when delivering
    # your module to users)
    self.reloadAndTestButton = qt.QPushButton("Reload and Test")
    self.reloadAndTestButton.toolTip = "Reload this module and then run the self tests."
    self.layout.addWidget(self.reloadAndTestButton)
    self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)
    self.testButton = qt.QPushButton('Run Tests')
    self.layout.addWidget(self.testButton)
    self.testButton.connect('clicked(bool)', self.tester.runTests)
    # Add vertical spacer
    self.layout.addStretch(1)
  def onReload(self):
    """Generic reload method for any scripted module.
    ModuleWizard will substitute correct default.
    """
    globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)
  def onReloadAndTest(self):
    """Reload the module's source, then run the full self-test suite."""
    self.onReload()
    self.tester.runTests()
| agirault/VesselView | Applications/App/Testing/Python/SlicerTestRecentFilesTests.py | Python | apache-2.0 | 6,572 | [
"VTK"
] | cb2fde71518325cb02446cbe98a4d1371b6df8012a252d845c59bc9377b17486 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.