text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Density expansion on plane waves'''
import sys
import copy
import numpy
import h5py
from pyscf import lib
from pyscf import gto
from pyscf import dft
from pyscf.pbc import tools
from pyscf.pbc import gto as pgto
from pyscf.pbc.df import ft_ao
from pyscf.pbc.dft import gen_grid
from pyscf.pbc.dft import numint
from pyscf.pbc.gto import pseudo
from pyscf.pbc.df import fft
from pyscf.pbc.df.aft import _sub_df_jk_
from mpi4pyscf.lib import logger
from mpi4pyscf.tools import mpi
from mpi4pyscf.pbc.df import fft_jk as mpi_fft_jk
comm = mpi.comm
rank = mpi.rank
@mpi.parallel_call
def get_nuc(mydf, kpts):
    """Compute the nuclear-attraction AO matrix for each k-point.

    The nuclear potential is assembled in reciprocal space from the
    structure factors and the Coulomb kernel, inverse-FFTed to the
    real-space grid, then contracted with AO values.  Runs under MPI:
    each rank integrates its own slice of the grid and the partial
    matrices are summed onto rank 0 (other ranks return None).
    """
    mydf = _sync_mydf(mydf)
    cell = mydf.cell
    if kpts is None:
        kpts_lst = numpy.zeros((1,3))
    else:
        kpts_lst = numpy.reshape(kpts, (-1,3))
    if abs(kpts_lst).sum() < 1e-9:  # gamma_point
        dtype = numpy.float64
    else:
        dtype = numpy.complex128
    # NOTE(review): `dtype` is computed but never used below; the result
    # dtype follows from the AO arrays accumulated into `vne`.
    mesh = mydf.mesh
    charge = -cell.atom_charges()  # nuclear charges enter with a minus sign
    Gv = cell.get_Gv(mesh)
    SI = cell.get_SI(Gv)           # per-atom structure factors exp(-iG.R)
    rhoG = numpy.dot(charge, SI)   # nuclear "charge density" in G space
    coulG = tools.get_coulG(cell, mesh=mesh, Gv=Gv)
    vneG = rhoG * coulG
    vneR = tools.ifft(vneG, mydf.mesh).real  # potential on the real-space grid
    vne = [0] * len(kpts_lst)
    # Each rank visits only its own grid slice [p0:p1).
    for ao_ks_etc, p0, p1 in mydf.mpi_aoR_loop(mydf.grids, kpts_lst):
        ao_ks = ao_ks_etc[0]
        for k, ao in enumerate(ao_ks):
            vne[k] += lib.dot(ao.T.conj()*vneR[p0:p1], ao)
        ao = ao_ks = None
    vne = mpi.reduce(lib.asarray(vne))  # sum the per-rank partial matrices
    if rank == 0:
        # A single k-point input yields one matrix, not a length-1 array.
        if kpts is None or numpy.shape(kpts) == (3,):
            vne = vne[0]
        return vne
@mpi.parallel_call
def get_pp(mydf, kpts=None):
    """Compute the GTH pseudopotential AO matrix (local + non-local).

    The local part is integrated on the real-space grid (grid points
    distributed over MPI ranks); the non-local projector part is
    evaluated in reciprocal space (k-points distributed over ranks).
    Only rank 0 returns the assembled result; other ranks return None.
    """
    mydf = _sync_mydf(mydf)
    cell = mydf.cell
    if kpts is None:
        kpts_lst = numpy.zeros((1,3))
    else:
        kpts_lst = numpy.reshape(kpts, (-1,3))
    if abs(kpts_lst).sum() < 1e-9:
        # Gamma point only: matrices are real.
        dtype = numpy.float64
    else:
        dtype = numpy.complex128

    mesh = mydf.mesh
    SI = cell.get_SI()
    Gv = cell.get_Gv(mesh)
    vpplocG = pseudo.get_vlocG(cell, Gv)
    # Weight each atom's local PP by its structure factor and sum over atoms.
    vpplocG = -numpy.einsum('ij,ij->j', SI, vpplocG)
    ngrids = len(vpplocG)
    nao = cell.nao_nr()
    nkpts = len(kpts_lst)

    # vpploc evaluated in real-space; each rank handles grid slice [p0:p1).
    vpplocR = tools.ifft(vpplocG, mesh).real
    vpp = numpy.zeros((nkpts,nao,nao), dtype=dtype)
    for ao_ks_etc, p0, p1 in mydf.mpi_aoR_loop(mydf.grids, kpts_lst):
        ao_ks = ao_ks_etc[0]
        for k, ao in enumerate(ao_ks):
            vpp[k] += lib.dot(ao.T.conj()*vpplocR[p0:p1], ao)
        ao = ao_ks = None
    vpp = mpi.reduce(lib.asarray(vpp))  # sum grid-slice contributions on rank 0

    # vppnonloc evaluated in reciprocal space.  A one-shell "fake molecule"
    # lets the molecular AO evaluator produce the projector angular parts.
    fakemol = gto.Mole()
    fakemol._atm = numpy.zeros((1,gto.ATM_SLOTS), dtype=numpy.int32)
    fakemol._bas = numpy.zeros((1,gto.BAS_SLOTS), dtype=numpy.int32)
    ptr = gto.PTR_ENV_START
    fakemol._env = numpy.zeros(ptr+10)
    fakemol._bas[0,gto.NPRIM_OF ] = 1
    fakemol._bas[0,gto.NCTR_OF ] = 1
    fakemol._bas[0,gto.PTR_EXP ] = ptr+3
    fakemol._bas[0,gto.PTR_COEFF] = ptr+4

    # buf for SPG_lmi upto l=0..3 and nl=3
    buf = numpy.empty((48,ngrids), dtype=numpy.complex128)

    def vppnl_by_k(kpt):
        # Non-local PP contribution for a single k-point.
        Gk = Gv + kpt
        G_rad = lib.norm(Gk, axis=1)
        aokG = ft_ao.ft_ao(cell, Gv, kpt=kpt) * (ngrids/cell.vol)
        vppnl = 0
        for ia in range(cell.natm):
            symb = cell.atom_symbol(ia)
            if symb not in cell._pseudo:
                continue
            pp = cell._pseudo[symb]
            p1 = 0
            for l, proj in enumerate(pp[5:]):
                rl, nl, hl = proj
                if nl > 0:
                    fakemol._bas[0,gto.ANG_OF] = l
                    fakemol._env[ptr+3] = .5*rl**2
                    fakemol._env[ptr+4] = rl**(l+1.5)*numpy.pi**1.25
                    pYlm_part = dft.numint.eval_ao(fakemol, Gk, deriv=0)

                    p0, p1 = p1, p1+nl*(l*2+1)
                    # pYlm is real, SI[ia] is complex
                    pYlm = numpy.ndarray((nl,l*2+1,ngrids), dtype=numpy.complex128, buffer=buf[p0:p1])
                    for k in range(nl):
                        qkl = pseudo.pp._qli(G_rad*rl, l, k)
                        pYlm[k] = pYlm_part.T * qkl

            # Equivalent dense-einsum formulation, kept for reference:
            #:SPG_lmi = numpy.einsum('g,nmg->nmg', SI[ia].conj(), pYlm)
            #:SPG_lm_aoG = numpy.einsum('nmg,gp->nmp', SPG_lmi, aokG)
            #:tmp = numpy.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
            #:vppnl += numpy.einsum('imp,imq->pq', SPG_lm_aoG.conj(), tmp)
            if p1 > 0:
                SPG_lmi = buf[:p1]
                SPG_lmi *= SI[ia].conj()
                SPG_lm_aoGs = lib.zdot(SPG_lmi, aokG)
                p1 = 0
                for l, proj in enumerate(pp[5:]):
                    rl, nl, hl = proj
                    if nl > 0:
                        p0, p1 = p1, p1+nl*(l*2+1)
                        hl = numpy.asarray(hl)
                        SPG_lm_aoG = SPG_lm_aoGs[p0:p1].reshape(nl,l*2+1,-1)
                        tmp = numpy.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
                        vppnl += numpy.einsum('imp,imq->pq', SPG_lm_aoG.conj(), tmp)
        return vppnl * (1./ngrids**2)

    # Distribute k-points over ranks, then gather all contributions.
    vppnl = []
    for kpt in mpi.static_partition(kpts_lst):
        vppnl.append(vppnl_by_k(kpt))
    vppnl = mpi.gather(lib.asarray(vppnl))

    if rank == 0:
        for k in range(nkpts):
            if dtype == numpy.float64:
                vpp[k] += vppnl[k].real
            else:
                vpp[k] += vppnl[k]
        # A single k-point input yields one matrix, not a length-1 array.
        if kpts is None or numpy.shape(kpts) == (3,):
            vpp = vpp[0]
        return vpp
def _sync_mydf(mydf):
    # Broadcast rank-0's DF settings (verbose, max_memory, kpts, mesh) so
    # every MPI rank computes with a consistent configuration.
    mydf.unpack_(comm.bcast(mydf.pack()))
    return mydf
@mpi.register_class
class FFTDF(fft.FFTDF):
    '''Density expansion on plane waves (MPI-parallel version).
    '''
    def __init__(self, cell, kpts=numpy.zeros((1,3))):
        # FFT-based density fitting needs a genuinely periodic 3D system;
        # low-dimension treatments are not implemented in the MPI code path.
        if (cell.dimension < 2 or
            (cell.dimension == 2 and cell.low_dim_ft_type == 'inf_vacuum')):
            raise RuntimeError('MPI-FFTDF module does not support 0D/1D/2D low-dimension '
                               'PBC system')
        fft.FFTDF.__init__(self, cell, kpts)

    def pack(self):
        # Minimal picklable state broadcast to worker ranks (see _sync_mydf).
        return {'verbose' : self.verbose,
                'max_memory': self.max_memory,
                'kpts'      : self.kpts,
                'mesh'      : self.mesh}

    def unpack_(self, dfdic):
        # Inverse of pack(): restore state received from rank 0.
        self.__dict__.update(dfdic)
        return self

    def mpi_aoR_loop(self, grids=None, kpts=None, deriv=0):
        """Yield (ao_k1_etc, p0, p1) for this rank's slice of the grid.

        The full grid is split over MPI ranks in multiples of
        gen_grid.BLKSIZE; p0/p1 are absolute grid-point offsets so callers
        can index global real-space arrays.
        """
        if grids is None:
            grids = self.grids
            cell = self.cell
        else:
            cell = grids.cell
            if grids.non0tab is None:
                grids.build(with_non0tab=True)

        if kpts is None: kpts = self.kpts
        kpts = numpy.asarray(kpts)

        max_memory = max(2000, self.max_memory-lib.current_memory()[0])
        ni = self._numint
        nao = cell.nao_nr()
        ngrids = grids.weights.size
        # Partition whole blocks (BLKSIZE points each) across ranks.
        nblks = (ngrids+gen_grid.BLKSIZE-1)//gen_grid.BLKSIZE
        mpi_size = mpi.pool.size
        step = (nblks+mpi_size-1) // mpi_size * gen_grid.BLKSIZE
        start = min(ngrids, rank * step)
        stop = min(ngrids, start + step)

        # Shallow-copy the grids object so slicing doesn't clobber the
        # shared instance.
        grids = copy.copy(grids)
        grids.coords = grids.coords[start:stop]
        grids.weights = grids.weights[start:stop]
        grids.non0tab = grids.non0tab[start:stop]
        p1 = start
        for ao_k1_etc in ni.block_loop(cell, grids, nao, deriv, kpts,
                                       max_memory=max_memory):
            coords = ao_k1_etc[4]
            p0, p1 = p1, p1 + coords.shape[0]
            yield ao_k1_etc, p0, p1

    get_pp = get_pp
    get_nuc = get_nuc

    def get_jk(self, dm, hermi=1, kpts=None, kpts_band=None,
               with_j=True, with_k=True, omega=None, exxdiv='ewald'):
        """Compute Coulomb (J) and exchange (K) matrices via MPI FFT code."""
        # J/K for RSH functionals
        if omega is not None:
            return _sub_df_jk_(self, dm, hermi, kpts, kpts_band,
                               with_j, with_k, omega, exxdiv)

        if kpts is None:
            if numpy.all(self.kpts == 0):
                # Gamma-point calculation by default
                kpts = numpy.zeros(3)
            else:
                kpts = self.kpts
        else:
            kpts = numpy.asarray(kpts)

        vj = vk = None
        if kpts.shape == (3,):
            # Single k-point path.
            if with_k:
                vk = mpi_fft_jk.get_k(self, dm, hermi, kpts, kpts_band, exxdiv)
            if with_j:
                vj = mpi_fft_jk.get_j(self, dm, hermi, kpts, kpts_band)
        else:
            # Multiple k-points path.
            if with_k:
                vk = mpi_fft_jk.get_k_kpts(self, dm, hermi, kpts, kpts_band, exxdiv)
            if with_j:
                vj = mpi_fft_jk.get_j_kpts(self, dm, hermi, kpts, kpts_band)
        return vj, vk
if __name__ == '__main__':
    # Smoke test; run with mpirun -n <nproc> python fft.py
    from pyscf.pbc import gto as pgto
    from mpi4pyscf.pbc import df
    cell = pgto.Cell()
    cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
    cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],
                  'C' :'gth-szv',}
    cell.pseudo = {'C':'gth-pade'}
    cell.h = numpy.eye(3) * 2.5
    cell.mesh = [11] * 3
    cell.build()
    numpy.random.seed(19)
    kpts = numpy.random.random((5,3))
    mydf = df.FFTDF(cell)
    v = mydf.get_nuc()
    print(v.shape)
    v = mydf.get_pp(kpts)
    print(v.shape)

    # J/K at the gamma point.
    cell = pgto.M(atom='He 0 0 0; He 0 0 1', h=numpy.eye(3)*4, mesh=[11]*3)
    mydf = df.FFTDF(cell)
    nao = cell.nao_nr()
    dm = numpy.ones((nao,nao))
    vj, vk = mydf.get_jk(dm)
    print(vj.shape)
    print(vk.shape)

    # J/K with k-point sampling.
    dm_kpts = [dm]*5
    vj, vk = mydf.get_jk(dm_kpts, kpts=kpts)
    print(vj.shape)
    print(vk.shape)
    mydf.close()
| sunqm/mpi4pyscf | mpi4pyscf/pbc/df/fft.py | Python | gpl-3.0 | 9,680 | [
"PySCF"
] | 17299b8547ea1da0340df90b6632f1baee4c3aa0dcd1f6cb19003bc9679660d5 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import copy
from sys import getrefcount as grc
import time
from unittest import TestCase
import zmq
from zmq.tests import BaseZMQTestCase
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestMessage(BaseZMQTestCase):
    """Tests for zmq.Message refcounting and zero-copy behavior.

    NOTE(review): Python 2 only -- relies on ``unicode``, the old
    ``buffer`` builtin, and str/bytes equivalence.  ``grc`` is
    ``sys.getrefcount``; the baseline count of 2 accounts for the local
    name plus the getrefcount argument.
    """

    def test_above_30(self):
        """Message above 30 bytes are never copied by 0MQ."""
        for i in range(5, 16):  # 32, 64,..., 65536
            s = (2**i)*'x'
            self.assertEquals(grc(s), 2)
            m = zmq.Message(s)
            self.assertEquals(grc(s), 4)
            del m
            self.assertEquals(grc(s), 2)
            del s

    def test_str(self):
        """Test the str representations of the Messages."""
        for i in range(16):
            s = (2**i)*'x'
            m = zmq.Message(s)
            self.assertEquals(s, str(m))
            self.assert_(s is str(m))

    def test_bytes(self):
        """Test the Message.bytes property."""
        for i in range(1,16):
            s = (2**i)*'x'
            m = zmq.Message(s)
            b = m.bytes
            self.assertEquals(s, m.bytes)
            # check that it copies
            self.assert_(b is not s)
            # check that it copies only once
            self.assert_(b is m.bytes)

    def test_unicode(self):
        """Test the unicode representations of the Messages."""
        s = u'asdf'
        # Unicode must be encoded before constructing a Message.
        self.assertRaises(TypeError, zmq.Message, s)
        for i in range(16):
            s = (2**i)*u'§'
            m = zmq.Message(s.encode('utf8'))
            self.assertEquals(s, unicode(str(m),'utf8'))

    def test_len(self):
        """Test the len of the Messages."""
        for i in range(16):
            s = (2**i)*'x'
            m = zmq.Message(s)
            # BUG FIX: the original asserted len(s) == len(s), a tautology
            # that could never fail.  Compare the Message length instead.
            self.assertEquals(len(s), len(m))

    def test_lifecycle1(self):
        """Run through a ref counting cycle with a copy."""
        for i in range(5, 16):  # 32, 64,..., 65536
            s = (2**i)*'x'
            self.assertEquals(grc(s), 2)
            m = zmq.Message(s)
            self.assertEquals(grc(s), 4)
            m2 = copy.copy(m)
            self.assertEquals(grc(s), 5)
            b = m2.buffer
            self.assertEquals(grc(s), 6)
            self.assertEquals(s, str(m))
            self.assertEquals(s, str(m2))
            self.assertEquals(s, m.bytes)
            self.assert_(s is str(m))
            self.assert_(s is str(m2))
            del m2
            self.assertEquals(grc(s), 5)
            del b
            self.assertEquals(grc(s), 4)
            del m
            self.assertEquals(grc(s), 2)
            del s

    def test_lifecycle2(self):
        """Run through a different ref counting cycle with a copy."""
        for i in range(5, 16):  # 32, 64,..., 65536
            s = (2**i)*'x'
            self.assertEquals(grc(s), 2)
            m = zmq.Message(s)
            self.assertEquals(grc(s), 4)
            m2 = copy.copy(m)
            self.assertEquals(grc(s), 5)
            b = m.buffer
            self.assertEquals(grc(s), 6)
            self.assertEquals(s, str(m))
            self.assertEquals(s, str(m2))
            self.assertEquals(s, m2.bytes)
            self.assertEquals(s, m.bytes)
            self.assert_(s is str(m))
            self.assert_(s is str(m2))
            del b
            self.assertEquals(grc(s), 6)
            del m
            self.assertEquals(grc(s), 4)
            del m2
            self.assertEquals(grc(s), 2)
            del s

    def test_tracker(self):
        # A MessageTracker completes once its Message is garbage collected.
        m = zmq.Message('asdf')
        self.assertFalse(m.done)
        pm = zmq.MessageTracker(m)
        self.assertFalse(pm.done)
        del m
        self.assertTrue(pm.done)

    def test_multi_tracker(self):
        # A tracker over several Messages completes only when all are gone.
        m = zmq.Message('asdf')
        m2 = zmq.Message('whoda')
        mt = zmq.MessageTracker(m,m2)
        self.assertFalse(m.done)
        self.assertFalse(mt.done)
        self.assertRaises(zmq.NotDone, mt.wait, 0.1)
        del m
        time.sleep(0.1)
        self.assertRaises(zmq.NotDone, mt.wait, 0.1)
        self.assertFalse(mt.done)
        del m2
        self.assertTrue(mt.wait() is None)
        self.assertTrue(mt.done)

    def test_buffer_in(self):
        """test using a buffer as input"""
        ins = unicode("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√",encoding='utf16')
        m = zmq.Message(buffer(ins))

    def test_bad_buffer_in(self):
        """test using a bad object"""
        self.assertRaises(TypeError, zmq.Message, 5)
        self.assertRaises(TypeError, zmq.Message, object())

    def test_buffer_out(self):
        """receiving buffered output"""
        ins = unicode("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√",encoding='utf8')
        m = zmq.Message(ins.encode('utf8'))
        outb = m.buffer
        self.assertTrue(isinstance(outb, buffer))
        self.assert_(outb is m.buffer)
        self.assert_(m.buffer is m.buffer)

    def test_multisend(self):
        """ensure that a message remains intact after multiple sends"""
        a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        s = "message"
        m = zmq.Message(s)
        self.assertEquals(s, m.bytes)

        a.send(m, copy=False)
        time.sleep(0.1)
        self.assertEquals(s, m.bytes)
        a.send(m, copy=False)
        time.sleep(0.1)
        self.assertEquals(s, m.bytes)
        a.send(m, copy=True)
        time.sleep(0.1)
        self.assertEquals(s, m.bytes)
        a.send(m, copy=True)
        time.sleep(0.1)
        self.assertEquals(s, m.bytes)
        for i in range(4):
            r = b.recv()
            self.assertEquals(s,r)
        self.assertEquals(s, m.bytes)

    def test_buffer_numpy(self):
        """test non-copying numpy array messages"""
        try:
            import numpy
        except ImportError:
            return
        shapes = map(numpy.random.randint, [2]*5,[16]*5)
        for i in range(1,len(shapes)+1):
            shape = shapes[:i]
            A = numpy.random.random(shape)
            m = zmq.Message(A)
            self.assertEquals(A.data, m.buffer)
            B = numpy.frombuffer(m.buffer,dtype=A.dtype).reshape(A.shape)
            self.assertEquals((A==B).all(), True)
| takluyver/pyzmq | zmq/tests/test_message.py | Python | lgpl-3.0 | 7,325 | [
"Brian"
] | 828dccff6be56f7509a784615f18e7cbc6fcaa5853b2d14443a00f601dbfad29 |
# This plugin is adapted from the Python Console plugin and the IPython
# cookbook at:
# http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
# Copyright (C) 2009-2010 Brian Parma
# Updated 2012 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
import ipconsoleprefs
from xl import settings, providers
from xlgui.widgets import menu
try: # xl doesn't exist outside of exaile
from xl.nls import gettext as _
from xl import event
except ImportError:
from gettext import gettext as _
print('Running outside of Exaile...')
import ipython_view as ip
from gi.repository import Pango
import __builtin__, site
FONT = "Luxi Mono 10"
PLUGIN = None
def get_preferences_pane():
    """Return the module implementing this plugin's preferences pane."""
    return ipconsoleprefs
class Quitter(object):
    """Simple class to handle exit, similar to Python 2.5's site.Quitter.

    IPython replaces the builtin quit/exit in a way that prevents Exaile
    from closing; calling this object runs the supplied exit hook,
    restores the default builtins, and then invokes the real builtin.
    """

    def __init__(self, exit, name):
        self.exit = exit
        self.name = name

    def __repr__(self):
        return 'Type {0}() to exit.'.format(self.name)

    __str__ = __repr__

    def __call__(self):
        self.exit()     # Passed-in exit function
        site.setquit()  # Restore default builtins
        exit()          # Call the (now restored) builtin
class IPView(ip.IPythonView):
    '''Extend IPythonView to support closing with Ctrl+D'''
    def onKeyPressExtend(self, event):
        # Give the base class first crack; a truthy return means it
        # consumed the keystroke.
        if ip.IPythonView.onKeyPressExtend(self, event):
            return True
        if event.string == '\x04':
            # ctrl+d closes the console widget
            self.destroy()
class IPyConsole(Gtk.Window):
    """
    A gtk Window with an embedded IPython Console.
    """
    def __init__(self, namespace):
        # `namespace` maps names to objects exposed inside the shell.
        Gtk.Window.__init__(self)

        self.set_title(_("IPython Console - Exaile"))
        self.set_size_request(750,550)
        self.set_resizable(True)

        sw = Gtk.ScrolledWindow()
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)

        ipv = IPView()
        ipv.connect('destroy', lambda *x: self.destroy())
        # so it's exposed in the shell
        self.ipv = ipv

        # change display to emulate dark gnome-terminal
        console_font = settings.get_option('plugin/ipconsole/font', FONT)
        text_color = settings.get_option('plugin/ipconsole/text_color',
                                         'lavender')
        bg_color = settings.get_option('plugin/ipconsole/background_color',
                                       'black')
        iptheme = settings.get_option('plugin/ipconsole/iptheme', 'Linux')

        ipv.modify_font(Pango.FontDescription(console_font))
        ipv.set_wrap_mode(Gtk.WrapMode.CHAR)
        ipv.modify_base(Gtk.StateType.NORMAL, Gdk.color_parse(bg_color))
        ipv.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse(text_color))
        # Older IPython exposes magic_colors; guard for newer versions.
        if hasattr(ipv.IP, 'magic_colors'):
            ipv.IP.magic_colors(iptheme)  # IPython color scheme

        opacity = settings.get_option('plugin/ipconsole/opacity', 80.0)
        # add a little transparency :)
        if opacity < 100: self.set_opacity(float(opacity) / 100.0)

        ipv.updateNamespace(namespace)       # expose exaile (passed in)
        ipv.updateNamespace({'self':self})   # Expose self to IPython

        # prevent exit and quit - freezes window? does bad things
        ipv.updateNamespace({'exit':None,
                             'quit':None})

        # This is so when exaile calls exit(), IP doesn't prompt and prevent
        # it from closing
        try:
            __builtin__.exit = Quitter(ipv.IP.magic_Exit, 'exit')
            __builtin__.quit = Quitter(ipv.IP.magic_Exit, 'quit')
        except AttributeError: # newer versions of IP don't need this
            pass

        ipv.show()

        # make it scrollable
        sw.add(ipv)
        sw.show()
        self.add(sw)
        self.show()

        # don't destroy the window on delete, hide it
        self.connect('delete_event',lambda x,y:False)
def _enable(exaile):
    """
    Enable plugin.
    Create menu item, and optionally open the console right away.
    """
    # add menuitem to tools menu
    item = menu.simple_menu_item('ipconsole', ['plugin-sep'], _('Show _IPython Console'),
                                 callback=lambda *x: show_console(exaile))
    providers.register('menubar-tools-menu', item)

    # Open the console immediately if the user enabled autostart.
    if settings.get_option('plugin/ipconsole/autostart', False):
        show_console(exaile)
def on_option_set(event, settings, option):
    """Live-apply a changed ipconsole preference to the open console.

    Every branch is a no-op while the console window is closed (PLUGIN is
    None).  Note: the ``settings`` parameter shadows the module-level
    ``xl.settings`` import; callers pass the settings manager in.
    """
    if option == 'plugin/ipconsole/opacity' and PLUGIN:
        value = settings.get_option(option, 80.0)
        value = float(value) / 100.0  # percent -> 0..1 window opacity
        PLUGIN.set_opacity(value)

    if option == 'plugin/ipconsole/font' and PLUGIN:
        value = settings.get_option(option, FONT)
        PLUGIN.ipv.modify_font(Pango.FontDescription(value))

    if option == 'plugin/ipconsole/text_color' and PLUGIN:
        value = settings.get_option(option, 'lavender')
        PLUGIN.ipv.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse(value))

    if option == 'plugin/ipconsole/background_color' and PLUGIN:
        value = settings.get_option(option, 'black')
        PLUGIN.ipv.modify_base(Gtk.StateType.NORMAL, Gdk.color_parse(value))

    if option == 'plugin/ipconsole/iptheme' and PLUGIN:
        value = settings.get_option(option, 'Linux')
        PLUGIN.ipv.IP.magic_colors(value)
def __enb(evt, exaile, nothing):
    # Defer the real setup to the GLib main loop so it runs on the UI
    # thread, and start listening for preference changes.
    GLib.idle_add(_enable, exaile)
    event.add_ui_callback(on_option_set, 'plugin_ipconsole_option_set')
def enable(exaile):
    """
    Called when plugin is enabled, or when exaile is loaded with the plugin
    on by default.

    Wait for exaile to fully load, then call _enable with idle priority.
    """
    if exaile.loading:
        event.add_callback(__enb, "gui_loaded")
    else:
        __enb(None, exaile, None)
def disable(exaile):
    """
    Called when the plugin is disabled: remove the menu item and close
    any open console window.
    """
    for item in providers.get('menubar-tools-menu'):
        if item.name == 'ipconsole':
            providers.unregister('menubar-tools-menu', item)
            break

    # if window is open, kill it
    if PLUGIN is not None:
        PLUGIN.destroy()
def show_console(exaile):
    """
    Display window when the menu item is clicked.

    Lazily creates the singleton console window, then raises it.
    """
    global PLUGIN
    if PLUGIN is None:
        # Imported here so the shell namespace gets the full packages.
        import xl, xlgui
        PLUGIN = IPyConsole({'exaile': exaile,
                             'xl':     xl,
                             'xlgui':  xlgui})
        PLUGIN.connect('destroy', console_destroyed)
    PLUGIN.present()
def console_destroyed(*args):
    """
    Called when the window is closed.  Drops the singleton so the next
    show_console() builds a fresh window.
    """
    global PLUGIN
    PLUGIN = None
if __name__ == '__main__':
    # If run outside of exaile: open a console with an empty namespace.
    con = IPyConsole({})
    con.connect('destroy', Gtk.main_quit)
    con.show()
    Gtk.main()
| Zarokka/exaile | plugins/ipconsole/__init__.py | Python | gpl-2.0 | 7,800 | [
"Brian"
] | 63eb632f84c266e981cf1668eb0bbfc7a6d189b370653f158607991b5b659117 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
  """Gradients for a Switch op is calculated using a Merge op.

  If the switch is a loop switch, it will be visited twice. We create
  the merge on the first visit, and update the other input of the merge
  on the second visit. A next_iteration is also added on second visit.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = op._get_control_flow_context()
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    merge_op = grad_ctxt.grad_state.switch_map.get(op)
    if merge_op:
      # This is the second time this Switch is visited. It comes from
      # the non-exit branch of the Switch, so update the second input
      # to the Merge.
      # TODO: Perform shape inference with this new input.
      # pylint: disable=protected-access
      merge_op._update_input(1, control_flow_ops._NextIteration(grad[1]))
      # pylint: enable=protected-access
      return None, None
    else:
      # This is the first time this Switch is visited. It always comes
      # from the Exit branch, which is grad[0]. grad[1] is empty at this point.
      # Use grad[0] for both inputs to merge for now, but update the second
      # input of merge when we see this Switch the second time.
      merge_fn = control_flow_ops._Merge  # pylint: disable=protected-access
      merge_op = merge_fn([grad[0], grad[0]], name="b_switch")[0]
      grad_ctxt.grad_state.switch_map[op] = merge_op.op
      return merge_op, None
  elif isinstance(op_ctxt, CondContext):
    # Inside a cond: route the gradient of the taken branch forward and a
    # zero gradient for the untaken branch.
    good_grad = grad[op_ctxt.branch]
    zero_grad = grad[1 - op_ctxt.branch]
    # At this point, we have created zero_grad guarded by the right switch.
    return merge([good_grad, zero_grad], name="cond_grad")[0], None
  else:
    # Plain Switch outside any control-flow context.
    false_grad = switch(grad[0], op.inputs[1])[0]
    true_grad = switch(grad[1], op.inputs[1])[1]
    return merge([false_grad, true_grad])[0], None


# The same gradient function serves the ref-typed variant.
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
input_op = op.inputs[0].op
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = input_op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(op_ctxt, CondContext):
pred = op_ctxt.pred
if grad_ctxt and grad_ctxt.grad_state:
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = grad_ctxt.grad_state
real_pred = grad_state.history_map.get(pred.name)
if real_pred is None:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(_, grad):
"""Gradients for an exit op are calculated using an Enter op."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If the attribute `back_prop` is false,
# no gradient computation.
return None
grad_ctxt.AddName(grad.name)
enter_fn = control_flow_ops._Enter # pylint: disable=protected-access
grad_ctxt.Enter()
result = enter_fn(grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# If the attribute `back_prop` is true, no gradient computation.
return grad
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
result = grad_ctxt.AddBackPropAccumulator(grad)
else:
result = exit(grad)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
| awni/tensorflow | tensorflow/python/ops/control_flow_grad.py | Python | apache-2.0 | 7,614 | [
"VisIt"
] | b65a8ed753f0cbb03a9fffa453d3423c8412c7d8eb2913cca75c52f63b583f57 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
%prog blastfile --qsizes query.sizes --ssizes subject.sizes
Visualize the blastfile in a dotplot. At least one of --qsizes and --qbed must
be specified, also at least one of --ssizes and --sbed. The --sizes options help
to define the molecule border as well as the drawing order. The --bed options
help to position names maker (e.g. genes) onto the dot plot. So depending on
whether you are BLASTing raw sequences or makers, you need to place --sizes or
--bed options.
"""
import os.path as op
import sys
import logging
import numpy as np
from random import sample
from jcvi.formats.blast import BlastLine
from jcvi.formats.sizes import Sizes
from jcvi.formats.bed import Bed, BedLine
from jcvi.apps.base import OptionParser
from jcvi.graphics.base import plt, Rectangle, set_human_base_axis, savefig
DotStyles = ("line", "circle", "dot")
def rename_seqid(seqid):
    """Compress a sequence id for display on dot-plot axes.

    Keeps only the last "_"-separated token, shortens common assembly
    prefixes (``contig`` -> ``c``, ``scaffold``/``supercont`` -> ``s``),
    and rewrites a purely numeric remainder as ``c<number>``.

    >>> rename_seqid("abc_scaffold_12")
    'c12'
    >>> rename_seqid("scaffold12")
    's12'
    """
    seqid = seqid.split("_")[-1]
    seqid = seqid.replace("contig", "c").replace("scaffold", "s")
    seqid = seqid.replace("supercont", "s")
    try:
        # A bare number (e.g. "12" left after stripping "scaffold_")
        # becomes "c12".
        seqid = "c%d" % int(seqid)
    except ValueError:
        # Fix: was a bare `except:` that swallowed every exception; only a
        # non-numeric remainder is expected here, so catch ValueError and
        # keep the shortened name as-is.
        pass
    return seqid
def blastplot(ax, blastfile, qsizes, ssizes, qbed, sbed,
              style="dot", proportional=False, sampleN=None,
              baseticks=False, insetLabels=False, stripNames=False,
              highlights=None):
    """Draw a dot plot of BLAST hits onto axes `ax`.

    qsizes/ssizes map seqids to concatenated coordinates; qbed/sbed (may be
    None) translate feature names into positions.  `sampleN` caps the number
    of plotted hits; `highlights` is a list of BedLine (subject only) or a
    (query, subject) pair of such lists.

    NOTE(review): `proportional` is accepted but unused inside this
    function.  The label/tick code calls a global `root` figure axes that
    is not defined in this module's visible scope -- presumably created by
    the calling script; confirm before reuse.
    """
    assert style in DotStyles
    fp = open(blastfile)

    qorder = qbed.order if qbed else None
    sorder = sbed.order if sbed else None

    data = []
    for row in fp:
        b = BlastLine(row)
        query, subject = b.query, b.subject
        if stripNames:
            # Drop trailing ".N" version/isoform suffixes.
            query = query.rsplit(".", 1)[0]
            subject = subject.rsplit(".", 1)[0]

        if qorder:
            # Map feature name -> (index, bed record); skip unknown names.
            if query not in qorder:
                continue
            qi, q = qorder[query]
            query = q.seqid
            qstart, qend = q.start, q.end
        else:
            qstart, qend = b.qstart, b.qstop

        if sorder:
            if subject not in sorder:
                continue
            si, s = sorder[subject]
            subject = s.seqid
            sstart, send = s.start, s.end
        else:
            sstart, send = b.sstart, b.sstop

        # Convert per-sequence coordinates to concatenated genome positions.
        qi = qsizes.get_position(query, qstart)
        qj = qsizes.get_position(query, qend)
        si = ssizes.get_position(subject, sstart)
        sj = ssizes.get_position(subject, send)
        if None in (qi, si):
            continue
        data.append(((qi, qj), (si, sj)))

    if sampleN:
        # Thin out dense plots by random sub-sampling.
        if len(data) > sampleN:
            data = sample(data, sampleN)

    if not data:
        return logging.error("no blast data imported")

    xsize, ysize = qsizes.totalsize, ssizes.totalsize
    logging.debug("xsize=%d ysize=%d" % (xsize, ysize))

    if style == "line":
        # Draw each hit as a short segment from (qstart,sstart) to (qend,send).
        for a, b in data:
            ax.plot(a, b, 'ro-', mfc="w", mec="r", ms=3)
    else:
        # Point styles only need the start coordinates.
        data = [(x[0], y[0]) for x, y in data]
        x, y = zip(*data)
        if style == "circle":
            ax.plot(x, y, 'mo', mfc="w", mec="m", ms=3)
        elif style == "dot":
            ax.scatter(x, y, s=3, lw=0)

    xlim = (0, xsize)
    ylim = (ysize, 0)  # invert the y-axis

    xchr_labels, ychr_labels = [], []
    ignore = True  # tag to mark whether to plot chr name (skip small ones)
    #ignore_size_x = xsize * .02
    #ignore_size_y = ysize * .02
    ignore_size_x = ignore_size_y = 0

    # plot the chromosome breaks
    logging.debug("xbreaks={0} ybreaks={1}".format(len(qsizes), len(ssizes)))
    for (seqid, beg, end) in qsizes.get_breaks():
        ignore = abs(end - beg) < ignore_size_x
        if ignore:
            continue
        seqid = rename_seqid(seqid)
        xchr_labels.append((seqid, (beg + end) / 2, ignore))
        ax.plot([end, end], ylim, "-", lw=1, color="grey")

    for (seqid, beg, end) in ssizes.get_breaks():
        ignore = abs(end - beg) < ignore_size_y
        if ignore:
            continue
        seqid = rename_seqid(seqid)
        ychr_labels.append((seqid, (beg + end) / 2, ignore))
        ax.plot(xlim, [end, end], "-", lw=1, color="grey")

    # plot the chromosome labels
    for label, pos, ignore in xchr_labels:
        if not ignore:
            if insetLabels:
                ax.text(pos, 0, label, size=8,
                        ha="center", va="top", color="grey")
            else:
                # Labels outside the axes, in figure coordinates on `root`.
                pos = .1 + pos * .8 / xsize
                root.text(pos, .91, label, size=10,
                          ha="center", va="bottom", rotation=45, color="grey")

    # remember y labels are inverted
    for label, pos, ignore in ychr_labels:
        if not ignore:
            if insetLabels:
                continue
            pos = .9 - pos * .8 / ysize
            root.text(.91, pos, label, size=10,
                      va="center", color="grey")

    # Highlight regions based on a list of BedLine
    qhighlights = shighlights = None
    if highlights:
        if isinstance(highlights[0], BedLine):
            shighlights = highlights
        elif len(highlights) == 2:
            qhighlights, shighlights = highlights

    if qhighlights:
        for hl in qhighlights:
            hls = qsizes.get_position(hl.seqid, hl.start)
            ax.add_patch(Rectangle((hls, 0), hl.span, ysize,
                                   fc="r", alpha=.2, lw=0))
    if shighlights:
        for hl in shighlights:
            hls = ssizes.get_position(hl.seqid, hl.start)
            ax.add_patch(Rectangle((0, hls), xsize, hl.span,
                                   fc="r", alpha=.2, lw=0))

    if baseticks:

        def increaseDensity(a, ratio=4):
            # Insert extra ticks between existing ones by shrinking the step.
            assert len(a) > 1
            stepsize = a[1] - a[0]
            newstepsize = int(stepsize / ratio)
            return np.arange(0, a[-1], newstepsize)

        # Increase the density of the ticks
        xticks = ax.get_xticks()
        yticks = ax.get_yticks()
        xticks = increaseDensity(xticks, ratio=2)
        yticks = increaseDensity(yticks, ratio=2)
        ax.set_xticks(xticks)
        #ax.set_yticks(yticks)

        # Plot outward ticklines
        for pos in xticks[1:]:
            if pos > xsize:
                continue
            pos = .1 + pos * .8 / xsize
            root.plot((pos, pos), (.08, .1), '-', color="grey", lw=2)

        for pos in yticks[1:]:
            if pos > ysize:
                continue
            pos = .9 - pos * .8 / ysize
            root.plot((.09, .1), (pos, pos), '-', color="grey", lw=2)

    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    # beautify the numeric axis
    for tick in ax.get_xticklines() + ax.get_yticklines():
        tick.set_visible(False)

    set_human_base_axis(ax)

    plt.setp(ax.get_xticklabels() + ax.get_yticklabels(),
             color='gray', size=10)
    plt.setp(ax.get_yticklabels(), rotation=90)
if __name__ == "__main__":
    from jcvi.formats.bed import sizes

    # Command-line driver: plot a dot plot from a BLAST tabular file, with
    # chromosome sizes taken either from explicit sizes files or derived
    # from the supplied bed files.
    p = OptionParser(__doc__)
    p.add_option("--qsizes", help="Path to two column qsizes file")
    p.add_option("--ssizes", help="Path to two column ssizes file")
    p.add_option("--qbed", help="Path to qbed")
    p.add_option("--sbed", help="Path to sbed")
    p.add_option("--qselect", default=0, type="int",
                 help="Minimum size of query contigs to select [default: %default]")
    p.add_option("--sselect", default=0, type="int",
                 help="Minimum size of subject contigs to select [default: %default]")
    p.add_option("--qh", help="Path to highlight bed for query")
    p.add_option("--sh", help="Path to highlight bed for subject")
    p.add_option("--dotstyle", default="dot", choices=DotStyles,
                 help="Style of the dots [default: %default]")
    p.add_option("--proportional", default=False, action="store_true",
                 help="Make image width:height equal to seq ratio [default: %default]")
    p.add_option("--stripNames", default=False, action="store_true",
                 help="Remove trailing .? from gene names [default: %default]")
    p.add_option("--nmax", default=None, type="int",
                 help="Only plot maximum of N dots [default: %default]")
    opts, args, iopts = p.set_image_options(figsize="8x8", style="dark", dpi=150)

    qsizes, ssizes = opts.qsizes, opts.ssizes
    qbed, sbed = opts.qbed, opts.sbed
    proportional = opts.proportional

    if len(args) != 1:
        sys.exit(not p.print_help())

    # A bed file implies its sizes: derive them when not given explicitly.
    if qbed:
        qsizes = qsizes or sizes([qbed])
        qbed = Bed(qbed)
    if sbed:
        ssizes = ssizes or sizes([sbed])
        sbed = Bed(sbed)

    # Fixed error-message typo: "of --bed" -> "or --bed".
    assert qsizes and ssizes, \
        "You must specify at least one of --sizes or --bed"

    qsizes = Sizes(qsizes, select=opts.qselect)
    ssizes = Sizes(ssizes, select=opts.sselect)

    blastfile, = args
    image_name = op.splitext(blastfile)[0] + "." + opts.format

    plt.rcParams["xtick.major.pad"] = 16
    plt.rcParams["ytick.major.pad"] = 16

    # Fix the width
    xsize, ysize = qsizes.totalsize, ssizes.totalsize

    # get highlight beds
    qh, sh = opts.qh, opts.sh
    qh = Bed(qh) if qh else None
    sh = Bed(sh) if sh else None
    highlights = (qh, sh) if qh or sh else None

    ratio = ysize * 1. / xsize if proportional else 1
    width = iopts.w
    height = iopts.h * ratio
    fig = plt.figure(1, (width, height))
    root = fig.add_axes([0, 0, 1, 1])  # the whole canvas
    ax = fig.add_axes([.1, .1, .8, .8])  # the dot plot

    blastplot(ax, blastfile, qsizes, ssizes, qbed, sbed,
              style=opts.dotstyle, proportional=proportional, sampleN=opts.nmax,
              baseticks=True, stripNames=opts.stripNames, highlights=highlights)

    # add genome names (basename of the sizes file, up to the first dot)
    to_ax_label = lambda fname: op.basename(fname).split(".")[0]
    gx, gy = [to_ax_label(x.filename) for x in (qsizes, ssizes)]
    ax.set_xlabel(gx, size=16)
    ax.set_ylabel(gy, size=16)

    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
| sgordon007/jcvi_062915 | graphics/blastplot.py | Python | bsd-2-clause | 9,940 | [
"BLAST"
] | a1fb423e10e498437e4b8293ffa5f00c858c0b3416ab4ca4f09bced528fef180 |
import io
import os
import re
from setuptools import setup
def read(path, encoding='utf-8'):
    """Return the text content of *path*, resolved relative to this file."""
    full_path = os.path.join(os.path.dirname(__file__), path)
    with io.open(full_path, encoding=encoding) as stream:
        return stream.read()
def version(path):
    """Obtain the package version from a python file, e.g. pkg/__init__.py.

    See <https://packaging.python.org/en/latest/single_source_version.html>.
    """
    source = read(path)
    match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""", source, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# --- package metadata, consumed by the setup() call below -------------------
DESCRIPTION = "Flexible and scikit-learn compatible Mapper implementation"
LONG_DESCRIPTION = """
cartographer: Flexible and scikit-learn compatible Mapper implementation
======================================================
This package implements a scikit-learn API compatible implementation of
the Mapper algorithm.
For more information, visit http://github.com/pablodecm/cartographer
"""
NAME = "cartographer"
AUTHOR = "Pablo de Castro"
AUTHOR_EMAIL = "pablodecm@gmail.com"
MAINTAINER = "Pablo de Castro"
MAINTAINER_EMAIL = "pablodecm@gmail.com"
URL = 'http://github.com/pablodecm/cartographer'
DOWNLOAD_URL = 'http://github.com/pablodecm/cartographer'
LICENSE = 'MIT'
# Single-sourced version: parsed out of cartographer/__init__.py by version().
VERSION = version('cartographer/__init__.py')
INSTALL_REQUIRES = ["numpy",
                    "scipy",
                    "scikit-learn"]

setup(name=NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      url=URL,
      download_url=DOWNLOAD_URL,
      license=LICENSE,
      packages=['cartographer',
                'cartographer.tests',
                ],
      # non-Python file shipped with the package (used to render Mapper
      # graphs, presumably -- confirm against cartographer sources)
      package_data={'cartographer': ['graph_template.html']},
      install_requires = INSTALL_REQUIRES,
      classifiers=[
          'Environment :: Console',
          'Intended Audience :: Science/Research',
          'Natural Language :: English',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5']
      )
| pablodecm/cartographer | setup.py | Python | mit | 2,305 | [
"VisIt"
] | ba1ecb2b2bac36de3514f5edfc28fa063e28bac6e9f935a875ea469ebfad2f9c |
from sympy.physics.matrices import msigma, mgamma, minkowski_tensor, pat_matrix, mdft
from sympy import zeros, eye, I, Matrix, sqrt, Rational
def test_parallel_axis_theorem():
    """Compare pat_matrix output against hand-computed reference matrices."""
    # (dx, dy, dz) displacement -> expected matrix for unit mass
    cases = [
        ((1, 1, 1), Matrix(((2, -1, -1), (-1, 2, -1), (-1, -1, 2)))),
        ((1, 0, 0), Matrix(((0, 0, 0), (0, 1, 0), (0, 0, 1)))),
        ((0, 1, 0), Matrix(((1, 0, 0), (0, 0, 0), (0, 0, 1)))),
        ((0, 0, 1), Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 0)))),
    ]
    for (dx, dy, dz), expected in cases:
        # unit mass, then doubling the mass must scale the matrix linearly
        assert pat_matrix(1, dx, dy, dz) == expected
        assert pat_matrix(2, dx, dy, dz) == 2*expected
def test_Pauli():
    # This and the following test exercise both the Pauli and Dirac matrices
    # and also that the general Matrix class works correctly in a real world
    # situation.
    s1 = msigma(1)
    s2 = msigma(2)
    s3 = msigma(3)

    assert s1 == s1
    assert s1 != s2

    # cyclic products: sigma_i * sigma_j == I * sigma_k  (sigma*I -> I*sigma,
    # see #354)
    assert s1*s2 == s3*I
    assert s3*s1 == s2*I
    assert s2*s3 == s1*I

    # each Pauli matrix squares to the 2x2 identity
    for s in (s1, s2, s3):
        assert s*s == eye(2)

    assert s1*2*s1 == 2*eye(2)
    assert s1*s3*s1 == -s3
def test_Dirac():
    """Basic identities of the Dirac gamma matrices."""
    g0, g1, g2, g3, g5 = (mgamma(mu) for mu in (0, 1, 2, 3, 5))

    # gamma5 == gamma0*gamma1*gamma2*gamma3*I  (gamma*I -> I*gamma, see #354)
    assert g5 == g0 * g1 * g2 * g3 * I
    # distinct gammas anticommute
    assert g1 * g2 + g2 * g1 == zeros(4)
    # squares reproduce the metric entries
    assert g0 * g0 == eye(4) * minkowski_tensor[0, 0]
    assert g2 * g2 != eye(4) * minkowski_tensor[0, 0]
    assert g2 * g2 == eye(4) * minkowski_tensor[2, 2]
    # lower-index variants obey the same gamma5 identity
    assert mgamma(5, True) == \
        mgamma(0, True)*mgamma(1, True)*mgamma(2, True)*mgamma(3, True)*I
def test_mdft():
    """Discrete Fourier transform matrices of sizes 1, 2 and 4."""
    assert mdft(1) == Matrix([[1]])
    assert mdft(2) == 1/sqrt(2)*Matrix([[1, 1], [1, -1]])
    half = Rational(1, 2)
    expected = Matrix([
        [half,  half,  half,  half],
        [half,  -I/2, -half,   I/2],
        [half, -half,  half, -half],
        [half,   I/2, -half,  -I/2],
    ])
    assert mdft(4) == expected
| wxgeo/geophar | wxgeometrie/sympy/physics/tests/test_physics_matrices.py | Python | gpl-2.0 | 2,619 | [
"DIRAC"
] | f35a037420413a10fa3cd1dcc8bf028c02e7d22c547571a4754e135ce30b3a24 |
"""
A simple VTK input file for PyQt, the qt bindings for python.
See http://www.trolltech.com for qt documentation, and
http://www.river-bank.demon.co.uk or http://www.thekompany.com
for the qt python bindings.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
"""
"""
This class works with the UNIX and Win32 versions of Qt.
Depending on the OpenGL graphics drivers, it may not
be possible to have more than one QVTKRenderWidget
per application.
In short, this class is experimental.
"""
# To do for Win32:
# 1. More testing to assure that the widget is always cleaned up
# properly and does not crash the application.
import qt
import vtk
class QVTKRenderWindowInteractor(qt.QWidget):
    """ A QVTKRenderWindowInteractor for Python and Qt.  Uses a
    vtkGenericRenderWindowInteractor to handle the interactions.  Use
    GetRenderWindow() to get the vtkRenderWindow.  Create with the
    keyword stereo=1 in order to generate a stereo-capable window.

    The user interface is summarized in vtkInteractorStyle.h:

    - Keypress j / Keypress t: toggle between joystick (position
    sensitive) and trackball (motion sensitive) styles. In joystick
    style, motion occurs continuously as long as a mouse button is
    pressed. In trackball style, motion occurs when the mouse button
    is pressed and the mouse pointer moves.

    - Keypress c / Keypress o: toggle between camera and object
    (actor) modes. In camera mode, mouse events affect the camera
    position and focal point. In object mode, mouse events affect
    the actor that is under the mouse pointer.

    - Button 1: rotate the camera around its focal point (if camera
    mode) or rotate the actor around its origin (if actor mode). The
    rotation is in the direction defined from the center of the
    renderer's viewport towards the mouse position. In joystick mode,
    the magnitude of the rotation is determined by the distance the
    mouse is from the center of the render window.

    - Button 2: pan the camera (if camera mode) or translate the actor
    (if object mode). In joystick mode, the direction of pan or
    translation is from the center of the viewport towards the mouse
    position. In trackball mode, the direction of motion is the
    direction the mouse moves. (Note: with 2-button mice, pan is
    defined as <Shift>-Button 1.)

    - Button 3: zoom the camera (if camera mode) or scale the actor
    (if object mode). Zoom in/increase scale if the mouse position is
    in the top half of the viewport; zoom out/decrease scale if the
    mouse position is in the bottom half. In joystick mode, the amount
    of zoom is controlled by the distance of the mouse pointer from
    the horizontal centerline of the window.

    - Keypress 3: toggle the render window into and out of stereo
    mode. By default, red-blue stereo pairs are created. Some systems
    support Crystal Eyes LCD stereo glasses; you have to invoke
    SetStereoTypeToCrystalEyes() on the rendering window.  Note: to
    use stereo you also need to pass a stereo=1 keyword argument to
    the constructor.

    - Keypress e: exit the application.

    - Keypress f: fly to the picked point

    - Keypress p: perform a pick operation. The render window interactor
    has an internal instance of vtkCellPicker that it uses to pick.

    - Keypress r: reset the camera view along the current view
    direction. Centers the actors and moves the camera so that all actors
    are visible.

    - Keypress s: modify the representation of all actors so that they
    are surfaces.

    - Keypress u: invoke the user-defined function. Typically, this
    keypress will bring up an interactor that you can type commands in.

    - Keypress w: modify the representation of all actors so that they
    are wireframe.
    """

    def __init__(self, parent=None, name=None, *args, **kw):
        # the current button
        self._ActiveButton = 0

        # private attributes
        self.__oldFocus = None
        self.__saveX = 0
        self.__saveY = 0
        self.__saveState = 0
        self.__connected = 0  # is QT->VTK connection done?

        # do special handling of some keywords:
        # stereo, rw
        stereo = 0
        if kw.has_key('stereo'):
            if kw['stereo']:
                stereo = 1
            del kw['stereo']
        rw = None
        if kw.has_key('rw'):
            rw = kw['rw']
            del kw['rw']
        # create qt-level widget
        # You cannot pass kw anymore, you'll a TypeError: keyword arguments are not supported
        # http://goldenspud.com/webrog/archives/2004/07/20/pyqt-platform-inconsistencies/
        apply(qt.QWidget.__init__, (self, parent, name) + args)

        if rw:  # user-supplied render window
            self._RenderWindow = rw
        else:
            self._RenderWindow = vtk.vtkRenderWindow()

        if stereo:  # stereo mode
            self._RenderWindow.StereoCapableWindowOn()
            self._RenderWindow.SetStereoTypeToCrystalEyes()

        # the generic interactor lets us forward Qt events to VTK by hand
        self._Iren = vtk.vtkGenericRenderWindowInteractor()
        self._Iren.SetRenderWindow(self._RenderWindow)

        # do all the necessary qt setup
        self.setBackgroundMode(qt.Qt.NoBackground)
        self.setMouseTracking(1)  # get all mouse events
        self.setFocusPolicy(qt.QWidget.ClickFocus)
        if parent == None:
            self.show()

        # VTK timer requests are serviced through this Qt timer
        self._Timer = qt.QTimer(self, 'timer handler')
        self.connect(self._Timer, qt.SIGNAL('timeout()'),
                     self.TimerEvent)
        self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
        self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)

    def __getattr__(self, attr):
        """Makes the object behave like a
        vtkGenericRenderWindowInteractor"""
        # delegate unknown attributes: first to the VTK interactor, then to
        # the underlying QWidget (via sipThis)
        if attr == '__vtk__':
            return lambda t=self._Iren: t
        elif hasattr(self._Iren, attr):
            return getattr(self._Iren, attr)
        elif hasattr(qt.QWidget, attr):
            return getattr(self.sipThis, attr)
        else:
            raise AttributeError, self.__class__.__name__ + \
                  " has no attribute named " + attr

    def CreateTimer(self, obj, evt):
        # VTK asked for a timer: fire TimerEvent every 10 ms
        self._Timer.start(10)

    def DestroyTimer(self, obj, evt):
        self._Timer.stop()
        return 1

    def TimerEvent(self):
        # forward the Qt timeout to the VTK interactor
        self._Iren.TimerEvent()

    def polish(self):
        """Final initialization just before the widget is displayed."""
        size = self.size()
        self._Iren.SetSize(size.width(), size.height())
        # hand the native window id over to VTK so it renders in-place
        self._RenderWindow.SetWindowInfo(str(int(self.winId())))
        self._Iren.ConfigureEvent()
        self.__connected = 1

    def show(self):
        qt.QWidget.show(self)
        self.update()  # needed for initial contents display on Win32

    def paintEvent(self, ev):
        # only render once the QT->VTK connection is established in polish()
        if self.__connected:
            self.Render()

    def resizeEvent(self, ev):
        size = self.size()
        self._Iren.SetSize(size.width(), size.height())
        self._Iren.ConfigureEvent()
        self.update()

    def _GetCtrlShift(self, ev):
        # Decode the modifier keys from a Qt event state bitmask; falls back
        # to the state saved by the last mouse-move when the event has none.
        # NOTE(review): masks 8/16 look like the Qt3 ShiftButton/ControlButton
        # flags -- confirm against the qt module in use.
        ctrl, shift = 0, 0
        if hasattr(ev, 'state'):
            if (ev.state() & 8):
                shift = 1
            if (ev.state() & 16):
                ctrl = 1
        elif self.__saveState:
            if (self.__saveState & 8):
                shift = 1
            if (self.__saveState & 16):
                ctrl = 1
        return ctrl, shift

    def enterEvent(self, ev):
        # grab keyboard focus while the pointer is over the widget, remember
        # the previous focus owner so leaveEvent can restore it
        if not self.hasFocus():
            self.__oldFocus = self.focusWidget()
            self.setFocus()
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.EnterEvent()

    def leaveEvent(self, ev):
        # restore focus only when no mouse button is held down
        if (self.__saveState & 0x7) == 0 and self.__oldFocus:
            self.__oldFocus.setFocus()
            self.__oldFocus = None
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.LeaveEvent()

    def mousePressEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        repeat = 0
        if ev.type() == qt.QEvent.MouseButtonDblClick:
            repeat = 1
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), repeat, None)
        # remember which button is down so the matching release can be sent
        self._ActiveButton = 0
        if ev.button() == 1:
            self._Iren.LeftButtonPressEvent()
            self._ActiveButton = 'Left'
        elif ev.button() == 2:
            self._Iren.RightButtonPressEvent()
            self._ActiveButton = 'Right'
        elif ev.button() == 4:
            self._Iren.MiddleButtonPressEvent()
            self._ActiveButton = 'Middle'

    def mouseReleaseEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), 0, None)
        if self._ActiveButton == 'Right':
            self._Iren.RightButtonReleaseEvent()
        elif self._ActiveButton == 'Left':
            self._Iren.LeftButtonReleaseEvent()
        elif self._ActiveButton == 'Middle':
            self._Iren.MiddleButtonReleaseEvent()

    def mouseMoveEvent(self, ev):
        # cache position/state for events (enter/leave/key) that lack them
        self.__saveState = ev.state()
        self.__saveX = ev.x()
        self.__saveY = ev.y()
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.MouseMoveEvent()

    def keyPressEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        key = chr(0)
        if ev.key() < 256:
            key = str(ev.text())
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, key, 0, None)
        self._Iren.KeyPressEvent()
        self._Iren.CharEvent()

    def keyReleaseEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        key = chr(0)
        if ev.key() < 256:
            key = chr(ev.key())
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, key, 0, None)
        self._Iren.KeyReleaseEvent()

    def GetRenderWindow(self):
        return self._RenderWindow

    def Render(self):
        self._RenderWindow.Render()
#-----------------------------------------------------------------------
def QVTKRenderWidgetConeExample():
    """A simple example that uses the QVTKRenderWindowInteractor
    class. """
    # every QT app needs an app
    app = qt.QApplication(['QVTKRenderWindowInteractor'])

    # create and start the interactor widget
    interactor = QVTKRenderWindowInteractor()
    interactor.Initialize()
    interactor.Start()
    # if you dont want the 'q' key to exit comment this.
    interactor.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())

    renderer = vtk.vtkRenderer()
    interactor.GetRenderWindow().AddRenderer(renderer)

    # pipeline: cone source -> mapper -> actor
    cone_source = vtk.vtkConeSource()
    cone_source.SetResolution(8)

    cone_mapper = vtk.vtkPolyDataMapper()
    cone_mapper.SetInput(cone_source.GetOutput())

    cone_actor = vtk.vtkActor()
    cone_actor.SetMapper(cone_mapper)

    renderer.AddActor(cone_actor)

    # show the widget
    interactor.show()
    # close the application when window is closed
    app.setMainWidget(interactor)
    # start event processing
    app.exec_loop()
if __name__ == "__main__":
    # run the interactive cone demo when executed as a script
    QVTKRenderWidgetConeExample()
| b3c/VTK-5.8 | Wrapping/Python/vtk/qt/QVTKRenderWindowInteractor.py | Python | bsd-3-clause | 11,991 | [
"CRYSTAL",
"VTK"
] | 028ffda460924a2305cf75cf6babeafc7d6f934c074c55687e4daa51379a0795 |
#!/usr/bin/env python
import os
from log import get_logger, interlog
import uuid
import numpy as np
from generate import generate_pseudo, test_pseudo
from calc_delta import DeltaCalculation
def find_pseudo(settings):
    """Generate a pseudopotential for one element, test it, and run the
    delta-factor calculation in a uniquely named subdirectory."""
    workdir = os.getcwd()
    element = settings.calc["element"]
    fdf_file = os.path.join(workdir, "siesta.fdf")

    # short random tag so repeated runs for the same element do not collide
    run_id = uuid.uuid4().hex[:8]

    # make directories for the calculation
    run_dir = os.path.join(element, run_id)
    if not os.path.exists(run_dir):
        os.makedirs(run_dir)

    # logging
    log = get_logger('find_pseudo', element)
    delta_calc = DeltaCalculation(settings, run_id, log)
    log.info("Pseudo radii: {}".format(settings.radii))

    pseudo_file, err_pseudo = generate_pseudo(settings.calc,
                                              settings.electrons,
                                              settings.radii)
    err_mean, err_max = test_pseudo(settings.calc, settings.configs)
    message = """
Pseudo error (ground state) = {err_pseudo:.4} Ry
                                  max    mean
Pseudo error (test configs) = {err_max:6.4} {err_mean:6.4} Ry"""
    log.info(message.format(err_pseudo=err_pseudo, err_max=err_max,
                            err_mean=err_mean))

    delta_calc.add_pseudo(pseudo_file)
    delta_calc.run_calcs(fdf_file)
    # restore the original working directory before collecting the delta
    os.chdir(workdir)
    delta_calc.get_delta()
    interlog(log)
| ansobolev/PseudoGenerator | pseudogen/find_pseudo.py | Python | mit | 1,311 | [
"SIESTA"
] | 34d4a5cb0bff4374e7c1b8118f2066cb1df6da2080df0f473f83881c5243585c |
"""
Tests for geography support in PostGIS
"""
import os
from unittest import skipIf, skipUnless
from django.contrib.gis.db import models
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.measure import D
from django.db import connection
from django.db.models.functions import Cast
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from ..utils import FuncTestMixin, oracle, postgis, spatialite
from .models import City, County, Zipcode
class GeographyTest(TestCase):
    # City/Zipcode/County geography fixtures shared by all tests below.
    fixtures = ['initial']

    def test01_fixture_load(self):
        "Ensure geography features loaded properly."
        self.assertEqual(8, City.objects.count())

    @skipIf(spatialite, "SpatiaLite doesn't support distance lookups with Distance objects.")
    @skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
    def test02_distance_lookup(self):
        "Testing distance lookup support on non-point geography fields."
        z = Zipcode.objects.get(code='77002')
        # distance_lte and dwithin must agree for the same 500-mile radius
        cities1 = list(City.objects
                       .filter(point__distance_lte=(z.poly, D(mi=500)))
                       .order_by('name')
                       .values_list('name', flat=True))
        cities2 = list(City.objects
                       .filter(point__dwithin=(z.poly, D(mi=500)))
                       .order_by('name')
                       .values_list('name', flat=True))
        for cities in [cities1, cities2]:
            self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)

    @skipUnless(postgis, "This is a PostGIS-specific test")
    def test04_invalid_operators_functions(self):
        "Ensuring exceptions are raised for operators & functions invalid on geography fields."
        # Only a subset of the geometry functions & operator are available
        # to PostGIS geography types. For more information, visit:
        # http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
        z = Zipcode.objects.get(code='77002')
        # ST_Within not available.
        with self.assertRaises(ValueError):
            City.objects.filter(point__within=z.poly).count()
        # `@` operator not available.
        with self.assertRaises(ValueError):
            City.objects.filter(point__contained=z.poly).count()

        # Regression test for #14060, `~=` was never really implemented for PostGIS.
        htown = City.objects.get(name='Houston')
        with self.assertRaises(ValueError):
            City.objects.get(point__exact=htown.point)

    def test05_geography_layermapping(self):
        "Testing LayerMapping support on models with geography fields."
        # There is a similar test in `layermap` that uses the same data set,
        # but the County model here is a bit different.
        from django.contrib.gis.utils import LayerMapping

        # Getting the shapefile and mapping dictionary.
        shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
        co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
        co_mapping = {'name': 'Name',
                      'state': 'State',
                      'mpoly': 'MULTIPOLYGON',
                      }
        # Reference county names, number of polygons, and state names.
        names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
        num_polys = [1, 2, 1, 19, 1]  # Number of polygons for each.
        st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']

        lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
        lm.save(silent=True, strict=True)

        for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
            # geography columns always store WGS84 (SRID 4326)
            self.assertEqual(4326, c.mpoly.srid)
            self.assertEqual(num_poly, len(c.mpoly))
            self.assertEqual(name, c.name)
            self.assertEqual(state, c.state)
class GeographyFunctionTests(FuncTestMixin, TestCase):
    fixtures = ['initial']

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_cast_aggregate(self):
        """
        Cast a geography to a geometry field for an aggregate function that
        expects a geometry input.
        """
        if not connection.ops.geography:
            self.skipTest("This test needs geography support")
        # reference extent (xmin, ymin, xmax, ymax) for Houston + Dallas
        expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
        res = City.objects.filter(
            name__in=('Houston', 'Dallas')
        ).aggregate(extent=models.Extent(Cast('point', models.PointField())))
        for val, exp in zip(res['extent'], expected):
            self.assertAlmostEqual(exp, val, 4)

    @skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
    def test_distance_function(self):
        """
        Testing Distance() support on non-point geography fields.
        """
        # backend-specific reference distances (metres)
        if oracle:
            ref_dists = [0, 4899.68, 8081.30, 9115.15]
        elif spatialite:
            # SpatiaLite returns non-zero distance for polygons and points
            # covered by that polygon.
            ref_dists = [326.61, 4899.68, 8081.30, 9115.15]
        else:
            ref_dists = [0, 4891.20, 8071.64, 9123.95]
        htown = City.objects.get(name='Houston')
        qs = Zipcode.objects.annotate(
            distance=Distance('poly', htown.point),
            distance2=Distance(htown.point, 'poly'),
        )
        for z, ref in zip(qs, ref_dists):
            self.assertAlmostEqual(z.distance.m, ref, 2)

        if postgis:
            # PostGIS casts geography to geometry when distance2 is calculated.
            ref_dists = [0, 4899.68, 8081.30, 9115.15]
        for z, ref in zip(qs, ref_dists):
            self.assertAlmostEqual(z.distance2.m, ref, 2)

        if not spatialite:
            # Distance function combined with a lookup.
            hzip = Zipcode.objects.get(code='77002')
            self.assertEqual(qs.get(distance__lte=0), hzip)

    @skipUnlessDBFeature("has_Area_function", "supports_area_geodetic")
    def test_geography_area(self):
        """
        Testing that Area calculations work on geography columns.
        """
        # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
        z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
        # Round to the nearest thousand as possible values (depending on
        # the database and geolib) include 5439084, 5439100, 5439101.
        rounded_value = z.area.sq_m
        rounded_value -= z.area.sq_m % 1000
        self.assertEqual(rounded_value, 5439000)

    @skipUnlessDBFeature("has_Area_function")
    @skipIfDBFeature("supports_area_geodetic")
    def test_geodetic_area_raises_if_not_supported(self):
        # backends without geodetic area support must refuse, not mis-compute
        with self.assertRaisesMessage(NotImplementedError, 'Area on geodetic coordinate systems not supported.'):
            Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
| edmorley/django | tests/gis_tests/geogapp/tests.py | Python | bsd-3-clause | 6,982 | [
"VisIt"
] | d7830e05b00e273e77d5c2e995af3ead6247f349a554543a0158c73a6468cf42 |
## polymer section
# check connectivity of atoms of a polymer
# sometimes, "a broekn polymer" may happen due to periodic boundary condition
# input:
# mda_system mda.Universe(structure,input)
# select selection command
# nmol number of polymer molecules
# cutoff cut-off distance to check connectivity
# mode check only the first frame unless 'all'
def check_traj_connectivity(mda_system, select, nmol, cutoff, mode):
    """Verify that consecutive atoms of every polymer chain stay connected.

    A "broken polymer" can appear in wrapped trajectories because of the
    periodic boundary condition, so any nearest-neighbour distance above
    *cutoff* is treated as an error.

    Args:
        mda_system: mda.Universe(structure, input)
        select:     selection command for the polymer atoms
        nmol:       number of polymer molecules
        cutoff:     cut-off distance to decide connectivity
        mode:       'all' checks every frame, 'random' one random frame,
                    anything else only the first frame

    Raises:
        ValueError:   if the selection size is not a multiple of nmol
        RuntimeError: if any bonded distance exceeds cutoff
    """
    print("polymer.check_traj_connectivity:")
    import random
    from scipy.spatial.distance import euclidean
    u = mda_system
    n_frames = len(u.trajectory)
    select_mol = u.select_atoms(select)
    if len(select_mol) % nmol != 0:
        raise ValueError(" wrong # molecules, (args.nmol, select_mol) {} {} ".format(nmol, len(select_mol)))
    n_deg = int(len(select_mol) / nmol)
    print(" assume all molecules has {} atoms".format(n_deg))
    dist_min = 100.0
    dist_max = 0
    # Select which frames to scan.  BUGFIX: the original bound the 'all'
    # result to a variable named set_ts but iterated set_tx, so 'all' mode
    # always raised NameError; the other modes iterated a bare Timestep
    # instead of a one-frame sequence.  Indexing u.trajectory[i] positions
    # the Universe on that frame, so wrapping the Timestep in a list gives
    # exactly one pass over the per-molecule checks.
    if 'all' in mode:
        frames = u.trajectory
        print(" active ALL mode")
    elif 'random' in mode:
        pick_frame = random.randrange(0, n_frames)
        frames = [u.trajectory[pick_frame]]
        print(" active RANDOM mode")
    else:
        frames = [u.trajectory[0]]
        print(" active FIRST mode")
    # check each selected frame
    i_frame = 0
    for ts in frames:
        for i_mol in range(nmol):
            # check the validity of atomic positions along this molecule
            for i_atom in range(n_deg - 1):
                inum = i_mol * n_deg + i_atom
                dist = euclidean(select_mol.positions[inum], select_mol.positions[inum + 1])
                if dist > cutoff:
                    print(" maybe due to the wrapped trajectory setting or too small cutoff.")
                    print(" please check wrapping option like gmx trjconv -pbc mol for Gromacs trajectory.")
                    raise RuntimeError("[{}th frame] {}th polymer ({}th atom) dist. = {} > cutoff {}".format(i_frame, i_mol, i_atom, dist, cutoff))
                if dist > dist_max:
                    dist_max = dist
                if dist < dist_min:
                    dist_min = dist
        i_frame = i_frame + 1
    print(" passed! with distance [{:.3f},{:.3f}] under {:.3f} cut-off".format(dist_min, dist_max, cutoff))
    return
| jht0664/Utility_python_gromacs | python/hjung/polymer.py | Python | mit | 2,031 | [
"Gromacs",
"MDAnalysis"
] | 2b8e3dbe64ad874334fa03d9a10f22f5a7807043c14e026e5a3be8ffe6b4ec6e |
"""
Tests for the Piwik template tags and filters.
"""
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.piwik import PiwikNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(PIWIK_DOMAIN_PATH='example.com', PIWIK_SITE_ID='345')
class PiwikTagTestCase(TagTestCase):
    """
    Tests for the ``piwik`` template tag.
    """

    def test_tag(self):
        # rendering through the template tag must emit the tracker snippet
        r = self.render_tag('piwik', 'piwik')
        self.assertTrue(' ? "https" : "http") + "://example.com/";' in r, r)
        self.assertTrue("_paq.push(['setSiteId', 345]);" in r, r)
        self.assertTrue('img src="http://example.com/piwik.php?idsite=345"'
                        in r, r)

    def test_node(self):
        # rendering the node directly must emit the same snippet
        r = PiwikNode().render(Context({}))
        self.assertTrue(' ? "https" : "http") + "://example.com/";' in r, r)
        self.assertTrue("_paq.push(['setSiteId', 345]);" in r, r)
        self.assertTrue('img src="http://example.com/piwik.php?idsite=345"'
                        in r, r)

    @override_settings(PIWIK_DOMAIN_PATH='example.com/piwik',
                       PIWIK_SITE_ID='345')
    def test_domain_path_valid(self):
        # a domain with a sub-path is allowed and gains a trailing slash
        r = self.render_tag('piwik', 'piwik')
        self.assertTrue(' ? "https" : "http") + "://example.com/piwik/";' in r,
                        r)

    @override_settings(PIWIK_DOMAIN_PATH=None)
    def test_no_domain(self):
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_SITE_ID=None)
    def test_no_siteid(self):
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_SITE_ID='x')
    def test_siteid_not_a_number(self):
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='http://www.example.com')
    def test_domain_protocol_invalid(self):
        # the setting must not include a protocol prefix
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='example.com/')
    def test_domain_slash_invalid(self):
        # nor a trailing slash
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
    def test_render_internal_ip(self):
        # requests from internal IPs get the snippet commented out
        req = HttpRequest()
        req.META['REMOTE_ADDR'] = '1.1.1.1'
        context = Context({'request': req})
        r = PiwikNode().render(context)
        self.assertTrue(r.startswith(
            '<!-- Piwik disabled on internal IP address'), r)
        self.assertTrue(r.endswith('-->'), r)

    def test_uservars(self):
        # custom variables default to 'page' scope when no scope is given
        context = Context({'piwik_vars': [(1, 'foo', 'foo_val'),
                                          (2, 'bar', 'bar_val', 'page'),
                                          (3, 'spam', 'spam_val', 'visit')]})
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik custom variable rendering. Expected:\n%s\nIn:\n%s'
        for var_code in ['_paq.push(["setCustomVariable", 1, "foo", "foo_val", "page"]);',
                         '_paq.push(["setCustomVariable", 2, "bar", "bar_val", "page"]);',
                         '_paq.push(["setCustomVariable", 3, "spam", "spam_val", "visit"]);']:
            self.assertIn(var_code, r, msg % (var_code, r))

    @override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
    def test_default_usertrack(self):
        # auto-identify picks up the username from the request user
        context = Context({
            'user': User(username='BDFL', first_name='Guido', last_name='van Rossum')
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nNot found:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertIn(var_code, r, msg % (var_code, r))

    def test_piwik_usertrack(self):
        context = Context({
            'piwik_identity': 'BDFL'
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nNot found:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertIn(var_code, r, msg % (var_code, r))

    def test_analytical_usertrack(self):
        # the generic analytical_identity variable works as well
        context = Context({
            'analytical_identity': 'BDFL'
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nNot found:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertIn(var_code, r, msg % (var_code, r))

    @override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
    def test_disable_usertrack(self):
        # an explicit None identity overrides auto-identify
        context = Context({
            'user': User(username='BDFL', first_name='Guido', last_name='van Rossum'),
            'piwik_identity': None
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nFound:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertNotIn(var_code, r, msg % (var_code, r))
| machtfit/django-analytical | analytical/tests/test_tag_piwik.py | Python | mit | 4,964 | [
"VisIt"
] | cb792dde7e0d54517d01542d8688d170d6b0a5a7d3d363eaa428f0ba14dcf2e4 |
import threading
import datetime
import sys
import os
import csv
import io
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from PyQt5.QtCore import QCoreApplication as QC
from opensnitch import ui_pb2
from opensnitch.config import Config
from opensnitch.version import version
from opensnitch.nodes import Nodes
from opensnitch.dialogs.preferences import PreferencesDialog
from opensnitch.dialogs.ruleseditor import RulesEditorDialog
from opensnitch.dialogs.processdetails import ProcessDetailsDialog
from opensnitch.customwidgets.main import ColorizedDelegate, ConnectionsTableModel
from opensnitch.customwidgets.generictableview import GenericTableModel
from opensnitch.customwidgets.addresstablemodel import AddressTableModel
from opensnitch.utils import Message, QuickHelp, AsnDB
DIALOG_UI_PATH = "%s/../res/stats.ui" % os.path.dirname(sys.modules[__name__].__file__)
class StatsDialog(QtWidgets.QDialog, uic.loadUiType(DIALOG_UI_PATH)[0]):
    """Main statistics window: connections, nodes, rules, hosts, procs,
    addresses, ports and users, each backed by a SQL view over the local DB.
    """
    # Colors used by the table delegates to colorize actions / node status.
    RED = QtGui.QColor(0xff, 0x63, 0x47)
    GREEN = QtGui.QColor(0x2e, 0x90, 0x59)
    PURPLE = QtGui.QColor(0x7f, 0x00, 0xff)
    # Qt signals used to marshal work back onto the GUI thread.
    _trigger = QtCore.pyqtSignal(bool, bool)
    settings_saved = QtCore.pyqtSignal()
    _status_changed_trigger = QtCore.pyqtSignal(bool)
    _shown_trigger = QtCore.pyqtSignal()
    _notification_trigger = QtCore.pyqtSignal(ui_pb2.Notification)
    _notification_callback = QtCore.pyqtSignal(ui_pb2.NotificationReply)
    # SQL fragments appended to queries (sort direction, row limits).
    SORT_ORDER = ["ASC", "DESC"]
    LIMITS = ["LIMIT 50", "LIMIT 100", "LIMIT 200", "LIMIT 300", ""]
    LAST_GROUP_BY = ""
    # column indexes of the general (connections) view
    COL_TIME = 0
    COL_NODE = 1
    COL_ACTION = 2
    COL_DSTIP = 3
    COL_PROTO = 4
    COL_PROCS = 5
    COL_RULES = 6
    GENERAL_COL_NUM = 7
    # column indexes of the aggregated stats views (hosts/procs/addrs/...)
    COL_WHAT = 0
    # column indexes of the rules view
    COL_R_NODE = 1
    COL_R_NAME = 2
    COL_R_ENABLED = 3
    COL_R_ACTION = 4
    COL_R_DURATION = 5
    COL_R_OP_TYPE = 6
    COL_R_OP_OPERAND = 7
    # column index of the PID in the procs view
    COL_PID = 6
    # tab indexes of the tab widget
    TAB_MAIN = 0
    TAB_NODES = 1
    TAB_RULES = 2
    TAB_HOSTS = 3
    TAB_PROCS = 4
    TAB_ADDRS = 5
    TAB_PORTS = 6
    TAB_USERS = 7
    # row indexes of the rules-tree entries
    RULES_TREE_APPS = 0
    RULES_TREE_NODES = 1
    RULES_TREE_PERMANENT = 0
    RULES_TREE_TEMPORARY = 1
    RULES_COMBO_PERMANENT = 1
    RULES_COMBO_TEMPORARY = 2
    RULES_TYPE_PERMANENT = 0
    RULES_TYPE_TEMPORARY = 1
    FILTER_TREE_APPS = 0
    FILTER_TREE_NODES = 3
    # FIXME: don't translate, used only for default argument on _update_status_label
    FIREWALL_DISABLED = "Disabled"
    # if the user clicks on an item of a table, it'll enter into the detail
    # view. From there, deny further clicks on the items.
    IN_DETAIL_VIEW = {
        TAB_MAIN: False,
        TAB_NODES: False,
        TAB_RULES: False,
        TAB_HOSTS: False,
        TAB_PROCS: False,
        TAB_ADDRS: False,
        TAB_PORTS: False,
        TAB_USERS: False
    }
    # restore scrollbar position when going back from a detail view
    LAST_SCROLL_VALUE = None
    # try to restore last selection
    LAST_SELECTED_ITEM = ""
    # delegate config shared by most tables: colorize by action, center text
    commonDelegateConf = {
        Config.ACTION_DENY: RED,
        Config.ACTION_REJECT: PURPLE,
        Config.ACTION_ALLOW: GREEN,
        'alignment': QtCore.Qt.AlignCenter | QtCore.Qt.AlignHCenter
    }
    # template of the per-tab config entries below (widgets filled in __init__)
    commonTableConf = {
        "name": "",
        "label": None,
        "cmd": None,
        "view": None,
        "model": None,
        "delegate": commonDelegateConf,
        "display_fields": "*"
    }
    # per-tab configuration: DB view name, widgets, delegate colors, the SQL
    # SELECT column list and the last sorting state.
    TABLES = {
        TAB_MAIN: {
            "name": "connections",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "time as Time, " \
                "node as Node, " \
                "action as Action, " \
                "CASE dst_host WHEN ''" \
                " THEN dst_ip || ' -> ' || dst_port " \
                " ELSE dst_host || ' -> ' || dst_port " \
                "END Destination, " \
                "protocol as Protocol, " \
                "process as Process, " \
                "rule as Rule",
            "group_by": LAST_GROUP_BY,
            "last_order_by": "1",
            "last_order_to": 1,
            "rows_selected": False
        },
        TAB_NODES: {
            "name": "nodes",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": {
                Config.ACTION_DENY: RED,
                Config.ACTION_REJECT: PURPLE,
                Config.ACTION_ALLOW: GREEN,
                Nodes.OFFLINE: RED,
                Nodes.ONLINE: GREEN,
                'alignment': QtCore.Qt.AlignCenter | QtCore.Qt.AlignHCenter
            },
            "display_fields": "last_connection as LastConnection, "\
                "addr as Addr, " \
                "status as Status, " \
                "hostname as Hostname, " \
                "daemon_version as Version, " \
                "daemon_uptime as Uptime, " \
                "daemon_rules as Rules," \
                "cons as Connections," \
                "cons_dropped as Dropped," \
                "version as Version",
            "header_labels": [],
            "last_order_by": "1",
            "last_order_to": 1,
            "rows_selected": False
        },
        TAB_RULES: {
            "name": "rules",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "*",
            "header_labels": [],
            "last_order_by": "2",
            "last_order_to": 0,
            "rows_selected": False
        },
        TAB_HOSTS: {
            "name": "hosts",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "*",
            "header_labels": [],
            "last_order_by": "2",
            "last_order_to": 1,
            "rows_selected": False
        },
        TAB_PROCS: {
            "name": "procs",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "*",
            "header_labels": [],
            "last_order_by": "2",
            "last_order_to": 1,
            "rows_selected": False
        },
        TAB_ADDRS: {
            "name": "addrs",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "*",
            "header_labels": [],
            "last_order_by": "2",
            "last_order_to": 1,
            "rows_selected": False
        },
        TAB_PORTS: {
            "name": "ports",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "*",
            "header_labels": [],
            "last_order_by": "2",
            "last_order_to": 1,
            "rows_selected": False
        },
        TAB_USERS: {
            "name": "users",
            "label": None,
            "cmd": None,
            "cmdCleanStats": None,
            "view": None,
            "filterLine": None,
            "model": None,
            "delegate": commonDelegateConf,
            "display_fields": "*",
            "header_labels": [],
            "last_order_by": "2",
            "last_order_to": 1,
            "rows_selected": False
        }
    }
def __init__(self, parent=None, address=None, db=None, dbname="db", appicon=None):
    """Build the statistics dialog.

    Args:
        parent: parent QWidget, or None.
        address: node address this window is bound to, or None.
        db: database wrapper providing get_db()/insert_rule()/update().
        dbname: logical name of the database.
        appicon: QIcon used as the window icon.
    """
    super(StatsDialog, self).__init__(parent)
    # NOTE(review): QDialog.__init__ runs twice (super() above already called
    # it); this second call adds the stay-on-top hint -- confirm the intent.
    QtWidgets.QDialog.__init__(self, parent, QtCore.Qt.WindowStaysOnTopHint)
    # None when XDG_CURRENT_DESKTOP is unset (used by the KDE workaround).
    self._current_desktop = os.environ.get("XDG_CURRENT_DESKTOP")
    self.setWindowFlags(QtCore.Qt.Window)
    self.setupUi(self)
    self.setWindowIcon(appicon)
    # columns names. Must be added here in order to names be translated.
    self.COL_STR_NAME = QC.translate("stats", "Name", "This is a word, without spaces and symbols.")
    self.COL_STR_ADDR = QC.translate("stats", "Address", "This is a word, without spaces and symbols.")
    self.COL_STR_STATUS = QC.translate("stats", "Status", "This is a word, without spaces and symbols.")
    self.COL_STR_HOSTNAME = QC.translate("stats", "Hostname", "This is a word, without spaces and symbols.")
    self.COL_STR_UPTIME = QC.translate("stats", "Uptime", "This is a word, without spaces and symbols.")
    self.COL_STR_VERSION = QC.translate("stats", "Version", "This is a word, without spaces and symbols.")
    self.COL_STR_RULES_NUM = QC.translate("stats", "Rules", "This is a word, without spaces and symbols.")
    self.COL_STR_TIME = QC.translate("stats", "Time", "This is a word, without spaces and symbols.")
    self.COL_STR_ACTION = QC.translate("stats", "Action", "This is a word, without spaces and symbols.")
    self.COL_STR_DURATION = QC.translate("stats", "Duration", "This is a word, without spaces and symbols.")
    self.COL_STR_NODE = QC.translate("stats", "Node", "This is a word, without spaces and symbols.")
    self.COL_STR_ENABLED = QC.translate("stats", "Enabled", "This is a word, without spaces and symbols.")
    self.COL_STR_PRECEDENCE = QC.translate("stats", "Precedence", "This is a word, without spaces and symbols.")
    self.COL_STR_HITS = QC.translate("stats", "Hits", "This is a word, without spaces and symbols.")
    self.COL_STR_PROTOCOL = QC.translate("stats", "Protocol", "This is a word, without spaces and symbols.")
    self.COL_STR_PROCESS = QC.translate("stats", "Process", "This is a word, without spaces and symbols.")
    self.COL_STR_PROC_ARGS = QC.translate("stats", "Args", "This is a word, without spaces and symbols.")
    self.COL_STR_DESTINATION = QC.translate("stats", "Destination", "This is a word, without spaces and symbols.")
    self.COL_STR_DST_IP = QC.translate("stats", "DstIP", "This is a word, without spaces and symbols.")
    self.COL_STR_DST_HOST = QC.translate("stats", "DstHost", "This is a word, without spaces and symbols.")
    self.COL_STR_DST_PORT = QC.translate("stats", "DstPort", "This is a word, without spaces and symbols.")
    self.COL_STR_RULE = QC.translate("stats", "Rule", "This is a word, without spaces and symbols.")
    self.COL_STR_UID = QC.translate("stats", "UserID", "This is a word, without spaces and symbols.")
    self.COL_STR_LAST_CONNECTION = QC.translate("stats", "LastConnection", "This is a word, without spaces and symbols.")
    self.FIREWALL_STOPPED = QC.translate("stats", "Not running")
    self.FIREWALL_DISABLED = QC.translate("stats", "Disabled")
    self.FIREWALL_RUNNING = QC.translate("stats", "Running")
    self._db = db
    self._db_sqlite = self._db.get_db()
    self._db_name = dbname
    self.asndb = AsnDB.instance()
    self._cfg = Config.get()
    self._nodes = Nodes.instance()
    # TODO: allow to display multiples dialogs
    self._proc_details_dialog = ProcessDetailsDialog(appicon=appicon)
    # TODO: allow to navigate records by offsets
    self.prevButton.setVisible(False)
    self.nextButton.setVisible(False)
    self.daemon_connected = False
    # skip table updates if a context menu is active
    self._context_menu_active = False
    # used to skip updates while the user is moving the scrollbar
    self.scrollbar_active = False
    self._lock = threading.RLock()
    self._address = address
    self._stats = None
    self._notifications_sent = {}
    self._prefs_dialog = PreferencesDialog(appicon=appicon)
    self._rules_dialog = RulesEditorDialog(appicon=appicon)
    self._prefs_dialog.saved.connect(self._on_settings_saved)
    self._trigger.connect(self._on_update_triggered)
    self._notification_callback.connect(self._cb_notification_callback)
    self.nodeLabel.setText("")
    self.nodeLabel.setStyleSheet('color: green;font-size:12pt; font-weight:600;')
    self.rulesSplitter.setStretchFactor(0,0)
    self.rulesSplitter.setStretchFactor(1,2)
    self.startButton.clicked.connect(self._cb_start_clicked)
    self.prefsButton.clicked.connect(self._cb_prefs_clicked)
    self.saveButton.clicked.connect(self._on_save_clicked)
    self.comboAction.currentIndexChanged.connect(self._cb_combo_action_changed)
    self.limitCombo.currentIndexChanged.connect(self._cb_limit_combo_changed)
    self.tabWidget.currentChanged.connect(self._cb_tab_changed)
    self.delRuleButton.clicked.connect(self._cb_del_rule_clicked)
    self.rulesSplitter.splitterMoved.connect(self._cb_rules_splitter_moved)
    self.rulesTreePanel.itemClicked.connect(self._cb_rules_tree_item_clicked)
    self.enableRuleCheck.clicked.connect(self._cb_enable_rule_toggled)
    self.editRuleButton.clicked.connect(self._cb_edit_rule_clicked)
    self.newRuleButton.clicked.connect(self._cb_new_rule_clicked)
    self.cmdProcDetails.clicked.connect(self._cb_proc_details_clicked)
    self.comboRulesFilter.currentIndexChanged.connect(self._cb_rules_filter_combo_changed)
    self.helpButton.clicked.connect(self._cb_help_button_clicked)
    self.nextButton.clicked.connect(self._cb_next_button_clicked)
    self.prevButton.clicked.connect(self._cb_prev_button_clicked)
    self.enableRuleCheck.setVisible(False)
    self.delRuleButton.setVisible(False)
    self.editRuleButton.setVisible(False)
    self.nodeRuleLabel.setVisible(False)
    self.comboRulesFilter.setVisible(False)
    # translations must be done here, otherwise they don't take effect
    self.TABLES[self.TAB_NODES]['header_labels'] = [
        self.COL_STR_LAST_CONNECTION,
        self.COL_STR_ADDR,
        self.COL_STR_STATUS,
        self.COL_STR_HOSTNAME,
        self.COL_STR_VERSION,
        self.COL_STR_UPTIME,
        QC.translate("stats", "Rules", "This is a word, without spaces and symbols."),
        QC.translate("stats", "Connections", "This is a word, without spaces and symbols."),
        QC.translate("stats", "Dropped", "This is a word, without spaces and symbols."),
        QC.translate("stats", "Version", "This is a word, without spaces and symbols."),
    ]
    self.TABLES[self.TAB_RULES]['header_labels'] = [
        self.COL_STR_TIME,
        self.COL_STR_NODE,
        self.COL_STR_NAME,
        self.COL_STR_ENABLED,
        self.COL_STR_PRECEDENCE,
        self.COL_STR_ACTION,
        self.COL_STR_DURATION,
        "operator_type",
        "operator_sensitive",
        "operator_operand",
        "operator_data",
    ]
    # the aggregated views share a two-column "What / Hits" header
    stats_headers = [
        QC.translate("stats", "What", "This is a word, without spaces and symbols."),
        QC.translate("stats", "Hits", "This is a word, without spaces and symbols."),
    ]
    self.TABLES[self.TAB_HOSTS]['header_labels'] = stats_headers
    self.TABLES[self.TAB_PROCS]['header_labels'] = stats_headers
    self.TABLES[self.TAB_ADDRS]['header_labels'] = stats_headers
    self.TABLES[self.TAB_USERS]['header_labels'] = stats_headers
    self.TABLES[self.TAB_MAIN]['view'] = self._setup_table(QtWidgets.QTableView, self.eventsTable, "connections",
            self.TABLES[self.TAB_MAIN]['display_fields'],
            order_by="1",
            group_by=self.TABLES[self.TAB_MAIN]['group_by'],
            delegate=self.TABLES[self.TAB_MAIN]['delegate'],
            resize_cols=(),
            model=GenericTableModel("connections", [
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_ACTION,
                self.COL_STR_DESTINATION,
                self.COL_STR_PROTOCOL,
                self.COL_STR_PROCESS,
                self.COL_STR_RULE,
            ]),
            verticalScrollBar=self.connectionsTableScrollBar,
            limit=self._get_limit()
            )
    self.TABLES[self.TAB_NODES]['view'] = self._setup_table(QtWidgets.QTableView, self.nodesTable, "nodes",
            self.TABLES[self.TAB_NODES]['display_fields'],
            order_by="3,2,1",
            resize_cols=(self.COL_NODE,),
            model=GenericTableModel("nodes", self.TABLES[self.TAB_NODES]['header_labels']),
            verticalScrollBar=self.verticalScrollBar,
            sort_direction=self.SORT_ORDER[1],
            delegate=self.TABLES[self.TAB_NODES]['delegate'])
    self.TABLES[self.TAB_RULES]['view'] = self._setup_table(QtWidgets.QTableView,
            self.rulesTable, "rules",
            model=GenericTableModel("rules", self.TABLES[self.TAB_RULES]['header_labels']),
            verticalScrollBar=self.rulesScrollBar,
            delegate=self.TABLES[self.TAB_RULES]['delegate'],
            order_by="2",
            sort_direction=self.SORT_ORDER[0])
    self.TABLES[self.TAB_HOSTS]['view'] = self._setup_table(QtWidgets.QTableView,
            self.hostsTable, "hosts",
            model=GenericTableModel("hosts", self.TABLES[self.TAB_HOSTS]['header_labels']),
            verticalScrollBar=self.hostsScrollBar,
            resize_cols=(self.COL_WHAT,),
            delegate=self.TABLES[self.TAB_HOSTS]['delegate'],
            order_by="2",
            limit=self._get_limit()
            )
    self.TABLES[self.TAB_PROCS]['view'] = self._setup_table(QtWidgets.QTableView,
            self.procsTable, "procs",
            model=GenericTableModel("procs", self.TABLES[self.TAB_PROCS]['header_labels']),
            verticalScrollBar=self.procsScrollBar,
            resize_cols=(self.COL_WHAT,),
            delegate=self.TABLES[self.TAB_PROCS]['delegate'],
            order_by="2",
            limit=self._get_limit()
            )
    self.TABLES[self.TAB_ADDRS]['view'] = self._setup_table(QtWidgets.QTableView,
            self.addrTable, "addrs",
            model=AddressTableModel("addrs", self.TABLES[self.TAB_ADDRS]['header_labels']),
            verticalScrollBar=self.addrsScrollBar,
            resize_cols=(self.COL_WHAT,),
            delegate=self.TABLES[self.TAB_ADDRS]['delegate'],
            order_by="2",
            limit=self._get_limit()
            )
    self.TABLES[self.TAB_PORTS]['view'] = self._setup_table(QtWidgets.QTableView,
            self.portsTable, "ports",
            model=GenericTableModel("ports", self.TABLES[self.TAB_PORTS]['header_labels']),
            verticalScrollBar=self.portsScrollBar,
            resize_cols=(self.COL_WHAT,),
            delegate=self.TABLES[self.TAB_PORTS]['delegate'],
            order_by="2",
            limit=self._get_limit()
            )
    self.TABLES[self.TAB_USERS]['view'] = self._setup_table(QtWidgets.QTableView,
            self.usersTable, "users",
            model=GenericTableModel("users", self.TABLES[self.TAB_USERS]['header_labels']),
            verticalScrollBar=self.usersScrollBar,
            resize_cols=(self.COL_WHAT,),
            delegate=self.TABLES[self.TAB_USERS]['delegate'],
            order_by="2",
            limit=self._get_limit()
            )
    self.TABLES[self.TAB_NODES]['label'] = self.nodesLabel
    self.TABLES[self.TAB_RULES]['label'] = self.ruleLabel
    self.TABLES[self.TAB_HOSTS]['label'] = self.hostsLabel
    self.TABLES[self.TAB_PROCS]['label'] = self.procsLabel
    self.TABLES[self.TAB_ADDRS]['label'] = self.addrsLabel
    self.TABLES[self.TAB_PORTS]['label'] = self.portsLabel
    self.TABLES[self.TAB_USERS]['label'] = self.usersLabel
    self.TABLES[self.TAB_NODES]['cmd'] = self.cmdNodesBack
    self.TABLES[self.TAB_RULES]['cmd'] = self.cmdRulesBack
    self.TABLES[self.TAB_HOSTS]['cmd'] = self.cmdHostsBack
    self.TABLES[self.TAB_PROCS]['cmd'] = self.cmdProcsBack
    self.TABLES[self.TAB_ADDRS]['cmd'] = self.cmdAddrsBack
    self.TABLES[self.TAB_PORTS]['cmd'] = self.cmdPortsBack
    self.TABLES[self.TAB_USERS]['cmd'] = self.cmdUsersBack
    self.TABLES[self.TAB_MAIN]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_NODES]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_RULES]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_HOSTS]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_PROCS]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_ADDRS]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_PORTS]['cmdCleanStats'] = self.cmdCleanSql
    self.TABLES[self.TAB_USERS]['cmdCleanStats'] = self.cmdCleanSql
    # the rules clean button is only for a particular rule, not all.
    self.TABLES[self.TAB_RULES]['cmdCleanStats'].setVisible(False)
    self.TABLES[self.TAB_NODES]['cmdCleanStats'].setVisible(False)
    self.TABLES[self.TAB_MAIN]['cmdCleanStats'].clicked.connect(lambda: self._cb_clean_sql_clicked(self.TAB_MAIN))
    self.TABLES[self.TAB_MAIN]['filterLine'] = self.filterLine
    self.TABLES[self.TAB_MAIN]['view'].doubleClicked.connect(self._cb_main_table_double_clicked)
    self.TABLES[self.TAB_MAIN]['view'].installEventFilter(self)
    self.TABLES[self.TAB_MAIN]['filterLine'].textChanged.connect(self._cb_events_filter_line_changed)
    self.TABLES[self.TAB_RULES]['view'].setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    self.TABLES[self.TAB_RULES]['view'].customContextMenuRequested.connect(self._cb_table_context_menu)
    for idx in range(1,8):
        self.TABLES[idx]['cmd'].hide()
        self.TABLES[idx]['cmd'].setVisible(False)
        # BUGFIX: bind idx as a default argument. A plain "lambda:" would
        # late-bind idx, making every button act on the last tab (7).
        self.TABLES[idx]['cmd'].clicked.connect(
            lambda checked=False, idx=idx: self._cb_cmd_back_clicked(idx))
        if self.TABLES[idx]['cmdCleanStats'] != None:
            self.TABLES[idx]['cmdCleanStats'].clicked.connect(
                lambda checked=False, idx=idx: self._cb_clean_sql_clicked(idx))
        self.TABLES[idx]['label'].setStyleSheet('color: blue; font-size:9pt; font-weight:600;')
        self.TABLES[idx]['label'].setVisible(False)
        self.TABLES[idx]['view'].doubleClicked.connect(self._cb_table_double_clicked)
        self.TABLES[idx]['view'].selectionModel().selectionChanged.connect(self._cb_table_selection_changed)
        self.TABLES[idx]['view'].installEventFilter(self)
    self._load_settings()
    self._tables = ( \
        self.TABLES[self.TAB_MAIN]['view'],
        self.TABLES[self.TAB_NODES]['view'],
        self.TABLES[self.TAB_RULES]['view'],
        self.TABLES[self.TAB_HOSTS]['view'],
        self.TABLES[self.TAB_PROCS]['view'],
        self.TABLES[self.TAB_ADDRS]['view'],
        self.TABLES[self.TAB_PORTS]['view'],
        self.TABLES[self.TAB_USERS]['view']
    )
    self._file_names = ( \
        'events.csv',
        'nodes.csv',
        'rules.csv',
        'hosts.csv',
        'procs.csv',
        'addrs.csv',
        'ports.csv',
        'users.csv'
    )
    self.iconStart = QtGui.QIcon().fromTheme("media-playback-start")
    self.iconPause = QtGui.QIcon().fromTheme("media-playback-pause")
    if QtGui.QIcon.hasThemeIcon("document-new") == False:
        self._configure_buttons_icons()
# Sometimes a maximized window which had been minimized earlier won't
# unminimize. To work around it, we explicitly maximize such windows when
# unminimizing happens (KDE only).
def changeEvent(self, event):
    if event.type() == QtCore.QEvent.WindowStateChange:
        if event.oldState() & QtCore.Qt.WindowMinimized and event.oldState() & QtCore.Qt.WindowMaximized:
            # a previously minimized maximized window ...
            # NOTE(review): "windowState() ^ WindowMinimized" is truthy for any
            # state except exactly WindowMinimized; presumably meant
            # "not (windowState() & WindowMinimized)" -- confirm.
            if self.windowState() ^ QtCore.Qt.WindowMinimized and self._current_desktop == "KDE":
                # is not minimized anymore, i.e. it was unminimized
                # docs: https://doc.qt.io/qt-5/qwidget.html#setWindowState
                self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
def showEvent(self, event):
    """Refresh settings, rules tree, title and tables on every show."""
    super(StatsDialog, self).showEvent(event)
    self._shown_trigger.emit()
    # a bound window shows the node address in the title and the label
    if self._address is None:
        window_title = QC.translate("stats", "OpenSnitch Network Statistics {0}").format(version)
    else:
        window_title = QC.translate("stats", "OpenSnitch Network Statistics for {0}").format(self._address)
        self.nodeLabel.setText(self._address)
    self._load_settings()
    self._add_rulesTree_nodes()
    self.setWindowTitle(window_title)
    self._refresh_active_table()
def eventFilter(self, source, event):
    """Handle Ctrl+C (copy selected rows) and Delete (remove rows) on the
    installed table views; defer everything else to the base class."""
    if event.type() == QtCore.QEvent.KeyPress:
        if event.matches(QtGui.QKeySequence.Copy):
            self._copy_selected_rows()
            return True
        elif event.key() == QtCore.Qt.Key_Delete:
            table = self._get_active_table()
            selection = table.selectionModel().selectedRows()
            if selection:
                model = table.model()
                # NOTE(review): the first argument (2) is TAB_RULES; looks
                # like it should be the active tab index -- confirm.
                self._table_menu_delete(2, model, selection)
                # we need to manually refresh the model
                table.selectionModel().clear()
                self._refresh_active_table()
            return True
    return super(StatsDialog, self).eventFilter(source, event)
def _configure_buttons_icons(self):
    """Fall back to Qt's built-in standard icons when the desktop theme
    does not provide named icons."""
    std_icon = self.style().standardIcon
    style_cls = QtWidgets.QStyle
    self.iconStart = std_icon(getattr(style_cls, "SP_MediaPlay"))
    self.iconPause = std_icon(getattr(style_cls, "SP_MediaPause"))
    self.newRuleButton.setIcon(std_icon(getattr(style_cls, "SP_FileIcon")))
    self.delRuleButton.setIcon(std_icon(getattr(style_cls, "SP_TrashIcon")))
    self.editRuleButton.setIcon(std_icon(getattr(style_cls, "SP_FileDialogDetailedView")))
    self.saveButton.setIcon(std_icon(getattr(style_cls, "SP_DialogSaveButton")))
    self.prefsButton.setIcon(std_icon(getattr(style_cls, "SP_FileDialogDetailedView")))
    self.startButton.setIcon(self.iconStart)
    self.cmdProcDetails.setIcon(std_icon(getattr(style_cls, "SP_FileDialogContentsView")))
    self.TABLES[self.TAB_MAIN]['cmdCleanStats'].setIcon(std_icon(getattr(style_cls, "SP_DialogResetButton")))
    for table_idx in range(1, 8):
        self.TABLES[table_idx]['cmd'].setIcon(std_icon(getattr(style_cls, "SP_ArrowLeft")))
        if self.TABLES[table_idx]['cmdCleanStats'] != None:
            self.TABLES[table_idx]['cmdCleanStats'].setIcon(std_icon(getattr(style_cls, "SP_DialogResetButton")))
def _load_settings(self):
    """Restore persisted UI state: window geometry, last active tab,
    filter text/action, row limit, rules-splitter position, table column
    layouts and the expansion state of the rules tree."""
    dialog_geometry = self._cfg.getSettings(Config.STATS_GEOMETRY)
    dialog_last_tab = self._cfg.getSettings(Config.STATS_LAST_TAB)
    dialog_general_filter_text = self._cfg.getSettings(Config.STATS_FILTER_TEXT)
    dialog_general_filter_action = self._cfg.getSettings(Config.STATS_FILTER_ACTION)
    dialog_general_limit_results = self._cfg.getSettings(Config.STATS_LIMIT_RESULTS)
    if dialog_geometry != None:
        self.restoreGeometry(dialog_geometry)
    if dialog_last_tab != None:
        self.tabWidget.setCurrentIndex(int(dialog_last_tab))
    if dialog_general_filter_text != None:
        # prevent from firing textChanged signal
        self.filterLine.blockSignals(True);
        self.filterLine.setText(dialog_general_filter_text)
        self.filterLine.blockSignals(False);
    if dialog_general_filter_action != None:
        self.comboAction.setCurrentIndex(int(dialog_general_filter_action))
    if dialog_general_limit_results != None:
        # XXX: a little hack, because if the saved index is 0, the signal is not fired.
        # XXX: this causes to fire the event twice
        self.limitCombo.setCurrentIndex(4)
        self.limitCombo.setCurrentIndex(int(dialog_general_limit_results))
    rules_splitter_pos = self._cfg.getSettings(Config.STATS_RULES_SPLITTER_POS)
    if type(rules_splitter_pos) == QtCore.QByteArray:
        self.rulesSplitter.restoreState(rules_splitter_pos)
        rulesSizes = self.rulesSplitter.sizes()
        # the filter combo is hidden in detail view; otherwise it is only
        # shown when the rules-tree panel is fully collapsed
        if self.IN_DETAIL_VIEW[self.TAB_RULES] == True:
            self.comboRulesFilter.setVisible(False)
        elif len(rulesSizes) > 0:
            self.comboRulesFilter.setVisible(rulesSizes[0] == 0)
    else:
        # no saved splitter state: tree gets 1/4 of the width, table 1/2
        w = self.rulesSplitter.width()
        self.rulesSplitter.setSizes([int(w/4), int(w/2)])
    self._restore_details_view_columns(self.eventsTable.horizontalHeader(), Config.STATS_GENERAL_COL_STATE)
    self._restore_details_view_columns(self.nodesTable.horizontalHeader(), Config.STATS_NODES_COL_STATE)
    self._restore_details_view_columns(self.rulesTable.horizontalHeader(), Config.STATS_RULES_COL_STATE)
    rulesTreeNodes_expanded = self._cfg.getBool(Config.STATS_RULES_TREE_EXPANDED_1)
    if rulesTreeNodes_expanded != None:
        rules_tree_nodes = self._get_rulesTree_item(self.RULES_TREE_NODES)
        if rules_tree_nodes != None:
            rules_tree_nodes.setExpanded(rulesTreeNodes_expanded)
    rulesTreeApps_expanded = self._cfg.getBool(Config.STATS_RULES_TREE_EXPANDED_0)
    if rulesTreeApps_expanded != None:
        rules_tree_apps = self._get_rulesTree_item(self.RULES_TREE_APPS)
        if rules_tree_apps != None:
            rules_tree_apps.setExpanded(rulesTreeApps_expanded)
def _save_settings(self):
    """Persist window geometry, active tab, filters, column layouts and
    rules-tree expansion state."""
    self._cfg.setSettings(Config.STATS_GEOMETRY, self.saveGeometry())
    self._cfg.setSettings(Config.STATS_LAST_TAB, self.tabWidget.currentIndex())
    self._cfg.setSettings(Config.STATS_LIMIT_RESULTS, self.limitCombo.currentIndex())
    self._cfg.setSettings(Config.STATS_FILTER_TEXT, self.filterLine.text())
    # column state of the three configurable table headers
    for table, cfg_key in ((self.eventsTable, Config.STATS_GENERAL_COL_STATE),
                           (self.nodesTable, Config.STATS_NODES_COL_STATE),
                           (self.rulesTable, Config.STATS_RULES_COL_STATE)):
        self._cfg.setSettings(cfg_key, table.horizontalHeader().saveState())
    # expanded/collapsed state of the two top-level rules-tree items
    for tree_idx, cfg_key in ((self.RULES_TREE_APPS, Config.STATS_RULES_TREE_EXPANDED_0),
                              (self.RULES_TREE_NODES, Config.STATS_RULES_TREE_EXPANDED_1)):
        tree_item = self._get_rulesTree_item(tree_idx)
        if tree_item != None:
            self._cfg.setSettings(cfg_key, tree_item.isExpanded())
def _del_rule(self, rule_name, node_addr):
    """Ask the node at node_addr to delete the rule named rule_name and
    remember the outgoing notification so its reply can be matched."""
    notif_id, notif = self._nodes.delete_rule(rule_name, node_addr, self._notification_callback)
    self._notifications_sent[notif_id] = notif
# https://stackoverflow.com/questions/40225270/copy-paste-multiple-items-from-qtableview-in-pyqt4
def _copy_selected_rows(self):
    """Copy the selected cells of the active table to the clipboard as CSV."""
    cur_idx = self.tabWidget.currentIndex()
    selection = self.TABLES[cur_idx]['view'].selectedIndexes()
    if not selection:
        return
    row_ids = sorted(index.row() for index in selection)
    col_ids = sorted(index.column() for index in selection)
    first_row, last_row = row_ids[0], row_ids[-1]
    first_col, last_col = col_ids[0], col_ids[-1]
    # rectangular grid covering the selection's bounding box, filled cell by cell
    grid = [[''] * (last_col - first_col + 1) for _ in range(last_row - first_row + 1)]
    for index in selection:
        grid[index.row() - first_row][index.column() - first_col] = index.data()
    stream = io.StringIO()
    csv.writer(stream, delimiter=',').writerows(grid)
    QtWidgets.qApp.clipboard().setText(stream.getvalue())
def _configure_rules_contextual_menu(self, pos):
    """Build and run the right-click menu of the rules table.

    Offers apply-to-node, action/duration changes, enable/disable,
    duplicate, edit and delete for the selected rule(s).

    Returns True when the menu handling ran to completion, False when the
    user cancelled the apply-to-node confirmation, None when nothing was
    selected or no further handling was needed.
    """
    try:
        cur_idx = self.tabWidget.currentIndex()
        table = self._get_active_table()
        model = table.model()
        selection = table.selectionModel().selectedRows()
        if not selection:
            return
        menu = QtWidgets.QMenu()
        durMenu = QtWidgets.QMenu(self.COL_STR_DURATION)
        actionMenu = QtWidgets.QMenu(self.COL_STR_ACTION)
        nodesMenu = QtWidgets.QMenu(QC.translate("stats", "Apply to"))
        nodes_menu = []
        if self._nodes.count() > 0:
            for node in self._nodes.get_nodes():
                nodes_menu.append([nodesMenu.addAction(node), node])
            menu.addMenu(nodesMenu)
        _actAllow = actionMenu.addAction(QC.translate("stats", "Allow"))
        _actDeny = actionMenu.addAction(QC.translate("stats", "Deny"))
        _actReject = actionMenu.addAction(QC.translate("stats", "Reject"))
        menu.addMenu(actionMenu)
        _durAlways = durMenu.addAction(QC.translate("stats", "Always"))
        _durUntilReboot = durMenu.addAction(QC.translate("stats", "Until reboot"))
        _dur1h = durMenu.addAction(Config.DURATION_1h)
        _dur30m = durMenu.addAction(Config.DURATION_30m)
        _dur15m = durMenu.addAction(Config.DURATION_15m)
        _dur5m = durMenu.addAction(Config.DURATION_5m)
        menu.addMenu(durMenu)
        # the Enable/Disable label follows the first selected rule's state
        is_rule_enabled = model.index(selection[0].row(), self.COL_R_ENABLED).data()
        menu_label_enable = QC.translate("stats", "Disable")
        if is_rule_enabled == "False":
            menu_label_enable = QC.translate("stats", "Enable")
        _menu_enable = menu.addAction(QC.translate("stats", menu_label_enable))
        _menu_duplicate = menu.addAction(QC.translate("stats", "Duplicate"))
        _menu_edit = menu.addAction(QC.translate("stats", "Edit"))
        _menu_delete = menu.addAction(QC.translate("stats", "Delete"))
        # move away menu a few pixels to the right, to avoid clicking on it by mistake
        point = QtCore.QPoint(pos.x()+10, pos.y()+5)
        action = menu.exec_(table.mapToGlobal(point))
        model = table.model()
        if self._nodes.count() > 0:
            for nmenu in nodes_menu:
                node_action = nmenu[0]
                node_addr = nmenu[1]
                if action == node_action:
                    # BUGFIX: translate the template first, THEN format it.
                    # Formatting before translating made the lookup key
                    # contain the address, so it never matched the catalog.
                    ret = Message.yes_no(
                        QC.translate("stats", " Apply this rule to {0} ").format(node_addr),
                        QC.translate("stats", " Are you sure?"),
                        QtWidgets.QMessageBox.Warning)
                    if ret == QtWidgets.QMessageBox.Cancel:
                        return False
                    self._table_menu_apply_to_node(cur_idx, model, selection, node_addr)
                    return
        if action == _menu_delete:
            self._table_menu_delete(cur_idx, model, selection)
        elif action == _menu_edit:
            self._table_menu_edit(cur_idx, model, selection)
        elif action == _menu_enable:
            self._table_menu_enable(cur_idx, model, selection, is_rule_enabled)
        elif action == _menu_duplicate:
            self._table_menu_duplicate(cur_idx, model, selection)
        elif action == _durAlways:
            self._table_menu_change_rule_field(cur_idx, model, selection, "duration", Config.DURATION_ALWAYS)
        elif action == _dur1h:
            self._table_menu_change_rule_field(cur_idx, model, selection, "duration", Config.DURATION_1h)
        elif action == _dur30m:
            self._table_menu_change_rule_field(cur_idx, model, selection, "duration", Config.DURATION_30m)
        elif action == _dur15m:
            self._table_menu_change_rule_field(cur_idx, model, selection, "duration", Config.DURATION_15m)
        elif action == _dur5m:
            self._table_menu_change_rule_field(cur_idx, model, selection, "duration", Config.DURATION_5m)
        elif action == _durUntilReboot:
            self._table_menu_change_rule_field(cur_idx, model, selection, "duration", Config.DURATION_UNTIL_RESTART)
    except Exception as e:
        print(e)
    finally:
        # BUGFIX: always clear the selection, but do NOT "return" from a
        # finally block -- that overrode every early return above
        # (including "return False" on cancel) and always yielded True.
        self._clear_rows_selection()
    return True
def _table_menu_duplicate(self, cur_idx, model, selection):
    """Clone the selected rule(s): store the copy locally and push it to
    the owning node.

    NOTE(review): the inner loop reuses the name "idx", shadowing the
    selection index. It works because the outer for rebinds idx from its
    iterator each pass, but it is fragile -- consider renaming.
    """
    for idx in selection:
        rule_name = model.index(idx.row(), self.COL_R_NAME).data()
        node_addr = model.index(idx.row(), self.COL_R_NODE).data()
        records = None
        # try up to 100 candidate names for the clone
        for idx in range(0,100):
            records = self._get_rule(rule_name, node_addr)
            # NOTE(review): QSqlQuery.size() returns -1 for drivers (e.g.
            # SQLite) that cannot report result sizes, so this presumably
            # means "query executed" -- confirm the intent. Also note that
            # get_rule_from_records(None) would fail if records is None.
            if records == None or records.size() == -1:
                rule = self._rules_dialog.get_rule_from_records(records)
                rule.name = "cloned-{0}-{1}".format(idx, rule.name)
                self._db.insert_rule(rule, node_addr)
                break
        # only notify the node when a clone was actually created
        if records != None and records.size() == -1:
            noti = ui_pb2.Notification(type=ui_pb2.CHANGE_RULE, rules=[rule])
            nid = self._nodes.send_notification(node_addr, noti, self._notification_callback)
            if nid != None:
                self._notifications_sent[nid] = noti
def _table_menu_apply_to_node(self, cur_idx, model, selection, node_addr):
    """Send the selected rule(s) to another node; on successful delivery,
    store the rule locally for that node too."""
    for sel_idx in selection:
        rule_name = model.index(sel_idx.row(), self.COL_R_NAME).data()
        rule_records = self._get_rule(rule_name, None)
        rule = self._rules_dialog.get_rule_from_records(rule_records)
        notif = ui_pb2.Notification(type=ui_pb2.CHANGE_RULE, rules=[rule])
        notif_id = self._nodes.send_notification(node_addr, notif, self._notification_callback)
        if notif_id != None:
            self._db.insert_rule(rule, node_addr)
            self._notifications_sent[notif_id] = notif
def _table_menu_change_rule_field(self, cur_idx, model, selection, field, value):
    """Set `field` to `value` on every selected rule, both in the local DB
    and on the rule's node (via a CHANGE_RULE notification)."""
    for idx in selection:
        rule_name = model.index(idx.row(), self.COL_R_NAME).data()
        node_addr = model.index(idx.row(), self.COL_R_NODE).data()
        records = self._get_rule(rule_name, node_addr)
        rule = self._rules_dialog.get_rule_from_records(records)
        # FIXME(review): rule_name/node_addr are interpolated into the SQL
        # condition string; prefer fully parameterized queries.
        self._db.update(table="rules", fields="{0}=?".format(field),
                values=[value], condition="name='{0}' AND node='{1}'".format(rule_name, node_addr),
                action_on_conflict="")
        # mirror the DB change on the in-memory rule before notifying the node
        if field == "action":
            rule.action = value
        elif field == "duration":
            rule.duration = value
        elif field == "precedence":
            rule.precedence = value
        noti = ui_pb2.Notification(type=ui_pb2.CHANGE_RULE, rules=[rule])
        nid = self._nodes.send_notification(node_addr, noti, self._notification_callback)
        if nid != None:
            self._notifications_sent[nid] = noti
def _table_menu_enable(self, cur_idx, model, selection, is_rule_enabled):
    """Toggle the enabled state of every selected rule.

    is_rule_enabled is the CURRENT state as a string ("True"/"False"); the
    new state is its negation.
    """
    rule_status = "False" if is_rule_enabled == "True" else "True"
    for idx in selection:
        rule_name = model.index(idx.row(), self.COL_R_NAME).data()
        node_addr = model.index(idx.row(), self.COL_R_NODE).data()
        records = self._get_rule(rule_name, node_addr)
        rule = self._rules_dialog.get_rule_from_records(records)
        rule_type = ui_pb2.DISABLE_RULE if is_rule_enabled == "True" else ui_pb2.ENABLE_RULE
        self._db.update(table="rules", fields="enabled=?",
                values=[rule_status], condition="name='{0}' AND node='{1}'".format(rule_name, node_addr),
                action_on_conflict="")
        noti = ui_pb2.Notification(type=rule_type, rules=[rule])
        nid = self._nodes.send_notification(node_addr, noti, self._notification_callback)
        if nid != None:
            self._notifications_sent[nid] = noti
def _table_menu_delete(self, cur_idx, model, selection):
    """Ask for confirmation, then delete every selected rule.

    Returns False when the user cancels; otherwise falls through (None),
    matching the original contract.
    """
    # fixed typo "Your are" -> "You are", matching _cb_del_rule_clicked's text
    ret = Message.yes_no(
        QC.translate("stats", " You are about to delete this rule. "),
        QC.translate("stats", " Are you sure?"),
        QtWidgets.QMessageBox.Warning)
    if ret == QtWidgets.QMessageBox.Cancel:
        return False
    for idx in selection:
        name = model.index(idx.row(), self.COL_R_NAME).data()
        node = model.index(idx.row(), self.COL_R_NODE).data()
        self._del_rule(name, node)
def _table_menu_edit(self, cur_idx, model, selection):
    """Open the edit dialog for the first selected rule only."""
    for idx in selection:
        name = model.index(idx.row(), self.COL_R_NAME).data()
        node = model.index(idx.row(), self.COL_R_NODE).data()
        records = self._get_rule(name, node)
        # NOTE(review): _get_rule() returns None on "not found"; the
        # `records == -1` comparison looks vestigial — confirm it can happen.
        if records == None or records == -1:
            Message.ok("Rule error",
                       QC.translate("stats", "Rule not found by that name and node"),
                       QtWidgets.QMessageBox.Warning)
            return
        self._rules_dialog.edit_rule(records, node)
        # only the first selected row is edited
        break
# ignore table refreshes while the user is dragging the scrollbar.
def _cb_scrollbar_pressed(self):
    # flag read by the refresh logic to pause updates while scrolling
    self.scrollbar_active = True
def _cb_scrollbar_released(self):
    # resume table refreshes once the user lets go of the scrollbar
    self.scrollbar_active = False
def _cb_proc_details_clicked(self):
    """Collect the PIDs listed in the active table (first node wins per PID)
    and hand them to the process-details dialog for monitoring."""
    table = self._tables[self.tabWidget.currentIndex()]
    model = table.model()
    pids = {}
    for row in range(0, model.rowCount()):
        pid = model.index(row, self.COL_PID).data()
        node = model.index(row, self.COL_NODE).data()
        # keep the node of the first row seen for this pid
        pids.setdefault(pid, node)
    self._proc_details_dialog.monitor(pids)
@QtCore.pyqtSlot(ui_pb2.NotificationReply)
def _cb_notification_callback(self, reply):
    """Show the outcome of a previously sent notification to the user.

    Replies for notifications we did not send are silently ignored.
    """
    if reply.id in self._notifications_sent:
        if reply.code == ui_pb2.ERROR:
            Message.ok(
                QC.translate("stats",
                             "<b>Error:</b><br><br>",
                             "{0}").format(reply.data),
                QtWidgets.QMessageBox.Warning)
        else:
            # non-error replies are surfaced as warnings with the raw data
            Message.ok(
                QC.translate("stats", "Warning:"),
                "{0}".format(reply.data),
                QtWidgets.QMessageBox.Warning)
def _cb_tab_changed(self, index):
    """Adjust per-tab widgets whenever the user switches tabs."""
    # the action filter combo only makes sense on the events (main) tab
    self.comboAction.setVisible(index == self.TAB_MAIN)
    self.TABLES[index]['cmdCleanStats'].setVisible(True)
    if index == self.TAB_MAIN:
        self._set_events_query()
    else:
        if index == self.TAB_RULES:
            # display the clean button only if not in detail view
            self.TABLES[index]['cmdCleanStats'].setVisible( self.IN_DETAIL_VIEW[index] )
            self._add_rulesTree_nodes()
        elif index == self.TAB_PROCS:
            # make the button visible depending if we're in the detail view
            nrows = self._get_active_table().model().rowCount()
            self.cmdProcDetails.setVisible(self.IN_DETAIL_VIEW[index] and nrows > 0)
        elif index == self.TAB_NODES:
            self.TABLES[index]['cmdCleanStats'].setVisible( self.IN_DETAIL_VIEW[index] )
        self._refresh_active_table()
def _cb_table_context_menu(self, pos):
    """Open the contextual menu of the rules table at position `pos`."""
    tab = self.tabWidget.currentIndex()
    # the only table with a context menu for now is the main rules table;
    # the rules detail view has none either
    if tab != self.TAB_RULES or self.IN_DETAIL_VIEW[self.TAB_RULES] == True:
        return
    self._context_menu_active = True
    needs_refresh = self._configure_rules_contextual_menu(pos)
    self._context_menu_active = False
    if needs_refresh:
        self._refresh_active_table()
def _cb_table_header_clicked(self, pos, sortIdx):
    """Re-run the active table's query sorted by the clicked column.

    pos is the 0-based column index; sortIdx indexes SORT_ORDER.
    """
    cur_idx = self.tabWidget.currentIndex()
    # TODO: allow ordering by Network column
    if cur_idx == self.TAB_ADDRS and pos == 2:
        return
    model = self._get_active_table().model()
    qstr = model.query().lastQuery().split("ORDER BY")[0]
    q = qstr.strip(" ") + " ORDER BY %d %s" % (pos+1, self.SORT_ORDER[sortIdx])
    # 'cmd' hidden means we're not in a detail view: remember the sort so it
    # survives refreshes, and rebuild the ORDER BY from the saved values
    # (the `q` built above is discarded in that case)
    if cur_idx > 0 and self.TABLES[cur_idx]['cmd'].isVisible() == False:
        self.TABLES[cur_idx]['last_order_by'] = pos+1
        self.TABLES[cur_idx]['last_order_to'] = sortIdx
        q = qstr.strip(" ") + self._get_order()
    q += self._get_limit()
    self.setQuery(model, q)
def _cb_events_filter_line_changed(self, text):
    """Re-filter the active tab's query as the user types in the filter line."""
    cur_idx = self.tabWidget.currentIndex()
    model = self.TABLES[cur_idx]['view'].model()
    qstr = None
    if cur_idx == StatsDialog.TAB_MAIN:
        # the events tab persists its filter text and rebuilds its own query
        self._cfg.setSettings(Config.STATS_FILTER_TEXT, text)
        self._set_events_query()
        return
    elif cur_idx == StatsDialog.TAB_NODES:
        qstr = self._get_nodes_filter_query(model.query().lastQuery(), text)
    elif self.IN_DETAIL_VIEW[cur_idx] == True:
        qstr = self._get_indetail_filter_query(model.query().lastQuery(), text)
    else:
        # plain list view: rebuild the query with a WHERE clause for the text
        where_clause = self._get_filter_line_clause(cur_idx, text)
        qstr = self._db.get_query( self.TABLES[cur_idx]['name'], self.TABLES[cur_idx]['display_fields'] ) + \
            where_clause + self._get_order()
    if qstr != None:
        self.setQuery(model, qstr)
def _cb_limit_combo_changed(self, idx):
    """Apply the newly selected row limit to the active table's query."""
    if self.tabWidget.currentIndex() == self.TAB_MAIN:
        # the events tab rebuilds its query (with limit) from scratch
        self._set_events_query()
        return
    model = self._get_active_table().model()
    qstr = model.query().lastQuery()
    if "LIMIT" in qstr:
        # replace the existing LIMIT clause instead of appending a second one
        qstr = qstr.split(" LIMIT ")[0]
    self.setQuery(model, qstr + self._get_limit())
def _cb_combo_action_changed(self, idx):
    """Persist the selected action filter and refresh the events query."""
    # the action combo only affects the events (main) tab
    if self.tabWidget.currentIndex() != self.TAB_MAIN:
        return
    self._cfg.setSettings(Config.STATS_GENERAL_FILTER_ACTION, idx)
    self._set_events_query()
def _cb_clean_sql_clicked(self, idx):
    """Delete the data behind the current view.

    - rules tab: delete the rule named in the label.
    - any detail view: delete the aggregated rows for the selected item plus
      its related connections.
    - otherwise: wipe the whole table of the active tab.
    """
    cur_idx = self.tabWidget.currentIndex()
    if self.tabWidget.currentIndex() == StatsDialog.TAB_RULES:
        self._db.empty_rule(self.TABLES[cur_idx]['label'].text())
    elif self.IN_DETAIL_VIEW[cur_idx]:
        model = self._get_active_table().model()
        # get left side of the query: * GROUP BY ...
        qstr = model.query().lastQuery().split("GROUP BY")[0]
        # get right side of the query: ... WHERE *
        # NOTE(review): `q` is never used afterwards — confirm it can be removed.
        q = qstr.split("WHERE")
        table = self.TABLES[cur_idx]['name']
        label = self.TABLES[cur_idx]['label'].text()
        # pick the connections column that matches the current detail view
        field = "dst_host"
        if cur_idx == self.TAB_NODES:
            field = "node"
            # labels starting with '/' are unix-socket addresses
            if label[0] == '/':
                label = "unix:{0}".format(label)
        elif cur_idx == self.TAB_PROCS:
            field = "process"
        elif cur_idx == self.TAB_ADDRS:
            field = "dst_ip"
        elif cur_idx == self.TAB_PORTS:
            field = "dst_port"
        elif cur_idx == self.TAB_USERS:
            field = "uid"
        # FIXME(review): label is interpolated into the SQL; use parameters.
        self._db.remove("DELETE FROM {0} WHERE what = '{1}'".format(table, label))
        self._db.remove("DELETE FROM connections WHERE {0} = '{1}'".format(field, label))
    else:
        self._db.clean(self.TABLES[cur_idx]['name'])
    self._refresh_active_table()
def _cb_cmd_back_clicked(self, idx):
    """Leave the detail view of the current tab and restore the list view.

    The finally block always restores column layout, scroll position and row
    selection — including after the early return on the rules tab.
    """
    try:
        cur_idx = self.tabWidget.currentIndex()
        self._clear_rows_selection()
        self.IN_DETAIL_VIEW[cur_idx] = False
        self._set_active_widgets(False)
        if cur_idx == StatsDialog.TAB_RULES:
            self._restore_rules_tab_widgets(True)
            return
        elif cur_idx == StatsDialog.TAB_PROCS:
            self.cmdProcDetails.setVisible(False)
        # rebuild the plain list query, preserving any filter-line text
        model = self._get_active_table().model()
        where_clause = ""
        if self.TABLES[cur_idx]['filterLine'] != None:
            filter_text = self.TABLES[cur_idx]['filterLine'].text()
            where_clause = self._get_filter_line_clause(cur_idx, filter_text)
        self.setQuery(model,
                      self._db.get_query(
                          self.TABLES[cur_idx]['name'],
                          self.TABLES[cur_idx]['display_fields']) + where_clause + " " + self._get_order() + self._get_limit()
                      )
    finally:
        self._restore_details_view_columns(
            self.TABLES[cur_idx]['view'].horizontalHeader(),
            "{0}{1}".format(Config.STATS_VIEW_COL_STATE, cur_idx)
        )
        self._restore_scroll_value()
        self._restore_last_selected_row()
def _cb_main_table_double_clicked(self, row):
    """Jump from the events view to the detail view of the clicked cell.

    Only the Node, Process and Rule columns navigate; double clicks on any
    other column are ignored.
    """
    data = row.data()
    idx = row.column()
    cur_idx = 1
    if idx == StatsDialog.COL_NODE:
        cur_idx = self.TAB_NODES
        self.IN_DETAIL_VIEW[cur_idx] = True
        self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_NODE).data()
        self.tabWidget.setCurrentIndex(cur_idx)
        self._set_active_widgets(True, str(data))
        p, addr = self._nodes.get_addr(data)
        self._set_nodes_query(addr)
    elif idx == StatsDialog.COL_PROCS:
        cur_idx = self.TAB_PROCS
        self.IN_DETAIL_VIEW[cur_idx] = True
        self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_PROCS).data()
        self.tabWidget.setCurrentIndex(cur_idx)
        self._set_active_widgets(True, str(data))
        self._set_process_query(data)
    elif idx == StatsDialog.COL_RULES:
        cur_idx = self.TAB_RULES
        self.IN_DETAIL_VIEW[cur_idx] = True
        self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_RULES).data()
        r_name, node = self._set_rules_tab_active(row, cur_idx, self.COL_RULES, self.COL_NODE)
        self._set_active_widgets(True, str(data))
        self._set_rules_query(r_name, node)
    else:
        return
    # apply the saved column layout of the destination detail view
    self._restore_details_view_columns(
        self.TABLES[cur_idx]['view'].horizontalHeader(),
        "{0}{1}".format(Config.STATS_VIEW_DETAILS_COL_STATE, cur_idx)
    )
def _cb_table_double_clicked(self, row):
    """Enter the detail view of the clicked row on any non-events tab."""
    cur_idx = self.tabWidget.currentIndex()
    if self.IN_DETAIL_VIEW[cur_idx]:
        return
    self.IN_DETAIL_VIEW[cur_idx] = True
    # remember what was clicked and where, so Back can restore the view
    self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_TIME).data()
    self.LAST_SCROLL_VALUE = self.TABLES[cur_idx]['view'].vScrollBar.value()
    data = row.data()
    if cur_idx == self.TAB_RULES:
        rule_name = row.model().index(row.row(), self.COL_R_NAME).data()
        self._set_active_widgets(True, rule_name)
        r_name, node = self._set_rules_tab_active(row, cur_idx, self.COL_R_NAME, self.COL_R_NODE)
        self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_R_NAME).data()
        self._set_rules_query(r_name, node)
        self._restore_details_view_columns(
            self.TABLES[cur_idx]['view'].horizontalHeader(),
            "{0}{1}".format(Config.STATS_VIEW_DETAILS_COL_STATE, cur_idx)
        )
        return
    if cur_idx == self.TAB_NODES:
        data = row.model().index(row.row(), self.COL_NODE).data()
        self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_NODE).data()
    if cur_idx > self.TAB_RULES:
        # aggregated tabs key their rows on the "what" column
        self.LAST_SELECTED_ITEM = row.model().index(row.row(), self.COL_WHAT).data()
        data = row.model().index(row.row(), self.COL_WHAT).data()
    self._set_active_widgets(True, str(data))
    if cur_idx == StatsDialog.TAB_NODES:
        self._set_nodes_query(data)
    elif cur_idx == StatsDialog.TAB_HOSTS:
        self._set_hosts_query(data)
    elif cur_idx == StatsDialog.TAB_PROCS:
        self._set_process_query(data)
    elif cur_idx == StatsDialog.TAB_ADDRS:
        # append the address' ASN to the label when known
        lbl_text = self.TABLES[cur_idx]['label'].text()
        if lbl_text != "":
            asn = self.asndb.get_asn(lbl_text)
            if asn != "":
                lbl_text += " (" + asn + ")"
            self.TABLES[cur_idx]['label'].setText(lbl_text)
        self._set_addrs_query(data)
    elif cur_idx == StatsDialog.TAB_PORTS:
        self._set_ports_query(data)
    elif cur_idx == StatsDialog.TAB_USERS:
        self._set_users_query(data)
    self._restore_details_view_columns(
        self.TABLES[cur_idx]['view'].horizontalHeader(),
        "{0}{1}".format(Config.STATS_VIEW_DETAILS_COL_STATE, cur_idx)
    )
# selection changes occur before tableview's clicked event.
# if there're no rows selected, accept the selection. Otherwise clean it.
def _cb_table_selection_changed(self, selected, deselected):
    """Track whether the user has a multi-row selection on the active tab."""
    cur_idx = self.tabWidget.currentIndex()
    # only update the flag (that pauses data updates) if there's more than 1
    # row selected. When using the keyboard to move around, 1 row will
    # be selected to indicate where you are.
    # NOTE: in some qt versions you can select a row and setQuery() won't
    # reset the selection, but in others it gets resetted.
    self.TABLES[cur_idx]['rows_selected'] = len(self.TABLES[cur_idx]['view'].selectionModel().selectedRows(0)) > 1
def _cb_prefs_clicked(self):
    """Open the preferences dialog."""
    self._prefs_dialog.show()
def _cb_rules_filter_combo_changed(self, idx):
    """Filter the rules list by the kind selected in the combo box
    (all / permanent / temporary)."""
    if idx == self.RULES_TREE_APPS:
        self._set_rules_filter()
    elif idx == self.RULES_COMBO_PERMANENT:
        self._set_rules_filter(self.RULES_TREE_APPS, self.RULES_TREE_PERMANENT)
    elif idx == self.RULES_COMBO_TEMPORARY:
        self._set_rules_filter(self.RULES_TREE_APPS, self.RULES_TREE_TEMPORARY)
def _cb_rules_tree_item_clicked(self, item, col):
    """
    Event fired when the user clicks on the left panel of the rules tab.

    Translates the clicked tree item (and its parent, if any) into a rules
    filter; top-level items have parent_row == -1.
    """
    item_model = self.rulesTreePanel.indexFromItem(item, col)
    parent = item.parent()
    parent_row = -1
    if parent != None:
        parent_model = self.rulesTreePanel.indexFromItem(parent, col)
        parent_row = parent_model.row()
    self._set_rules_filter(parent_row, item_model.row(), item.text(0))
def _cb_rules_splitter_moved(self, pos, index):
    """Persist the rules splitter position; show the filter combo only when
    the tree panel is collapsed (pos == 0)."""
    self.comboRulesFilter.setVisible(pos == 0)
    self._cfg.setSettings(Config.STATS_RULES_SPLITTER_POS, self.rulesSplitter.saveState())
def _cb_start_clicked(self):
    """Start or stop interception when the start/pause button is toggled."""
    # without a connected daemon the button can't do anything: undo the toggle
    if self.daemon_connected == False:
        self.startButton.setChecked(False)
        self.startButton.setIcon(self.iconStart)
        return
    self.update_interception_status(self.startButton.isChecked())
    self._status_changed_trigger.emit(self.startButton.isChecked())
    if self.startButton.isChecked():
        nid, noti = self._nodes.start_interception(_callback=self._notification_callback)
    else:
        nid, noti = self._nodes.stop_interception(_callback=self._notification_callback)
    self._notifications_sent[nid] = noti
def _cb_new_rule_clicked(self):
    """Open the rule dialog to create a new rule."""
    self._rules_dialog.new_rule()
def _cb_edit_rule_clicked(self):
    """Edit the rule currently shown in the detail view (name taken from the
    tab label, node from the node label)."""
    cur_idx = self.tabWidget.currentIndex()
    records = self._get_rule(self.TABLES[cur_idx]['label'].text(), self.nodeRuleLabel.text())
    if records == None:
        return
    self._rules_dialog.edit_rule(records, self.nodeRuleLabel.text())
def _cb_del_rule_clicked(self):
    """Delete the rule shown in the detail view, after confirmation, then
    navigate back to the rules list."""
    ret = Message.yes_no(
        QC.translate("stats", " You are about to delete this rule. "),
        QC.translate("stats", " Are you sure?"),
        QtWidgets.QMessageBox.Warning)
    if ret == QtWidgets.QMessageBox.Cancel:
        return
    self._del_rule(self.TABLES[self.tabWidget.currentIndex()]['label'].text(), self.nodeRuleLabel.text())
    # simulate pressing "back" to leave the (now stale) detail view
    self.TABLES[self.TAB_RULES]['cmd'].click()
    self.nodeRuleLabel.setText("")
    self._refresh_active_table()
def _cb_enable_rule_toggled(self, state):
    """Enable/disable the rule shown in the detail view on its node."""
    # build a minimal Rule message: only name + enabled matter here, the
    # remaining fields are sent empty
    rule = ui_pb2.Rule(name=self.TABLES[self.tabWidget.currentIndex()]['label'].text())
    rule.enabled = False
    rule.action = ""
    rule.duration = ""
    rule.operator.type = ""
    rule.operator.operand = ""
    rule.operator.data = ""
    notType = ui_pb2.DISABLE_RULE
    if state == True:
        notType = ui_pb2.ENABLE_RULE
    rule.enabled = state
    noti = ui_pb2.Notification(type=notType, rules=[rule])
    self._notification_trigger.emit(noti)
def _cb_prev_button_clicked(self):
    # NOTE(review): both prev and next buttons call fetchMore() — confirm
    # backwards paging is intentionally unimplemented.
    model = self._get_active_table().model()
    model.fetchMore()
def _cb_next_button_clicked(self):
    # fetch the next batch of rows into the active table's model
    model = self._get_active_table().model()
    model.fetchMore()
def _cb_help_button_clicked(self):
    """Pop up the quick-help window with usage hints and a wiki link."""
    # NOTE(review): "PgUp" appears twice in the key list below; it is a
    # user-visible (translatable) string, so it is left untouched here.
    QuickHelp.show(
        QC.translate("stats",
                     "<p><b>Quick help</b></p>" \
                     "<p>- Use CTRL+c to copy selected rows.</p>" \
                     "<p>- Use Home,End,PgUp,PgDown,PgUp,Up or Down keys to navigate rows.</p>" \
                     "<p>- Use right click on a row to stop refreshing the view.</p>" \
                     "<p>- Selecting more than one row also stops refreshing the view.</p>"
                     "<p>- On the Events view, clicking on columns Node, Process or Rule<br>" \
                     "jumps to the view of the selected item.</p>" \
                     "<p>- On the rest of the views, double click on a row to get detailed<br>" \
                     " information.</p><br>" \
                     "<p>For more information visit the <a href=\"{0}\">wiki</a></p>" \
                     "<br>".format(Config.HELP_URL)
                     )
    )
# must be called after setModel() or setQuery()
def _show_columns(self):
    """Hide every events-table column whose index is not in the saved
    column configuration; do nothing when no configuration is saved."""
    saved_cols = self._cfg.getSettings(Config.STATS_SHOW_COLUMNS)
    if saved_cols == None:
        return
    for col in range(StatsDialog.GENERAL_COL_NUM):
        hidden = str(col) not in saved_cols
        self.eventsTable.setColumnHidden(col, hidden)
def _update_status_label(self, running=False, text=FIREWALL_DISABLED):
    """Refresh the status label's text and colour, and swap the start
    button's icon according to the running state."""
    self.statusLabel.setText("%12s" % text)
    if running:
        style, icon = 'color: green; margin: 5px', self.iconPause
    else:
        style, icon = 'color: rgb(206, 92, 0); margin: 5px', self.iconStart
    self.statusLabel.setStyleSheet(style)
    self.startButton.setIcon(icon)
def _get_rulesTree_item(self, index):
    """Return the top-level rules-tree item at `index`, or None on error."""
    try:
        return self.rulesTreePanel.topLevelItem(index)
    except Exception:
        # best-effort lookup: callers handle None
        return None
def _add_rulesTree_nodes(self):
    """Rebuild the nodes branch of the rules tree from the connected nodes."""
    if self._nodes.count() > 0:
        nodes_item = self.rulesTreePanel.topLevelItem(self.RULES_TREE_NODES)
        # drop stale entries before repopulating
        nodes_item.takeChildren()
        for addr in self._nodes.get_nodes():
            nodes_item.addChild(QtWidgets.QTreeWidgetItem([addr]))
def _clear_rows_selection(self):
    """Deselect all rows of the active table and clear the multi-row flag."""
    cur_idx = self.tabWidget.currentIndex()
    self.TABLES[cur_idx]['view'].selectionModel().reset()
    self.TABLES[cur_idx]['rows_selected'] = False
def _are_rows_selected(self):
    """Return True when multiple rows are selected in the active table."""
    return self.TABLES[self.tabWidget.currentIndex()]['rows_selected']
def _get_rule(self, rule_name, node_name):
    """
    get rule records, given the name of the rule and the node.

    Returns the records positioned on the first row, or None when no row
    exists (in which case the view is sent back via the 'cmd' button).
    """
    cur_idx = self.tabWidget.currentIndex()
    records = self._db.get_rule(rule_name, node_name)
    if records.next() == False:
        print("[stats dialog] edit rule, no records: ", rule_name, node_name)
        # leave the (stale) detail view
        self.TABLES[cur_idx]['cmd'].click()
        return None
    return records
def _get_filter_line_clause(self, idx, text):
    """Build the WHERE clause for the filter line of tab `idx`; empty text
    (or a tab without filtering) yields an empty string."""
    if text == "":
        return ""
    # FIXME(review): text is interpolated directly into the SQL LIKE pattern
    if idx == StatsDialog.TAB_RULES:
        return " WHERE rules.name LIKE '%{0}%' ".format(text)
    if idx in (StatsDialog.TAB_HOSTS, StatsDialog.TAB_PROCS,
               StatsDialog.TAB_ADDRS, StatsDialog.TAB_PORTS):
        return " WHERE what LIKE '%{0}%' ".format(text)
    return ""
def _get_limit(self):
    """Return the SQL LIMIT fragment for the currently selected row limit."""
    return " " + self.LIMITS[self.limitCombo.currentIndex()]
def _get_order(self, field=None):
    """Return the SQL ORDER BY fragment for the active tab.

    Uses the tab's last saved sort column unless `field` overrides it.
    """
    cur_idx = self.tabWidget.currentIndex()
    order_field = self.TABLES[cur_idx]['last_order_by']
    if field != None:
        order_field = field
    return " ORDER BY %s %s" % (order_field, self.SORT_ORDER[self.TABLES[cur_idx]['last_order_to']])
def _refresh_active_table(self):
    """Re-run the active table's last query, ensuring it carries a LIMIT."""
    model = self._get_active_table().model()
    query = model.query().lastQuery()
    if "LIMIT" not in query:
        query += self._get_limit()
    self.setQuery(model, query)
def _get_active_table(self):
    """Return the table view widget of the currently selected tab."""
    return self.TABLES[self.tabWidget.currentIndex()]['view']
def _set_active_widgets(self, state, label_txt=""):
    """Toggle the active tab's widgets between list view (state=False) and
    detail view (state=True), and save the current column layout."""
    cur_idx = self.tabWidget.currentIndex()
    self._clear_rows_selection()
    self.TABLES[cur_idx]['label'].setVisible(state)
    self.TABLES[cur_idx]['label'].setText(label_txt)
    self.TABLES[cur_idx]['cmd'].setVisible(state)
    if self.TABLES[cur_idx]['filterLine'] != None:
        self.TABLES[cur_idx]['filterLine'].setVisible(not state)
    if self.TABLES[cur_idx].get('cmdCleanStats') != None:
        if cur_idx == StatsDialog.TAB_RULES or cur_idx == StatsDialog.TAB_NODES:
            self.TABLES[cur_idx]['cmdCleanStats'].setVisible(state)
    header = self.TABLES[cur_idx]['view'].horizontalHeader()
    if state == True:
        # entering the detail view: save the list view's column layout
        self._cfg.setSettings("{0}{1}".format(Config.STATS_VIEW_COL_STATE, cur_idx), header.saveState())
    else:
        # leaving the detail view: save the detail view's column layout
        self._cfg.setSettings("{0}{1}".format(Config.STATS_VIEW_DETAILS_COL_STATE, cur_idx), header.saveState())
def _restore_last_selected_row(self):
    """Re-select the item remembered in LAST_SELECTED_ITEM on the active tab."""
    cur_idx = self.tabWidget.currentIndex()
    col = self.COL_TIME
    # NOTE(review): TAB_RULES (a tab index) is used as a COLUMN index for both
    # the rules and nodes tabs — confirm this isn't meant to be COL_R_NAME /
    # COL_NODE.
    if cur_idx == self.TAB_RULES:
        col = self.TAB_RULES
    elif cur_idx == self.TAB_NODES:
        col = self.TAB_RULES
    self.TABLES[cur_idx]['view'].selectItem(self.LAST_SELECTED_ITEM, col)
    self.LAST_SELECTED_ITEM = ""
def _restore_scroll_value(self):
    """Restore the saved vertical scroll position of the active table, then
    clear the saved value; no-op when nothing was saved."""
    if self.LAST_SCROLL_VALUE == None:
        return
    view = self.TABLES[self.tabWidget.currentIndex()]['view']
    view.vScrollBar.setValue(self.LAST_SCROLL_VALUE)
    self.LAST_SCROLL_VALUE = None
def _restore_details_view_columns(self, header, settings_key):
header.blockSignals(True);
col_state = self._cfg.getSettings(settings_key)
if type(col_state) == QtCore.QByteArray:
header.restoreState(col_state)
header.blockSignals(False);
def _restore_rules_tab_widgets(self, active):
    """Switch the rules tab widgets between list mode (active=True) and
    detail mode (active=False), re-applying the tree's selected filter."""
    self.delRuleButton.setVisible(not active)
    self.editRuleButton.setVisible(not active)
    self.nodeRuleLabel.setText("")
    self.rulesTreePanel.setVisible(active)
    if active:
        self.rulesSplitter.refresh()
        # when the tree panel is collapsed, the combo acts as its replacement
        self.comboRulesFilter.setVisible(self.rulesTreePanel.width() == 0)
        items = self.rulesTreePanel.selectedItems()
        if len(items) == 0:
            self._set_rules_filter()
            return
        # restore the filter matching the tree item that was selected
        item_m = self.rulesTreePanel.indexFromItem(items[0], 0)
        parent = item_m.parent()
        if parent != None:
            self._set_rules_filter(parent.row(), item_m.row(), item_m.data())
def _set_rules_tab_active(self, row, cur_idx, name_idx, node_idx):
    """Switch to the rules tab in detail mode for the clicked row.

    Returns (rule_name, node_addr) read from columns name_idx/node_idx.
    """
    # removed unused local `data = row.data()`
    self._restore_rules_tab_widgets(False)
    self.comboRulesFilter.setVisible(False)
    r_name = row.model().index(row.row(), name_idx).data()
    node = row.model().index(row.row(), node_idx).data()
    self.nodeRuleLabel.setText(node)
    self.tabWidget.setCurrentIndex(cur_idx)
    return r_name, node
def _set_events_query(self):
    """Build and run the events (main tab) query from the action combo and
    the filter-line text."""
    if self.tabWidget.currentIndex() != self.TAB_MAIN:
        return
    model = self.TABLES[self.TAB_MAIN]['view'].model()
    qstr = self._db.get_query(self.TABLES[self.TAB_MAIN]['name'], self.TABLES[self.TAB_MAIN]['display_fields'])
    filter_text = self.filterLine.text()
    action = ""
    if self.comboAction.currentIndex() == 1:
        action = "Action = \"{0}\"".format(Config.ACTION_ALLOW)
    elif self.comboAction.currentIndex() == 2:
        action = "Action = \"{0}\"".format(Config.ACTION_DENY)
    elif self.comboAction.currentIndex() == 3:
        action = "Action = \"{0}\"".format(Config.ACTION_REJECT)
    # FIXME: use prepared statements
    if filter_text == "":
        if action != "":
            qstr += " WHERE " + action
    else:
        if action != "":
            action += " AND "
        qstr += " WHERE " + action + " ("\
            " Process LIKE '%" + filter_text + "%'" \
            " OR Destination LIKE '%" + filter_text + "%'" \
            " OR Rule LIKE '%" + filter_text + "%'" \
            " OR Node LIKE '%" + filter_text + "%'" \
            " OR Time LIKE '%" + filter_text + "%'" \
            " OR Protocol LIKE '%" + filter_text + "%')"
    # removed a stray trailing line-continuation backslash after the clause
    # above, which glued this statement onto the previous one
    qstr += self._get_order() + self._get_limit()
    self.setQuery(model, qstr)
def _set_nodes_query(self, data):
    """Fill the nodes tab's detail view with the connections of node `data`."""
    # addresses containing '/' (presumably unix sockets) aren't filtered by
    # src_ip — TODO confirm
    s = "AND c.src_ip='%s'" % data if '/' not in data else ''
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "c.action as {1}, " \
            "count(c.process) as {2}, " \
            "c.uid as {3}, " \
            "c.protocol as {4}, " \
            "c.dst_ip as {5}, " \
            "c.dst_host as {6}, " \
            "c.dst_port as {7}, " \
            "c.process || ' (' || c.pid || ')' as {8}, " \
            "c.process_args as {9}, " \
            "c.process_cwd as CWD, " \
            "c.rule as {10} " \
            "FROM connections as c " \
            "WHERE c.node LIKE '%{11}%' {12} GROUP BY {13}, c.process_args, c.uid, c.src_ip, c.dst_ip, c.dst_host, c.dst_port, c.protocol {14}".format(
                self.COL_STR_TIME,
                self.COL_STR_ACTION,
                self.COL_STR_HITS,
                self.COL_STR_UID,
                self.COL_STR_PROTOCOL,
                self.COL_STR_DST_IP,
                self.COL_STR_DST_HOST,
                self.COL_STR_DST_PORT,
                self.COL_STR_PROCESS,
                self.COL_STR_PROC_ARGS,
                self.COL_STR_RULE,
                data, s,
                self.COL_STR_PROCESS,
                self._get_order() + self._get_limit()))
def _get_nodes_filter_query(self, lastQuery, text):
base_query = lastQuery.split("GROUP BY")
qstr = base_query[0]
if "AND" in qstr:
# strip out ANDs if any
os = qstr.split('AND')
qstr = os[0]
if text != "":
qstr += "AND (c.time LIKE '%{0}%' OR " \
"c.action LIKE '%{0}%' OR " \
"c.pid LIKE '%{0}%' OR " \
"c.src_port LIKE '%{0}%' OR " \
"c.dst_port LIKE '%{0}%' OR " \
"c.src_ip LIKE '%{0}%' OR " \
"c.dst_ip LIKE '%{0}%' OR " \
"c.dst_host LIKE '%{0}%' OR " \
"c.process LIKE '%{0}%' OR " \
"c.process_args LIKE '%{0}%')".format(text)
if len(base_query) > 1:
qstr += " GROUP BY" + base_query[1]
return qstr
def _set_rules_filter(self, parent_row=-1, item_row=0, what=""):
    """Query the rules table filtered by the rules-tree selection.

    parent_row == -1 means a top-level tree item was selected; otherwise
    (parent_row, item_row) identify the subtree entry. `what` carries either
    the rule kind or the node address, and is turned into a WHERE clause.
    """
    section = self.FILTER_TREE_APPS
    if parent_row == -1:
        # top-level item: no per-item filtering
        if item_row == self.RULES_TREE_NODES:
            section=self.FILTER_TREE_NODES
            what=""
        else:
            section=self.FILTER_TREE_APPS
            what=""
    elif parent_row == self.RULES_TREE_APPS:
        if item_row == self.RULES_TREE_PERMANENT:
            section=self.FILTER_TREE_APPS
            what=self.RULES_TYPE_PERMANENT
        elif item_row == self.RULES_TREE_TEMPORARY:
            section=self.FILTER_TREE_APPS
            what=self.RULES_TYPE_TEMPORARY
    elif parent_row == self.RULES_TREE_NODES:
        section=self.FILTER_TREE_NODES
    # translate the selection into a WHERE clause
    if section == self.FILTER_TREE_APPS:
        if what == self.RULES_TYPE_TEMPORARY:
            what = "WHERE r.duration != '%s'" % Config.DURATION_ALWAYS
        elif what == self.RULES_TYPE_PERMANENT:
            what = "WHERE r.duration = '%s'" % Config.DURATION_ALWAYS
    elif section == self.FILTER_TREE_NODES and what != "":
        what = "WHERE r.node = '%s'" % what
    # combine with the filter-line text, if any
    filter_text = self.filterLine.text()
    if filter_text != "":
        if what == "":
            what = "WHERE"
        else:
            what = what + " AND"
        what = what + " r.name LIKE '%{0}%'".format(filter_text)
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT * FROM rules as r %s %s" % (what, self._get_order()))
    self._restore_details_view_columns(
        self.TABLES[self.TAB_RULES]['view'].horizontalHeader(),
        "{0}{1}".format(Config.STATS_VIEW_COL_STATE, self.TAB_RULES)
    )
def _set_rules_query(self, rule_name="", node=""):
    """Fill the rules detail view with the connections matched by a rule,
    optionally restricted to one rule name and/or one node."""
    if node != "":
        node = "c.node = '%s' AND" % node
    if rule_name != "":
        rule_name = "r.name = '%s' AND" % rule_name
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "r.node as {1}, " \
            "count(c.process) as {2}, " \
            "c.uid as {3}, " \
            "c.protocol as {4}, " \
            "c.dst_port as {5}, " \
            "CASE c.dst_host WHEN ''" \
            " THEN c.dst_ip " \
            " ELSE c.dst_host " \
            "END {6}, " \
            "c.process as {7}, " \
            "c.process_args as {8}, " \
            "c.process_cwd as CWD " \
            "FROM rules as r, connections as c " \
            "WHERE {9} {10} r.name = c.rule AND r.node = c.node GROUP BY c.process, c.process_args, c.uid, {11}, c.dst_port {12}".format(
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_HITS,
                self.COL_STR_UID,
                self.COL_STR_PROTOCOL,
                self.COL_STR_DST_PORT,
                self.COL_STR_DESTINATION,
                self.COL_STR_PROCESS,
                self.COL_STR_PROC_ARGS,
                node,
                rule_name,
                self.COL_STR_DESTINATION,
                self._get_order()))
def _set_hosts_query(self, data):
    """Fill the hosts detail view with the connections to host `data`."""
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "c.node as {1}, " \
            "count(c.process) as {2}, " \
            "c.action as {3}, " \
            "c.uid as {4}, " \
            "c.protocol as {5}, " \
            "c.dst_port as {6}, " \
            "c.dst_ip as {7}, " \
            "c.process || ' (' || c.pid || ')' as {8}, " \
            "c.process_args as {9}, " \
            "c.process_cwd as CWD, " \
            "c.rule as {10} " \
            "FROM connections as c " \
            "WHERE c.dst_host = '{11}' GROUP BY c.pid, {12}, c.process_args, c.src_ip, c.dst_ip, c.dst_port, c.protocol, c.action, c.node {13}".format(
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_HITS,
                self.COL_STR_ACTION,
                self.COL_STR_UID,
                self.COL_STR_PROTOCOL,
                self.COL_STR_DST_PORT,
                self.COL_STR_DST_IP,
                self.COL_STR_PROCESS,
                self.COL_STR_PROC_ARGS,
                self.COL_STR_RULE,
                data,
                self.COL_STR_PROCESS,
                self._get_order("1") + self._get_limit()))
def _set_process_query(self, data):
    """Fill the process detail view with the connections of process `data`,
    and show the proc-details button only when rows were returned."""
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "c.node as {1}, " \
            "count(c.dst_ip) as {2}, " \
            "c.action as {3}, " \
            "c.uid as {4}, " \
            "CASE c.dst_host WHEN ''" \
            " THEN c.dst_ip || ' -> ' || c.dst_port " \
            " ELSE c.dst_host || ' -> ' || c.dst_port " \
            "END {5}, " \
            "c.pid as PID, " \
            "c.process_args as {6}, " \
            "c.process_cwd as CWD, " \
            "c.rule as {7} " \
            "FROM connections as c " \
            "WHERE c.process = '{8}' " \
            "GROUP BY c.src_ip, c.dst_ip, c.dst_host, c.dst_port, c.uid, c.action, c.node, c.pid, c.process_args {9}".format(
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_HITS,
                self.COL_STR_ACTION,
                self.COL_STR_UID,
                self.COL_STR_DESTINATION,
                self.COL_STR_PROC_ARGS,
                self.COL_STR_RULE,
                data,
                self._get_order("1") + self._get_limit()))
    nrows = self._get_active_table().model().rowCount()
    self.cmdProcDetails.setVisible(nrows != 0)
def _set_addrs_query(self, data):
    """Fill the addresses detail view with the connections to IP `data`."""
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "c.node as {1}, " \
            "count(c.dst_ip) as {2}, " \
            "c.action as {3}, " \
            "c.uid as {4}, " \
            "c.protocol as {5}, " \
            "CASE c.dst_host WHEN ''" \
            " THEN c.dst_ip " \
            " ELSE c.dst_host " \
            "END {6}, " \
            "c.dst_port as {7}, " \
            "c.process || ' (' || c.pid || ')' as {8}, " \
            "c.process_args as {9}, " \
            "c.process_cwd as CWD, " \
            "c.rule as {10} " \
            "FROM connections as c " \
            "WHERE c.dst_ip = '{11}' GROUP BY c.pid, {12}, c.process_args, c.src_ip, c.dst_port, {13}, c.protocol, c.action, c.uid, c.node {14}".format(
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_HITS,
                self.COL_STR_ACTION,
                self.COL_STR_UID,
                self.COL_STR_PROTOCOL,
                self.COL_STR_DESTINATION,
                self.COL_STR_DST_PORT,
                self.COL_STR_PROCESS,
                self.COL_STR_PROC_ARGS,
                self.COL_STR_RULE,
                data,
                self.COL_STR_PROCESS,
                self.COL_STR_DESTINATION,
                self._get_order("1") + self._get_limit()))
def _set_ports_query(self, data):
    """Fill the ports detail view with the connections to port `data`."""
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "c.node as {1}, " \
            "count(c.dst_ip) as {2}, " \
            "c.action as {3}, " \
            "c.uid as {4}, " \
            "c.protocol as {5}, " \
            "c.dst_ip as {6}, " \
            "CASE c.dst_host WHEN ''" \
            " THEN c.dst_ip " \
            " ELSE c.dst_host " \
            "END {7}, " \
            "c.process || ' (' || c.pid || ')' as {8}, " \
            "c.process_args as {9}, " \
            "c.process_cwd as CWD, " \
            "c.rule as {10} " \
            "FROM connections as c " \
            "WHERE c.dst_port = '{11}' GROUP BY c.pid, {12}, c.process_args, {13}, c.src_ip, c.dst_ip, c.protocol, c.action, c.uid, c.node {14}".format(
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_HITS,
                self.COL_STR_ACTION,
                self.COL_STR_UID,
                self.COL_STR_PROTOCOL,
                self.COL_STR_DST_IP,
                self.COL_STR_DESTINATION,
                self.COL_STR_PROCESS,
                self.COL_STR_PROC_ARGS,
                self.COL_STR_RULE,
                data,
                self.COL_STR_PROCESS,
                self.COL_STR_DESTINATION,
                self._get_order("1") + self._get_limit()))
def _set_users_query(self, data):
    """Fill the users detail view with the connections of a user.

    `data` may be either "name (uid)" or a bare uid; only the numeric uid
    is used in the query.
    """
    uid = data.split(" ")
    if len(uid) == 2:
        # "name (uid)" -> keep the uid without parentheses
        uid = uid[1].strip("()")
    else:
        uid = uid[0]
    model = self._get_active_table().model()
    self.setQuery(model, "SELECT " \
            "MAX(c.time) as {0}, " \
            "c.uid, " \
            "c.node as {1}, " \
            "count(c.dst_ip) as {2}, " \
            "c.action as {3}, " \
            "c.protocol as {4}, " \
            "c.dst_ip as {5}, " \
            "c.dst_host as {6}, " \
            "c.dst_port as {7}, " \
            "c.process || ' (' || c.pid || ')' as {8}, " \
            "c.process_args as {9}, " \
            "c.process_cwd as CWD, " \
            "c.rule as {10} " \
            "FROM connections as c " \
            "WHERE c.uid = '{11}' GROUP BY c.pid, {12}, c.process_args, c.src_ip, c.dst_ip, c.dst_host, c.dst_port, c.protocol, c.action, c.node {13}".format(
                self.COL_STR_TIME,
                self.COL_STR_NODE,
                self.COL_STR_HITS,
                self.COL_STR_ACTION,
                self.COL_STR_PROTOCOL,
                self.COL_STR_DST_IP,
                self.COL_STR_DESTINATION,
                self.COL_STR_DST_PORT,
                self.COL_STR_PROCESS,
                self.COL_STR_PROC_ARGS,
                self.COL_STR_RULE,
                uid,
                self.COL_STR_PROCESS,
                self._get_order("1") + self._get_limit()))
# get the query with filter by text when a tab is in the detail view.
def _get_indetail_filter_query(self, lastQuery, text):
    """Rebuild a detail-view query so it also filters rows by `text`.

    Fix: the original compared `self.IN_DETAIL_VIEW[cur_idx]` (a boolean)
    against tab-index constants, so the "exclude the field the view is
    grouped by" logic never triggered; the tab index itself is what must be
    compared.
    """
    cur_idx = self.tabWidget.currentIndex()
    base_query = lastQuery.split("GROUP BY")
    qstr = base_query[0]
    if "AND" in qstr:
        # strip out a previously appended filter clause, if any
        qstr = qstr.split('AND')[0]
    if text != "":
        qstr += " AND (c.time LIKE '%{0}%' OR " \
            "c.action LIKE '%{0}%' OR " \
            "c.pid LIKE '%{0}%' OR " \
            "c.src_port LIKE '%{0}%' OR " \
            "c.src_ip LIKE '%{0}%' OR ".format(text)
        # exclude from query the field of the view we're filtering by
        if cur_idx != self.TAB_PORTS:
            qstr += "c.dst_port LIKE '%{0}%' OR ".format(text)
        if cur_idx != self.TAB_ADDRS:
            qstr += "c.dst_ip LIKE '%{0}%' OR ".format(text)
        if cur_idx != self.TAB_HOSTS:
            qstr += "c.dst_host LIKE '%{0}%' OR ".format(text)
        if cur_idx != self.TAB_PROCS:
            qstr += "c.process LIKE '%{0}%' OR ".format(text)
        qstr += "c.process_args LIKE '%{0}%')".format(text)
    if len(base_query) > 1:
        qstr += " GROUP BY" + base_query[1]
    return qstr
@QtCore.pyqtSlot()
def _on_settings_saved(self):
    """Forward the settings-saved event to this dialog's own signal."""
    self.settings_saved.emit()
def _on_save_clicked(self):
tab_idx = self.tabWidget.currentIndex()
filename = QtWidgets.QFileDialog.getSaveFileName(self,
QC.translate("stats", 'Save as CSV'),
self._file_names[tab_idx],
'All Files (*);;CSV Files (*.csv)')[0].strip()
if filename == '':
return
with self._lock:
table = self._tables[tab_idx]
ncols = table.model().columnCount()
nrows = table.model().rowCount()
cols = []
for col in range(0, ncols):
cols.append(table.model().headerData(col, QtCore.Qt.Horizontal))
with open(filename, 'w') as csvfile:
w = csv.writer(csvfile, dialect='excel')
w.writerow(cols)
if tab_idx == self.TAB_MAIN:
w.writerows(table.model().dumpRows())
else:
for row in range(0, nrows):
values = []
for col in range(0, ncols):
values.append(table.model().index(row, col).data())
w.writerow(values)
def _setup_table(self, widget, tableWidget, table_name, fields="*", group_by="", order_by="2", sort_direction=SORT_ORDER[1], limit="", resize_cols=(), model=None, delegate=None, verticalScrollBar=None):
tableWidget.setSortingEnabled(True)
if model == None:
model = self._db.get_new_qsql_model()
if delegate != None:
tableWidget.setItemDelegate(ColorizedDelegate(self, config=delegate))
if verticalScrollBar != None:
tableWidget.setVerticalScrollBar(verticalScrollBar)
tableWidget.vScrollBar.sliderPressed.connect(self._cb_scrollbar_pressed)
tableWidget.vScrollBar.sliderReleased.connect(self._cb_scrollbar_released)
self.setQuery(model, "SELECT " + fields + " FROM " + table_name + group_by + " ORDER BY " + order_by + " " + sort_direction + limit)
tableWidget.setModel(model)
header = tableWidget.horizontalHeader()
if header != None:
header.sortIndicatorChanged.connect(self._cb_table_header_clicked)
for _, col in enumerate(resize_cols):
header.setSectionResizeMode(col, QtWidgets.QHeaderView.ResizeToContents)
cur_idx = self.tabWidget.currentIndex()
self._cfg.setSettings("{0}{1}".format(Config.STATS_VIEW_DETAILS_COL_STATE, cur_idx), header.saveState())
return tableWidget
def update_interception_status(self, enabled):
self.startButton.setDown(enabled)
self.startButton.setChecked(enabled)
if enabled:
self._update_status_label(running=True, text=self.FIREWALL_RUNNING)
else:
self._update_status_label(running=False, text=self.FIREWALL_DISABLED)
# launched from a thread
def update(self, is_local=True, stats=None, need_query_update=True):
# lock mandatory when there're multiple clients
with self._lock:
if stats is not None:
self._stats = stats
# do not update any tab if the window is not visible
if self.isVisible() and self.isMinimized() == False:
self._trigger.emit(is_local, need_query_update)
    def update_status(self):
        # Mirror daemon connectivity on the start button: pressed/checked
        # while connected, disabled once the daemon is gone.
        self.startButton.setDown(self.daemon_connected)
        self.startButton.setChecked(self.daemon_connected)
        self.startButton.setDisabled(not self.daemon_connected)
        if self.daemon_connected:
            self._update_status_label(running=True, text=self.FIREWALL_RUNNING)
        else:
            self._update_status_label(running=False, text=self.FIREWALL_STOPPED)
            # Highlight the stopped state in red.
            self.statusLabel.setStyleSheet('color: red; margin: 5px')
    @QtCore.pyqtSlot(bool, bool)
    def _on_update_triggered(self, is_local, need_query_update=False):
        # Slot for self._trigger (emitted from update()); refreshes the
        # summary labels and, optionally, the active table.
        if self._stats is None:
            # No stats yet: blank out every summary label.
            self.daemonVerLabel.setText("")
            self.uptimeLabel.setText("")
            self.rulesLabel.setText("")
            self.consLabel.setText("")
            self.droppedLabel.setText("")
        else:
            nodes = self._nodes.count()
            self.daemonVerLabel.setText(self._stats.daemon_version)
            # With more than one node the per-node counters are ambiguous,
            # so they are cleared instead of showing a single node's data.
            if nodes <= 1:
                self.uptimeLabel.setText(str(datetime.timedelta(seconds=self._stats.uptime)))
                self.rulesLabel.setText("%s" % self._stats.rules)
                self.consLabel.setText("%s" % self._stats.connections)
                self.droppedLabel.setText("%s" % self._stats.dropped)
            else:
                self.uptimeLabel.setText("")
                self.rulesLabel.setText("")
                self.consLabel.setText("")
                self.droppedLabel.setText("")
        if need_query_update:
            self._refresh_active_table()
    # prevent a click on the window's x
    # from quitting the whole application
    def closeEvent(self, e):
        # Persist UI state, accept the event so Qt stops propagating it,
        # then hide the window instead of destroying it.
        self._save_settings()
        e.accept()
        self.hide()
    def hideEvent(self, e):
        # Save column/window state whenever the dialog is hidden.
        self._save_settings()
# https://gis.stackexchange.com/questions/86398/how-to-disable-the-escape-key-for-a-dialog
def keyPressEvent(self, event):
if not event.key() == QtCore.Qt.Key_Escape:
super(StatsDialog, self).keyPressEvent(event)
def setQuery(self, model, q):
if self._context_menu_active == True or self.scrollbar_active == True or self._are_rows_selected():
return
with self._lock:
try:
model.query().clear()
model.setQuery(q, self._db_sqlite)
if model.lastError().isValid():
print("setQuery() error: ", model.lastError().text())
if self.tabWidget.currentIndex() != self.TAB_MAIN:
self.labelRowsCount.setText("{0}".format(model.rowCount()))
else:
self.labelRowsCount.setText("")
except Exception as e:
print(self._address, "setQuery() exception: ", e)
finally:
self._show_columns()
| evilsocket/opensnitch | ui/opensnitch/dialogs/stats.py | Python | gpl-3.0 | 89,901 | [
"VisIt"
] | 5d5da6317f75162986f54f8da32f6582904c2241d0da24af30ee7c0d15c2c4ce |
import os
from setuptools import setup,find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Opened with a context manager so the handle is closed promptly
    instead of leaking until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# requirements.txt holds one dependency specifier per line; split() turns
# it into the list form install_requires expects.
requirements = read('requirements.txt').split()

setup(
    name = "syft",
    version = "0.1.0",
    author = "Amber Trask",
    author_email = "contact@openmined.org",
    description = ("A library for Homomorphically Encrypted Deep Learning Algorithms"),
    license = "Apache-2.0",
    keywords = "deep learning machine artificial intelligence homomorphic encryption",
    packages=find_packages(exclude=['notebooks', 'test*','dist']),
    include_package_data=True,
    long_description=read('README.md'),
    # Full URL: a bare "github.com/..." is not rendered as a link on PyPI.
    url='https://github.com/OpenMined/Syft',
    classifiers=[
        # "1 - Alpha" is not a valid trove classifier; the alpha stage is
        # "Development Status :: 3 - Alpha".
        "Development Status :: 3 - Alpha",
    ],
    scripts=['bin/syft_cmd'],
    install_requires=requirements,
    setup_requires=['pytest-runner'],
    tests_require=['pytest']
)
| cypherai/PySyft | setup.py | Python | apache-2.0 | 1,117 | [
"Amber"
] | 2fdf7eb71168faaa9d961e7603c989020550ac667437a79b3bea5b7f1345dc0b |
#!/usr/bin/env python
"""
TurnkeyLaserExporter
-----------------------------------
Maintained by Turnkey Tyranny (https://github.com/TurnkeyTyranny/laser-gcode-exporter-inkscape-plugin)
Designed to run on Ramps 1.4 + Marlin firmware on a K40 CO2 Laser Cutter.
Based on think|haus gcode inkscape extension
Based on a script by Nick Drobchenko from the CNC club
***
Copyright (C) 2009 Nick Drobchenko, nick@cnc-club.ru
based on gcode.py (C) 2007 hugomatic...
based on addnodes.py (C) 2005,2007 Aaron Spike, aaron@ekips.org
based on dots.py (C) 2005 Aaron Spike, aaron@ekips.org
based on interp.py (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
"""
Changelog 2015-04-11:
* Adapting to Gio's laser cutter (based on Marlin on Rumba, with custom laser wiring).
* Also provide a couple of bugfixes.
Changelog 2015-02-01:
* Beginning of the project. Based on a fork from ShinyLaser(https://github.com/ajfoul/thlaser-inkscape-plugin)
Changelog 2015-02-16:
Added an option to export as Marlin or Smoothie Power levels
Changelog 2015-03-07:
Added capability to pick out power, ppm, feedrate etc from the layer names
Added code to support Pulse Per Minute burning or continuous burning. Will default to continuous.
M649 S100 L300 P10 - Set Laser settings to 100 percent power, pulses are each 300ms, and 10 pulses per mm.
G0 : Move to a new location with the laser off.
G1 : Move to a new location with the laser on.
G2 : Move in a Clockwise Arc
G3 : Move in a Counter Clockwise Arc
Name your layer like 10 [feed=600,ppm=40] for 10% power, 600mm per minute cut and 40 pulse per millimetre at 60ms duration
Changelog 2015-03-27
Changelog 2015-03-28
Fixed many many bugs, completed the work on exporting objects and images as rasters.
Fixed up as many situations I could find that threw python error messages and replaced them with meaningful notices for the user.
Changelog 2015-03-30
Accounts for strokes on objects. Conditional raster export as some items in inkscape are positioned strangely.
Changelog 2015-04-1
Need to get the 'positioning for all' functionality working as exporting many raster objects is painfully slow.
Updated script to export rasters with top left as the origin or bottom left.
"""
###
### Gcode tools
###
import inkex, simplestyle, simplepath
import cubicsuperpath, simpletransform, bezmisc
import os
import math
import bezmisc
import re
import copy
import sys
import time
import traceback
import pprint
#Image processing for rastering
import base64
from PIL import Image
import ImageOps
import subprocess
import simplestyle
import getopt
from io import BytesIO
#_ = inkex._
################################################################################
###
### Constants
###
################################################################################
VERSION = "1.0.1"
# Tolerances used when deciding a segment is "straight enough" to emit as
# a line instead of an arc.
STRAIGHT_TOLERANCE = 0.0001
STRAIGHT_DISTANCE_TOLERANCE = 0.0001
# Per the inline gcode comments: M42 P8 S<n> drives the laser pin, with a
# G4 P0 dwell first to flush the planner.
LASER_ON = "G4 P0\nM42 P8 S%d ;turn the laser on" # LASER ON MCODE
LASER_OFF = "G4 P0\nM42 P8 S0 ;turn the laser off" # LASER OFF MCODE
# Defaults used when no 'header'/'footer' file exists in the output dir.
HEADER_TEXT = ""
FOOTER_TEXT = ""
# SVG styles for drawing the computed biarcs back into the document
# (debug visualization; alternating colors per arc).
BIARC_STYLE = {
        'biarc0': simplestyle.formatStyle({ 'stroke': '#88f', 'fill': 'none', 'strokeWidth':'1' }),
        'biarc1': simplestyle.formatStyle({ 'stroke': '#8f8', 'fill': 'none', 'strokeWidth':'1' }),
        'line': simplestyle.formatStyle({ 'stroke': '#f88', 'fill': 'none', 'strokeWidth':'1' }),
        'area': simplestyle.formatStyle({ 'stroke': '#777', 'fill': 'none', 'strokeWidth':'0.1' }),
    }
# Inkscape group tag
SVG_GROUP_TAG = inkex.addNS("g", "svg")
SVG_PATH_TAG = inkex.addNS('path','svg')
SVG_IMAGE_TAG = inkex.addNS('image', 'svg')
SVG_TEXT_TAG = inkex.addNS('text', 'svg')
SVG_LABEL_TAG = inkex.addNS("label", "inkscape")
GCODE_EXTENSION = ".g" # changed to be Marlin friendly (ajf)
# Filled in with the parsed effect options at runtime.
options = {}
################################################################################
###
### Common functions
###
################################################################################
###
### Just simple output function for better debugging
###
class Logger(object):
    """Minimal file logger writing to ``thlaser.log`` in the user's home.

    The first write of a run deletes any log left from a previous run,
    so each invocation starts with a fresh file.
    """
    first = True    # becomes False (per instance) after the first write
    enabled = True  # set False to silence logging entirely

    def __init__(self):
        home = os.getenv("HOME") or os.getenv("USERPROFILE")
        self.logpath = os.path.join(home, "thlaser.log")

    def write(self, s):
        if not self.enabled:
            return
        if self.first and os.path.isfile(self.logpath):
            os.remove(self.logpath)
        self.first = False
        # Open/close per call so the log is complete even if Inkscape
        # kills the extension; the context manager guarantees the close.
        with open(self.logpath, "a") as f:
            f.write(str(s) + "\n")
# The global logger object, shared by the whole extension.
logger = Logger()
# Uncomment to record the raw command line Inkscape invoked us with:
#logger.write(" ".join(sys.argv))
###
### Point (x,y) operations
###
## Pretty much what it sounds like: defines some arithmetic functions that can be applied to points.
class P:
    """A 2-D point/vector with arithmetic, rotation and norm helpers.

    Accepts either ``P(x, y)`` or ``P((x, y))``; coordinates are stored
    as floats in ``self.x`` / ``self.y``.
    """
    def __init__(self, x, y=None):
        if y is not None:
            self.x, self.y = float(x), float(y)
        else:
            # Single sequence argument: unpack its first two items.
            self.x, self.y = float(x[0]), float(x[1])
    def __add__(self, other): return P(self.x + other.x, self.y + other.y)
    def __sub__(self, other): return P(self.x - other.x, self.y - other.y)
    def __neg__(self): return P(-self.x, -self.y)
    def __mul__(self, other):
        # P * P is the dot product; P * scalar scales the vector.
        if isinstance(other, P):
            return self.x * other.x + self.y * other.y
        return P(self.x * other, self.y * other)
    __rmul__ = __mul__
    def __div__(self, other): return P(self.x / other, self.y / other)
    # Python 3 spelling of scalar division; keeps unit() working on both
    # interpreter major versions.
    __truediv__ = __div__
    def mag(self): return math.hypot(self.x, self.y)
    def unit(self):
        # Normalized copy; the zero vector maps to (0, 0).
        h = self.mag()
        if h: return self / h
        else: return P(0,0)
    def dot(self, other): return self.x * other.x + self.y * other.y
    def rot(self, theta):
        # Counter-clockwise rotation by theta radians.
        c = math.cos(theta)
        s = math.sin(theta)
        return P(self.x * c - self.y * s, self.x * s + self.y * c)
    def angle(self): return math.atan2(self.y, self.x)
    def __repr__(self): return '%f,%f' % (self.x, self.y)
    def pr(self): return "%.2f,%.2f" % (self.x, self.y)
    def to_list(self): return [self.x, self.y]
###
### Functions to operate with CubicSuperPath
###
def csp_at_t(sp1,sp2,t):
    # Evaluate the bezier spanned by superpath nodes sp1, sp2 at t in [0, 1].
    bez = (sp1[1][:],sp1[2][:],sp2[0][:],sp2[1][:])
    return bezmisc.bezierpointatt(bez,t)
def cspbezsplit(sp1, sp2, t = 0.5):
    # Split the segment at parameter t, returning three superpath nodes
    # (start, new midpoint, end) with their control handles.
    s1,s2 = bezmisc.beziersplitatt((sp1[1],sp1[2],sp2[0],sp2[1]),t)
    return [ [sp1[0][:], sp1[1][:], list(s1[1])], [list(s1[2]), list(s1[3]), list(s2[1])], [list(s2[2]), sp2[1][:], sp2[2][:]] ]
def cspbezsplitatlength(sp1, sp2, l = 0.5, tolerance = 0.01):
    # Split at the parameter where the arc-length fraction equals l.
    bez = (sp1[1][:],sp1[2][:],sp2[0][:],sp2[1][:])
    t = bezmisc.beziertatlength(bez, l, tolerance)
    return cspbezsplit(sp1, sp2, t)
def cspseglength(sp1,sp2, tolerance = 0.001):
    # Arc length of the bezier segment between nodes sp1 and sp2.
    bez = (sp1[1][:],sp1[2][:],sp2[0][:],sp2[1][:])
    return bezmisc.bezierlength(bez, tolerance)
def csplength(csp):
    """Return (per-segment lengths, total length) of a cubic superpath.

    Uses a pairwise zip over consecutive nodes instead of the original
    Python-2-only ``xrange`` index loop.
    """
    total = 0
    lengths = []
    for sp in csp:
        for sp1, sp2 in zip(sp, sp[1:]):
            l = cspseglength(sp1, sp2)
            lengths.append(l)
            total += l
    return lengths, total
def estimate_bb_area(curve):
    """Roughly estimates the area of the bounding box of a curve.

    Only the node point (index 1) of each control-point triple is
    considered, so a curve whose handles extend past its nodes is
    underestimated.  (The original looped over every coordinate of every
    point only to re-read ``x[1]`` each time; one read per triple gives
    the identical result without the wasted iterations.)
    """
    bb = [float('inf'), float('inf'), float('-inf'), float('-inf')]
    for triple in curve:
        p = triple[1]
        assert len(p) == 2
        bb[0] = min(bb[0], p[0])
        bb[1] = min(bb[1], p[1])
        bb[2] = max(bb[2], p[0])
        bb[3] = max(bb[3], p[1])
    return (bb[2] - bb[0]) * (bb[3] - bb[1])
###
### Distance calculation from point to arc
###
def between(c, x, y):
    """True when c lies between x and y (in either order), padded by STRAIGHT_TOLERANCE."""
    lo, hi = min(x, y), max(x, y)
    return lo - STRAIGHT_TOLERANCE <= c <= hi + STRAIGHT_TOLERANCE
def distance_from_point_to_arc(p, arc):
    # Distance from point p to a circular arc [start, end, center, sweep].
    # Returns (distance, [x, y]) where [x, y] is the closest point: either
    # the radial projection of p when it falls within the arc's sweep, or
    # the nearer endpoint otherwise.
    # NOTE(review): when the radius is 0 the function falls through and
    # returns None — callers appear to rely on r > 0; confirm.
    P0,P2,c,a = arc
    dist = None
    p = P(p)
    r = (P0-c).mag()
    if r>0 :
        # i: projection of p onto the circle carrying the arc.
        i = c + (p-c).unit()*r
        # alpha: signed angle from the arc start to that projection.
        alpha = ((i-c).angle() - (P0-c).angle())
        if a*alpha<0:
            if alpha>0: alpha = alpha-2*math.pi
            else: alpha = 2*math.pi+alpha
        if between(alpha,0,a) or min(abs(alpha),abs(alpha-a))<STRAIGHT_TOLERANCE :
            # Projection lies on (or within tolerance of) the arc.
            return (p-i).mag(), [i.x, i.y]
        else :
            # Otherwise the nearest point is one of the two endpoints.
            d1, d2 = (p-P0).mag(), (p-P2).mag()
            if d1<d2 :
                return (d1, [P0.x,P0.y])
            else :
                return (d2, [P2.x,P2.y])
def get_distance_from_csp_to_arc(sp1,sp2, arc1, arc2, tolerance = 0.001 ): # arc = [start,end,center,alpha]
    # Maximum deviation between a bezier segment and its two-arc
    # approximation: sample the bezier at n+1 points, take each sample's
    # distance to the nearer of the two arcs, keep the maximum, then
    # double the sample count and repeat until the maximum stabilizes
    # within tolerance (at most two passes).
    n, i = 10, 0
    d, d1, dl = (0,(0,0)), (0,(0,0)), 0
    while i<1 or (abs(d1[0]-dl[0])>tolerance and i<2):
        i += 1
        dl = d1*1  # NOTE(review): tuple * 1 is just a copy of d1
        for j in range(n+1):
            t = float(j)/n
            p = csp_at_t(sp1,sp2,t)
            d = min(distance_from_point_to_arc(p,arc1), distance_from_point_to_arc(p,arc2))
            # Tuples compare by first element (the distance) first.
            d1 = max(d1,d)
        n=n*2
    return d1[0]
################################################################################
###
### Biarc function
###
### Calculates biarc approximation of cubic super path segment
### splits segment if needed or approximates it with straight line
###
################################################################################
def biarc(sp1, sp2, z1, z2, depth=0):
    """Approximate one cubic-superpath segment with up to two circular arcs.

    Returns a list of primitives of the form
    ``[start, 'arc'|'line', center, sweep, end, [z_start, z_end]]``.
    The segment is split recursively until the biarc fit is within
    ``options.biarc_tolerance`` or ``options.biarc_max_split_depth``.
    """
    def biarc_split(sp1, sp2, z1, z2, depth):
        # Split the bezier at its midpoint and recurse on both halves,
        # interpolating z at the split proportionally to arc length.
        if depth < options.biarc_max_split_depth:
            sp1, sp2, sp3 = cspbezsplit(sp1, sp2)
            l1, l2 = cspseglength(sp1, sp2), cspseglength(sp2, sp3)
            if l1 + l2 == 0:
                zm = z1
            else:
                zm = z1 + (z2 - z1) * l1 / (l1 + l2)
            # Bug fix: the recursive calls previously passed the arguments
            # as biarc(sp1, sp2, depth+1, z1, zm) — depth in the z1 slot —
            # and reused z1 for the second half instead of zm.
            return (biarc(sp1, sp2, z1, zm, depth + 1) +
                    biarc(sp2, sp3, zm, z2, depth + 1))
        else:
            return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]

    P0, P4 = P(sp1[1]), P(sp2[1])
    TS, TE, v = (P(sp1[2]) - P0), -(P(sp2[0]) - P4), P0 - P4
    tsa, tea = TS.angle(), TE.angle()
    if TE.mag() < STRAIGHT_DISTANCE_TOLERANCE and TS.mag() < STRAIGHT_DISTANCE_TOLERANCE:
        # Both tangents are zero - line straight
        return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]
    if TE.mag() < STRAIGHT_DISTANCE_TOLERANCE:
        TE = -(TS + v).unit()
        r = TS.mag() / v.mag() * 2
    elif TS.mag() < STRAIGHT_DISTANCE_TOLERANCE:
        TS = -(TE + v).unit()
        r = 1 / (TE.mag() / v.mag() * 2)
    else:
        r = TS.mag() / TE.mag()
    TS, TE = TS.unit(), TE.unit()
    tang_are_parallel = ((tsa - tea) % math.pi < STRAIGHT_TOLERANCE or
                         math.pi - (tsa - tea) % math.pi < STRAIGHT_TOLERANCE)
    if (tang_are_parallel and
        ((v.mag() < STRAIGHT_DISTANCE_TOLERANCE or TE.mag() < STRAIGHT_DISTANCE_TOLERANCE or TS.mag() < STRAIGHT_DISTANCE_TOLERANCE) or
         1 - abs(TS * v / (TS.mag() * v.mag())) < STRAIGHT_TOLERANCE)):
        # Both tangents are parallel and start/end coincide (or one tangent
        # is below tolerance) - emit a straight line.
        return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]

    c, b, a = v * v, 2 * v * (r * TS + TE), 2 * r * (TS * TE - 1)
    if v.mag() == 0:
        return biarc_split(sp1, sp2, z1, z2, depth)
    asmall, bsmall, csmall = abs(a) < 10**-10, abs(b) < 10**-10, abs(c) < 10**-10
    if asmall and b != 0:
        beta = -c / b
    elif csmall and a != 0:
        beta = -b / a
    elif not asmall:
        discr = b * b - 4 * a * c
        # Python-2-only "raise E, args" syntax replaced with the
        # version-independent call form (same exception, same payload).
        if discr < 0:
            raise ValueError((a, b, c, discr))
        disq = discr**.5
        beta1 = (-b - disq) / 2 / a
        beta2 = (-b + disq) / 2 / a
        if beta1 * beta2 > 0:
            raise ValueError((a, b, c, disq, beta1, beta2))
        beta = max(beta1, beta2)
    elif asmall and bsmall:
        return biarc_split(sp1, sp2, z1, z2, depth)
    alpha = beta * r
    ab = alpha + beta
    P1 = P0 + alpha * TS
    P3 = P4 - beta * TE
    P2 = (beta / ab) * P1 + (alpha / ab) * P3

    def calculate_arc_params(P0, P1, P2):
        # Circle through P0, P1, P2: returns (center, signed sweep) or
        # (None, None) when degenerate or the radius is out of range.
        D = (P0 + P2) / 2
        if (D - P1).mag() == 0:
            return None, None
        R = D - ((D - P0).mag()**2 / (D - P1).mag()) * (P1 - D).unit()
        p0a, p1a, p2a = (P0 - R).angle() % (2 * math.pi), (P1 - R).angle() % (2 * math.pi), (P2 - R).angle() % (2 * math.pi)
        alpha = (p2a - p0a) % (2 * math.pi)
        if (p0a < p2a and (p1a < p0a or p2a < p1a)) or (p2a < p1a < p0a):
            alpha = -2 * math.pi + alpha
        # Bug fix: `.mag` was compared without calling it (always False in
        # Python 2), so the min_arc_radius guard never triggered.
        if abs(R.x) > 1000000 or abs(R.y) > 1000000 or (R - P0).mag() < options.min_arc_radius:
            return None, None
        else:
            return R, alpha

    R1, a1 = calculate_arc_params(P0, P1, P2)
    R2, a2 = calculate_arc_params(P2, P3, P4)
    if R1 is None or R2 is None or (R1 - P0).mag() < STRAIGHT_TOLERANCE or (R2 - P2).mag() < STRAIGHT_TOLERANCE:
        return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]
    d = get_distance_from_csp_to_arc(sp1, sp2, [P0, P2, R1, a1], [P2, P4, R2, a2])
    if d > options.biarc_tolerance and depth < options.biarc_max_split_depth:
        return biarc_split(sp1, sp2, z1, z2, depth)
    else:
        if R2.mag() * a2 == 0:
            zm = z2
        else:
            zm = z1 + (z2 - z1) * (R1.mag() * a1) / (R2.mag() * a2 + R1.mag() * a1)
        return [[sp1[1], 'arc', [R1.x, R1.y], a1, [P2.x, P2.y], [z1, zm]],
                [[P2.x, P2.y], 'arc', [R2.x, R2.y], a2, [P4.x, P4.y], [zm, z2]]]
################################################################################
###
### Inkscape helper functions
###
################################################################################
# Returns true if the given node is a layer
def is_layer(node):
    """Return True when *node* is an Inkscape layer group."""
    groupmode = node.get(inkex.addNS("groupmode", "inkscape"), None)
    logger.write("Considering tag %s with groupmode %s" % (node.tag, groupmode))
    return node.tag == SVG_GROUP_TAG and groupmode == "layer"
def get_layers(document):
    """Collect the document's top-level Inkscape layers, in order."""
    found = []
    for child in document.getroot().iterchildren():
        if not is_layer(child):
            continue
        logger.write("FOUND!")
        found.append(child)
    return found
def parse_layer_name(txt):
    """Split a layer label like ``10 [feed=600,ppm=40]`` into (name, params).

    The text before the first ``[`` becomes the layer name; the bracketed
    ``key=value`` pairs become the params dict, with ``feed`` and ``ppm``
    coerced to float.  Raises ValueError on malformed arguments.
    """
    params = {}
    try:
        n = txt.index("[")
    except ValueError:
        # No argument block: the whole label is the name.
        layerName = txt.strip()
    else:
        layerName = txt[0:n].strip()
        args = txt[n+1:].strip()
        if args.endswith("]"):
            args = args[0:-1]
        for arg in args.split(","):
            try:
                (field, value) = arg.split("=")
            except ValueError:
                raise ValueError("Invalid argument in layer '%s'" % layerName)
            if field == "feed" or field == "ppm":
                try:
                    value = float(value)
                except ValueError:
                    # Fixed: the old message called the bad *value* a layer
                    # name, which made the error hard to act on.
                    raise ValueError("Invalid value '%s' for '%s' in layer '%s'" % (value, field, layerName))
            params[field] = value
            logger.write("%s == %s" % (field, value))
    return (layerName, params)
################################################################################
###
### Gcode tools class
###
################################################################################
class Gcode_tools(inkex.Effect):
    def __init__(self):
        """Declare all command-line/inkscape options for the exporter."""
        inkex.Effect.__init__(self)
        # Default the output directory to the user's Desktop, falling back
        # to the current working directory when no home dir is known.
        outdir = os.getenv("HOME") or os.getenv("USERPROFILE")
        if (outdir):
            outdir = os.path.join(outdir, "Desktop")
        else:
            outdir = os.getcwd()
        # Output location and scaling/offset of the generated gcode.
        self.OptionParser.add_option("-d", "--directory", action="store", type="string", dest="directory", default=outdir, help="Directory for gcode file")
        self.OptionParser.add_option("-f", "--filename", action="store", type="string", dest="file", default="-1.0", help="File name")
        self.OptionParser.add_option("-u", "--Xscale", action="store", type="float", dest="Xscale", default="1.0", help="Scale factor X")
        self.OptionParser.add_option("-v", "--Yscale", action="store", type="float", dest="Yscale", default="1.0", help="Scale factor Y")
        self.OptionParser.add_option("-x", "--Xoffset", action="store", type="float", dest="Xoffset", default="0.0", help="Offset along X")
        self.OptionParser.add_option("-y", "--Yoffset", action="store", type="float", dest="Yoffset", default="0.0", help="Offset along Y")
        # added move (laser off) feedrate and laser intensity; made all int rather than float - (ajf)
        self.OptionParser.add_option("-m", "--Mfeed", action="store", type="int", dest="Mfeed", default="2000", help="Default Move Feed rate in unit/min")
        self.OptionParser.add_option("-p", "--feed", action="store", type="int", dest="feed", default="300", help="Default Cut Feed rate in unit/min")
        self.OptionParser.add_option("-l", "--laser", action="store", type="int", dest="laser", default="10", help="Default Laser intensity (0-255)")
        # Homing behavior around the job.
        self.OptionParser.add_option("-b", "--homebefore", action="store", type="inkbool", dest="homebefore", default=True, help="Home all beofre starting (G28)")
        self.OptionParser.add_option("-a", "--homeafter", action="store", type="inkbool", dest="homeafter", default=False, help="Home X Y at end of job")
        # Path ordering and biarc-approximation quality knobs.
        self.OptionParser.add_option("", "--draw-order", action="store", type="string", dest="draw_order", default="inside_first", help="Drawing order ('inside-first', 'outside-first' or 'no_sort')")
        self.OptionParser.add_option("", "--biarc-tolerance", action="store", type="float", dest="biarc_tolerance", default="1", help="Tolerance used when calculating biarc interpolation.")
        self.OptionParser.add_option("", "--biarc-max-split-depth", action="store", type="int", dest="biarc_max_split_depth", default="4", help="Defines maximum depth of splitting while approximating using biarcs.")
        self.OptionParser.add_option("", "--unit", action="store", type="string", dest="unit", default="G21 (All units in mm)\n", help="Units")
        self.OptionParser.add_option("", "--function", action="store", type="string", dest="function", default="Curve", help="What to do: Curve|Area|Area inkscape")
        self.OptionParser.add_option("", "--tab", action="store", type="string", dest="tab", default="", help="Means nothing right now. Notebooks Tab.")
        self.OptionParser.add_option("", "--generate_not_parametric_code",action="store", type="inkbool", dest="generate_not_parametric_code", default=False,help="Generated code will be not parametric.")
        self.OptionParser.add_option("", "--double_sided_cutting",action="store", type="inkbool", dest="double_sided_cutting", default=False,help="Generate code for double-sided cutting.")
        self.OptionParser.add_option("", "--draw-curves", action="store", type="inkbool", dest="drawCurves", default=False,help="Draws curves to show what geometry was processed")
        self.OptionParser.add_option("", "--logging", action="store", type="inkbool", dest="logging", default=False, help="Enable output logging from the plugin")
        # Loft options (path interpolation between multiple paths).
        self.OptionParser.add_option("", "--loft-distances", action="store", type="string", dest="loft_distances", default="10", help="Distances between paths.")
        self.OptionParser.add_option("", "--loft-direction", action="store", type="string", dest="loft_direction", default="crosswise", help="Direction of loft's interpolation.")
        self.OptionParser.add_option("", "--loft-interpolation-degree",action="store", type="float", dest="loft_interpolation_degree", default="2", help="Which interpolation use to loft the paths smooth interpolation or staright.")
        self.OptionParser.add_option("", "--min-arc-radius", action="store", type="float", dest="min_arc_radius", default="0.0005", help="All arc having radius less than minimum will be considered as straight line")
        # Target board and coordinate origin.
        self.OptionParser.add_option("", "--mainboard", action="store", type="string", dest="mainboard", default="ramps", help="Mainboard")
        self.OptionParser.add_option("", "--origin", action="store", type="string", dest="origin", default="topleft", help="Origin of the Y Axis")
    def parse_curve(self, path):
        """Convert a parsed path dict into drawing primitives.

        Vector paths are (optionally) sorted by bounding-box area per the
        draw_order option, then each subpath is turned into move/line/arc
        primitives via biarc().  Raster entries pass through untouched.
        """
#        if self.options.Xscale!=self.options.Yscale:
#            xs,ys = self.options.Xscale,self.options.Yscale
#            self.options.Xscale,self.options.Yscale = 1.0, 1.0
#        else :
        xs,ys = 1.0,1.0

#        ### Sort to reduce Rapid distance
#        np = [p[0]]
#        del p[0]
#        while len(p)>0:
#            end = np[-1][-1][1]
#            dist = None
#            for i in range(len(p)):
#                start = p[i][0][1]
#
#                dist = max(   (  -( ( end[0]-start[0])**2+(end[1]-start[1])**2 )   ,i)  ,   dist )
#            np += [p[dist[1]][:]]
#            del p[dist[1]]
#        p = np[:]
        if(path['type'] ==  "vector") :
            lst = {}
            lst['type'] = "vector"
            lst['data'] = []
            # If we sort with respect to the area of the BB, we have
            # more or less draw first things inside. It is good to
            # draw later outer borders, so that pieces inside do not
            # risk to move while they detach from the bulk material
            # (however, this also probably draw paths in a completely
            # unoptimized order).
            if self.options.draw_order == 'inside_first':
                path['data'].sort(key=estimate_bb_area)
            elif self.options.draw_order == 'outside_first':
                path['data'].sort(key=estimate_bb_area, reverse=True)
            for subpath in path['data']:
                # Rapid move to the subpath's first node, then biarc every
                # bezier segment, then mark the end of the subpath.
                lst['data'].append(
                    [[subpath[0][1][0]*xs, subpath[0][1][1]*ys], 'move', 0, 0]
                )
                for i in range(1,len(subpath)):
                    sp1 = [  [subpath[i-1][j][0]*xs, subpath[i-1][j][1]*ys] for j in range(3)]
                    sp2 = [  [subpath[i  ][j][0]*xs, subpath[i  ][j][1]*ys] for j in range(3)]
                    lst['data'] += biarc(sp1,sp2,0,0)
                lst['data'].append(
                    [[subpath[-1][1][0]*xs, subpath[-1][1][1]*ys], 'end', 0, 0]
                )
            return lst
        #Raster image data, cut/burn left to right, drop down a line, repeat in reverse until completed.
        else:
            #No need to modify
            return path
    def draw_curve(self, curve, group=None, style=BIARC_STYLE):
        """Render computed move/line/arc primitives back into the SVG.

        Debug visualization only: each primitive becomes an svg path (or
        a sodipodi arc) in *group*, alternating arc colors per BIARC_STYLE.
        """
        if group==None:
            group = inkex.etree.SubElement( self.biarcGroup, SVG_GROUP_TAG )
        s, arcn = '', 0
        for si in curve:
            # Each primitive is drawn from the previous element `s` to the
            # current one `si`; the first iteration only primes `s`.
            if s!='':
                if s[1] == 'line':
                    inkex.etree.SubElement(    group, SVG_PATH_TAG,
                            {
                                'style': style['line'],
                                'd':'M %s,%s L %s,%s' % (s[0][0], s[0][1], si[0][0], si[0][1]),
                                'comment': str(s)
                            }
                        )
                elif s[1] == 'arc':
                    arcn += 1
                    sp = s[0]
                    c = s[2]
                    # Recompute the sweep from the endpoints and give it the
                    # sign of the stored direction s[3].
                    a =  ( (P(si[0])-P(c)).angle() - (P(s[0])-P(c)).angle() )%(2*math.pi) #s[3]
                    if s[3]*a<0:
                            if a>0:    a = a-2*math.pi
                            else: a = 2*math.pi+a
                    r = math.sqrt( (sp[0]-c[0])**2 + (sp[1]-c[1])**2 )
                    a_st = ( math.atan2(sp[0]-c[0],- (sp[1]-c[1])) - math.pi/2 ) % (math.pi*2)
                    if a>0:
                        a_end = a_st+a
                    else:
                        a_end = a_st*1
                        a_st = a_st+a
                    inkex.etree.SubElement(    group, inkex.addNS('path','svg'),
                         {
                            'style': style['biarc%s' % (arcn%2)],
                             inkex.addNS('cx','sodipodi'):        str(c[0]),
                             inkex.addNS('cy','sodipodi'):        str(c[1]),
                             inkex.addNS('rx','sodipodi'):        str(r),
                             inkex.addNS('ry','sodipodi'):        str(r),
                             inkex.addNS('start','sodipodi'):    str(a_st),
                             inkex.addNS('end','sodipodi'):        str(a_end),
                             inkex.addNS('open','sodipodi'):    'true',
                             inkex.addNS('type','sodipodi'):    'arc',
                            'comment': str(s)
                        })
            s = si
def check_dir(self):
if (os.path.isdir(self.options.directory)):
if (os.path.isfile(self.options.directory+'/header')):
f = open(self.options.directory+'/header', 'r')
self.header = f.read()
f.close()
else:
self.header = HEADER_TEXT
if (os.path.isfile(self.options.directory+'/footer')):
f = open(self.options.directory+'/footer','r')
self.footer = f.read()
f.close()
else:
self.footer = FOOTER_TEXT
else:
inkex.errormsg(("Directory specified for output gcode does not exist! Please create it."))
return False
return True
    # Turns a list of arguments into gcode-style parameters (eg (1, 2, 3) -> "X1 Y2 Z3"),
    # taking scaling, offsets and the "parametric curve" setting into account
    def make_args(self, c):
        # Pad/truncate to exactly 6 slots: X, Y, Z, I, J, K.
        c = [c[i] if i<len(c) else None for i in range(6)]
        if c[5] == 0:
            c[5] = None
        # Choose per-axis multipliers (m) and additive offsets (a); s holds
        # the axis letters, s1 is an (always empty) per-axis suffix.
        if self.options.generate_not_parametric_code:
            s = ["X", "Y", "Z", "I", "J", "K"]
            s1 = ["","","","","",""]
            # my replacement that hopefully makes sense (adina, june 22 2010)
            m = [self.options.Xscale, -self.options.Yscale, 1,
                 self.options.Xscale, -self.options.Yscale, 1]
            a = [self.options.Xoffset, self.options.Yoffset, 0, 0, 0, 0]
        else:
            s = ["X", "Y", "Z", "I", "J", "K"]
            s1 = ["", "", "", "", "", ""]
            m = [1, -1, 1, 1, -1, 1]
            a = [0, 0, 0, 0, 0, 0]
        #Invert the y axis only if the origin should be the bottom left.
        if (self.options.origin == 'topleft'):
            # NOTE(review): this overwrites the multipliers chosen above,
            # discarding Xscale/Yscale in the not-parametric case — confirm
            # whether that is intentional.
            m = [1, 1, 1, 1, -1, 1]
        else:
            a[1] += self.pageHeight
        args = []
        for i in range(6):
            if c[i]!=None:
                value = self.unitScale*(c[i]*m[i]+a[i])
                args.append(s[i] + ("%f" % value) + s1[i])
        return " ".join(args)
    def generate_raster_gcode(self, curve, laserPower, altfeed=None):
        """Emit G7 raster gcode for one image: M649 header, then base64 pixel rows.

        Rows are burned boustrophedon (alternating direction), trimmed on
        each side to the extent of data in the current and next row.
        """
        gcode = ''
        forward = True
        #Setup our feed rate, either from the layer name or from the default value.
        if (altfeed):
            # Use the "alternative" feed rate specified
            cutFeed = "F%i" % altfeed
        else:
            # NOTE(review): both branches below are identical, so the
            # parametric/non-parametric distinction has no effect here.
            if self.options.generate_not_parametric_code:
                cutFeed = "F%i" % self.options.feed
            else:
                cutFeed = "F%i" % self.options.feed
        #This extension assumes that your copy of Inkscape is running at 90dpi (it is by default)
        #R = mm per pixel
        #R = 1 / dots per mm
        #90dpi = 1 / (90 / 25.4)
        gcode += '\n\n;Beginning of Raster Image '+str(curve['id'])+' pixel size: '+str(curve['width'])+'x'+str(curve['height'])+'\n'
        gcode += 'M649 S'+str(laserPower)+' B2 D0 R0.09406\n'
        #Do not remove these two lines, they're important. Will not raster correctly if feedrate is not set prior.
        #Move fast to point, cut at correct speed.
        # NOTE(review): cutFeed is a string ("F300") compared to the int
        # Mfeed; under Python 2 a str never compares less than an int, so
        # the fast-move line below is never emitted — confirm intent.
        if(cutFeed < self.options.Mfeed):
            gcode += 'G0 X'+str(curve['x'])+' Y'+str(curve['y'])+' F'+str(self.options.Mfeed)+'\n'
        gcode += 'G0 X'+str(curve['x'])+' Y'+str(curve['y'])+' '+cutFeed+'\n'

        #def get_chunks(arr, chunk_size = 51):
        def get_chunks(arr, chunk_size = 51):
            # Slice arr into lists of at most chunk_size pixels.
            chunks  = [ arr[start:start+chunk_size] for start in range(0, len(arr), chunk_size)]
            return chunks

        #return the index of the last pixel that holds data.
        def last_in_list(arr):
            end = 0
            for i in range(len(arr)):
                if (arr[i] > 0):
                    end = i
            return end

        #return the index of the first pixel that holds data.
        def first_in_list(arr):
            end = 0
            for i in range(len(arr)):
                if (arr[i] == 0):
                    end = i
                if (arr[i] > 0):
                    break
            return end

        first = True
        #Flip the image top to bottom.
        row = curve['data'][::-1]
        #Turnkey - 29/3/15 - No more flipping.
        #row = curve['data']

        previousRight = 99999999999
        previousLeft  = 0
        firstRow = True
        for index, rowData in enumerate(row):
            splitRight = 0
            splitLeft = 0
            if(index+1 < len(row)):
                # Determine where to split the lines.
                ##################################################
                # Trim each side to the wider of this row's and the next
                # row's data extent, so the head is positioned for the
                # return pass.
                #If the left most pixel of the next row is earlier than the current row, then extend.
                if(first_in_list(row[index +1]) > first_in_list(rowData)):
                    splitLeft = first_in_list(rowData)
                else:
                    splitLeft = first_in_list(row[index +1])
                #If the end pixel of the next line is later than the current line, extend.
                if(last_in_list(row[index +1]) > last_in_list(rowData)):
                    splitRight = last_in_list(row[index +1])
                else:
                    splitRight = last_in_list(rowData)
            else:
                splitLeft = first_in_list(rowData)
                splitRight = last_in_list(rowData)
            #Positive direction
            if forward:
                #Split the right side.
                ###########################################
                #Don't split more than the start of the last row as we print in reverse for alternate lines
                splitLeft = previousLeft
                previousRight = splitRight
            #Negative direction
            else:
                #Split the left side.
                ###########################################
                #Don't split more than the end of the last row as we print in reverse for alternate lines
                splitRight = previousRight
                previousLeft = splitLeft
            #Exception to the rule : Don't split the left of the first row.
            if(firstRow):
                splitLeft = (previousLeft)
                firstRow = False
            row2 = rowData[(splitLeft+1):(splitRight+1)]
            # Reverse pixel order on the return pass.
            if not forward:
                result_row = row2[::-1]
            else:
                result_row = row2
            # Emit the row as base64 chunks; the first chunk of a pass also
            # carries the direction flag ($1 forward, $0 reverse).
            for chunk in get_chunks(result_row,51):
                if first:
                    if forward:
                        gcode += ("\nG7 $1 ")
                    else:
                        gcode += ("\nG7 $0 ")
                    first = not first
                else:
                    gcode += ("G7 ")
                b64 = base64.b64encode("".join(chr(y) for y in chunk))
                gcode += ("L"+str(len(b64))+" ")
                gcode += ("D"+b64+ "\n")
            forward = not forward
            first = not first
        gcode += ("M5 \n");
        gcode += ';End of Raster Image '+str(curve['id'])+'\n\n'
        return gcode
def generate_gcode(self, curve, depth, laserPower, altfeed=None, altppm=None):
gcode = ''
#Setup our feed rate, either from the layer name or from the default value.
if (altfeed):
# Use the "alternative" feed rate specified
cutFeed = "F%i" % altfeed
else:
if self.options.generate_not_parametric_code:
cutFeed = "F%i" % self.options.feed
else:
cutFeed = "F%i" % self.options.feed
#Setup our pulse per millimetre option, if applicable
#B: laser firing mode (0 = continuous, 1 = pulsed, 2 = raster)
if (altppm):
# Use the "alternative" ppm - L60000 is 60us
ppmValue = "L60000 P%.2f B1 D0" % altppm
else:
#Set the laser firing mode to continuous.
ppmValue = "B0 D0"
ppmValue = ""
cwArc = "G02"
ccwArc = "G03"
# The geometry is reflected, so invert the orientation of the arcs to match
if (self.flipArcs):
(cwArc, ccwArc) = (ccwArc, cwArc)
# The 'laser on' and 'laser off' m-codes get appended to the GCODE generation
lg = 'G00'
for i in range(1,len(curve['data'])):
s, si = curve['data'][i-1], curve['data'][i]
#G00 : Move with the laser off to a new point
if s[1] == 'move':
#Turn off the laser if it was on previously.
if lg != "G00":
gcode += LASER_OFF + "\n"
gcode += "G00" + " " + self.make_args(si[0]) + " F%i" % self.options.Mfeed + "\n"
lg = 'G00'
elif s[1] == 'end':
if lg != "G00":
gcode += LASER_OFF + "\n"
lg = 'G00'
#G01 : Move with the laser turned on to a new point
elif s[1] == 'line':
if lg == "G00":
gcode += LASER_ON % (laserPower) + "\n"
gcode += "G01 " + self.make_args(si[0]) + " %s " % cutFeed + "%s" % ppmValue + "\n"
lg = 'G01'
#G02 and G03 : Move in an arc with the laser turned on.
elif s[1] == 'arc':
dx = s[2][0]-s[0][0]
dy = s[2][1]-s[0][1]
if abs((dx**2 + dy**2)*self.options.Xscale) > self.options.min_arc_radius:
r1 = P(s[0])-P(s[2])
r2 = P(si[0])-P(s[2])
if abs(r1.mag() - r2.mag()) < 0.001:
if lg == "G00":
gcode += LASER_ON % (laserPower) + "\n"
if (s[3] > 0):
gcode += cwArc
else:
gcode += ccwArc
gcode += " " + self.make_args(si[0] + [None, dx, dy, None]) + " %s " % cutFeed + "%s" % ppmValue + "\n"
else:
r = (r1.mag()+r2.mag())/2
if lg == "G00":
gcode += LASER_ON % (laserPower) + "\n"
if (s[3] > 0):
gcode += cwArc
else:
gcode += ccwArc
gcode += " " + self.make_args(si[0]) + " R%f" % (r*self.options.Xscale) + " %s " % cutFeed + "%s" % ppmValue + "\n"
lg = cwArc
#The arc is less than the minimum arc radius, draw it as a straight line.
else:
if lg == "G00":
gcode += LASER_ON % (laserPower) + "\n"
gcode += "G01 " + self.make_args(si[0]) + " %s " % cutFeed + "%s" % ppmValue + "\n"
lg = 'G01'
#The end of the layer.
if si[1] == 'end':
gcode += LASER_OFF + '\n'
return gcode
def tool_change(self):
# Include a tool change operation
gcode = TOOL_CHANGE % (self.currentTool+1)
# Select the next available tool
self.currentTool = (self.currentTool+1) % 32
return gcode
#Determine the tmp directory for the user's operating system.
def getTmpPath(self):
"""Define the temporary folder path depending on the operating system"""
if os.name == 'nt':
return 'C:\\WINDOWS\\Temp\\'
else:
return '/tmp/'
################################################################################
###
### Curve to Gcode
###
################################################################################
def effect_curve(self, selected):
selected = list(selected)
# Set group
if self.options.drawCurves and len(selected)>0:
self.biarcGroup = inkex.etree.SubElement( selected[0].getparent(), SVG_GROUP_TAG )
options.Group = self.biarcGroup
# Recursively compiles a list of paths that are decendant from the given node
self.skipped = 0
def compile_paths(parent, node, trans):
# Apply the object transform, along with the parent transformation
mat = node.get('transform', None)
path = {}
if mat:
mat = simpletransform.parseTransform(mat)
trans = simpletransform.composeTransform(trans, mat)
logger.write("traceback: %s" % ("".join(traceback.format_stack())))
logger.write("id = %s; trans = %r" % (node.get('id', ''), trans))
if node.tag == SVG_PATH_TAG:
# This is a path object
if (not node.get("d")): return []
csp = cubicsuperpath.parsePath(node.get("d"))
path['type'] = "vector"
path['id'] = node.get("id")
path['data'] = []
if (trans):
simpletransform.applyTransformToPath(trans, csp)
path['data'] = csp
return path
elif node.tag == SVG_GROUP_TAG:
# This node is a group of other nodes
pathsGroup = []
for child in node.iterchildren():
data = compile_paths(parent, child, trans)
#inkex.errormsg(str(data))
if type(data) is not list:
pathsGroup.append(data.copy())
else:
pathsGroup += data
return pathsGroup
else :
#Raster the results.
if(node.get("x") > 0):
tmp = self.getTmpPath() #OS tmp directory
bgcol = "#ffffff" #White
curfile = curfile = self.args[-1] #The current inkscape project we're exporting from.
command="inkscape --export-dpi 270 -i %s --export-id-only -e \"%stmpinkscapeexport.png\" -b \"%s\" %s" % (node.get("id"),tmp,bgcol,curfile)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return_code = p.wait()
f = p.stdout
err = p.stderr
#Fetch the image Data
filename = "%stmpinkscapeexport.png" % (tmp)
if (self.options.origin == 'topleft'):
im = Image.open(filename).transpose(Image.FLIP_TOP_BOTTOM).convert('L')
else:
im = Image.open(filename).convert('L')
img = ImageOps.invert(im)
#Get the image size
imageDataWidth, imageDataheight = img.size
#Compile the pixels.
pixels = list(img.getdata())
pixels = [pixels[i * (imageDataWidth):(i + 1) * (imageDataWidth)] for i in xrange(imageDataheight)]
path['type'] = "raster"
path['width'] = imageDataWidth
path['height'] = imageDataheight
#A slow, but reliable way of getting correct coordinates since working with inkscape transpositions and transforms is a major pain in the ass.
#command="inkscape -X --query-id=%s %s" % (node.get("id"),curfile)
#p2 = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#return_code = p2.wait()
#text = p2.communicate()[0]
#x_position = float(text)
#command="inkscape -Y --query-id=%s %s" % (node.get("id"),curfile)
#p3 = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#return_code = p3.wait()
#text = p3.communicate()[0]
#y_position = float(text)*-1+self.pageHeight
if not hasattr(parent, 'glob_nodePositions'):
#Get the XY position of all elements in the inkscape job.
command="inkscape -S %s" % (curfile)
p5 = subprocess.Popen(command, stdout=subprocess.PIPE)
dataString = str(p5.communicate()[0]).split('\r\n')
del dataString[-1]
elementList = dict((item.split(",",1)[0],item.split(",",1)[1]) for item in dataString)
parent.glob_nodePositions = elementList
#Lookup the xy coords for this node.
elementData = parent.glob_nodePositions[node.get("id")].split(',')
x_position = float(elementData[0])
y_position = float(elementData[1])*-1+self.pageHeight
#Text is y positioned from the top left.
if (self.options.origin == 'topleft'):
#Don't flip the y position. Since we're moving the origin from bottom left to top left.
y_position = float(elementData[1])
else:
#Very small loss of positioning due to conversion of the dpi in the exported image.
y_position -= imageDataheight/3
#Convert from pixels to mm
path['x'] = float(str("%.3f") %(self.unitScale * x_position))
path['y'] = float(str("%.3f") %(self.unitScale * y_position))
#Do not permit being < 0
if(path['y'] < 0):
path['y'] = 0
if(path['x'] < 0):
path['x'] = 0
path['id'] = node.get("id")
path['data'] = pixels
return path
else:
inkex.errormsg("Unable to generate raster for object " + str(node.get("id"))+" as it does not have an x-y coordinate associated.")
"""
elif node.tag == SVG_IMAGE_TAG:
#inkex.errormsg( )
#Work together to destroy
#visit https://www.python.org/downloads/ and download python 2.7.9
#Install it
#In the folder : C:\Program Files\Inkscape you will need to rename the folder "Python" to "Python-old" so it uses the new system install instead.
#pip install wheel
#From http://www.lfd.uci.edu/~gohlke/pythonlibs/#pil , download "Pillow-2.7.0-cp27-none-win32.whl"
#pip install Pillow-2.7.0-cp27-none-win32.whl
#You're good to go!
#Fetch the image Data
inkscapeWidth = int(float(node.get("width")))
inkscapeHeight = int(float(node.get("height")))
data = str((node.get(inkex.addNS('href','xlink')) )).replace("data:image/png;base64,","").replace("data:image/jpeg;base64,","")
im = Image.open(BytesIO(base64.b64decode(data))).convert('L')
img = ImageOps.invert(im)
imageDataWidth, imageDataheight = img.size
#Resize to match the dimensions in Inkscape
im_resized = img.resize((inkscapeWidth*3, inkscapeHeight*3), Image.ANTIALIAS)
#Resize the image here for highter DPI - say 300dpi
#Compile the pixels.
pixels = list(im_resized.getdata())
pixels = [pixels[i * (inkscapeWidth*3):(i + 1) * (inkscapeWidth * 3)] for i in xrange(inkscapeHeight*3)]
path['type'] = "raster"
path['width'] = inkscapeWidth
path['height'] = inkscapeHeight
path['x'] = self.unitScale*(float(node.get("x")) * 1)
#Add the height in px from inkscape from the image, as its top is measured from the origin top left, though in inkscape the origin is bottom left so we need to begin scanning the px at the bottom of the image for our laser bed.
path['y'] = self.unitScale * ((float(node.get("y"))+float(node.get("height")))*-1+self.pageHeight)
path['id'] = node.get("id")
path['data'] = pixels
#inkex.errormsg(str(path))
return path
#The object isn't a path, and it's not an image. Convert it to an image to be rastered.
"""
inkex.errormsg("skipping node " + str(node.get("id")))
self.skipped += 1
return []
# Compile a list of layers in this document. We compile a list of only the layers
# we need to use, so we can know ahead of time whether to put tool change
# operations between them.
layers = []
logger.write("layers = %r" % (layers))
for layer in reversed(get_layers(self.document)):
for node in layer.iterchildren():
if (node in selected):
layers.append(layer)
break
logger.write("layers = %r" % (layers))
layers = list(reversed(get_layers(self.document)))
logger.write("layers = %r" % (layers))
# Loop over the layers and objects
gcode = ""
gcode_raster = ""
for layer in layers:
logger.write("Looping over %r" % (layer))
label = layer.get(SVG_LABEL_TAG).strip()
if (label.startswith("#")):
# Ignore everything selected in this layer
for node in layer.iterchildren():
if (node in selected):
selected.remove(node)
continue
logger.write("Was not cut")
# Parse the layer label text, which consists of the layer name followed
# by an optional number of arguments in square brackets.
try:
originalLayerName = label
(layerName, layerParams) = parse_layer_name(label)
except ValueError,e:
inkex.errormsg("Your inkscape layer is named incorrectly. Please use the format '20 [ppm=40,feed=300]' without the quotes. This would set the power at 20%, cutting at 300mm per minute at a pulse rate of 40 pulse per millimetre. The ppm option is optional, leaving it out will set the laser to continuous wave mode.")
return
logger.write("Saluti!")
# Check if the layer specifies an alternative (from the default) feed rate
altfeed = layerParams.get("feed", self.options.feed)
altppm = layerParams.get("ppm", None)
logger.write("layer %s" % layerName)
if (layerParams):
logger.write("layer params == %s" % layerParams)
pathList = []
# Apply the layer transform to all objects within the layer
trans = layer.get('transform', None)
trans = simpletransform.parseTransform(trans)
logger.write("Layer has transform %r" % (trans))
for node in layer.iterchildren():
logger.write("node = %r, selected = %r" % (node, selected))
# Next line is wrong: you loose objects that
# indirectly belong to the layer's child
if (node in selected) or True:
#Vector path data, cut from x to y in a line or curve
logger.write("node %s" % str(node.tag))
#selected.remove(node)
try:
newPath = compile_paths(self, node, trans).copy();
pathList.append(newPath)
inkex.errormsg("Built gcode for "+str(node.get("id"))+" - will be cut as %s." % (newPath['type']) )
except:
messageOnce = True
for objectData in compile_paths(self, node, trans):
#if (messageOnce):
inkex.errormsg("Built gcode for group "+str(node.get("id"))+", item %s - will be cut as %s." % (objectData['id'], objectData['type']) )
#messageOnce = False
pathList.append(objectData)
else:
logger.write("skipping node %s" % node)
if (not pathList):
logger.write("no objects in layer")
continue
#Determind the power of the laser that this layer should be cut at.
#If the layer is not named as an integer value then default to the laser intensity set at the export settings.
#Fetch the laser power from the export dialog box.
laserPower = self.options.laser
try:
if (int(layerName) >= 0 and int(layerName) <= 255):
laserPower = int(layerName)
else :
laserPower = self.options.laser
except ValueError,e:
laserPower = self.options.laser
inkex.errormsg("Unable to parse power level for layer name. Using default power level %d." % (self.options.laser))
#Switch between smoothie power levels and ramps+marlin power levels
#ramps and marlin expect 0 to 100 while smoothie wants 0.0 to 1.0
if (self.options.mainboard == 'smoothie'):
laserPower = float(laserPower) / 100
#Fetch the vector or raster data and turn it into GCode
for objectData in pathList:
curve = self.parse_curve(objectData)
header_data = ""
#Turnkey : Always output the layer header for information.
if (len(layers) > 0):
header_data += LASER_OFF+"\n"
size = 60
header_data += ";(%s)\n" % ("*"*size)
header_data += (";(***** Layer: %%-%ds *****)\n" % (size-19)) % (originalLayerName)
header_data += (";(***** Laser Power: %%-%ds *****)\n" % (size-25)) % (laserPower)
header_data += (";(***** Feed Rate: %%-%ds *****)\n" % (size-23)) % (altfeed)
if(altppm):
header_data += (";(***** Pulse Rate: %%-%ds *****)\n" % (size-24)) % (altppm)
header_data += ";(%s)\n" % ("*"*size)
header_data += ";(MSG,Starting layer '%s')\n\n" % originalLayerName
#Generate the GCode for this layer
if (curve['type'] == "vector"):
#Should the curves be drawn in inkscape?
if (self.options.drawCurves):
self.draw_curve(curve)
gcode += header_data+self.generate_gcode(curve, 0, laserPower, altfeed=altfeed, altppm=altppm)
elif (curve['type'] == "raster"):
gcode_raster += header_data+self.generate_raster_gcode(curve, laserPower, altfeed=altfeed)
#Turnkey - Need to figure out why inkscape sometimes gets to this point and hasn't found the objects above.
# If there are any objects left over, it's because they don't belong
# to any inkscape layer (bug in inkscape?). Output those now.
if (selected) and False:
pathList = []
# Use the identity transform (eg no transform) for the root objects
trans = simpletransform.parseTransform("")
for node in selected:
try:
newPath = compile_paths(self, node, trans).copy();
pathList.append(newPath)
inkex.errormsg("Built gcode for "+str(node.get("id"))+" - will be cut as %s." % (newPath['type']) )
except:
messageOnce = True
for objectData in compile_paths(self, node, trans):
#if (messageOnce):
inkex.errormsg("Built gcode for group "+str(node.get("id"))+", item %s - will be cut as %s." % (objectData['id'], objectData['type']) )
#messageOnce = False
pathList.append(objectData)
if (pathList):
for objectData in pathList:
curve = self.parse_curve(objectData)
#Determind the power of the laser that this layer should be cut at.
#If the layer is not named as an integer value then default to the laser intensity set at the export settings.
#Fetch the laser power from the export dialog box.
laserPower = self.options.laser
try:
if (int(layerName) >= 0 and int(layerName) <= 255):
laserPower = int(layerName)
else :
laserPower = self.options.laser
except ValueError,e:
laserPower = self.options.laser
inkex.errormsg("Unable to parse power level for layer name. Using default power level %d." % (self.options.laser))
#Switch between smoothie power levels and ramps+marlin power levels
#ramps and marlin expect 0 to 100 while smoothie wants 0.0 to 1.0
if (self.options.mainboard == 'smoothie'):
laserPower = float(laserPower) / 100
header_data = ""
#Turnkey : Always output the layer header for information.
if (len(layers) > 0):
header_data += LASER_OFF+"\n"
size = 60
header_data += ";(%s)\n" % ("*"*size)
header_data += (";(***** Layer: %%-%ds *****)\n" % (size-19)) % (originalLayerName)
header_data += (";(***** Laser Power: %%-%ds *****)\n" % (size-25)) % (laserPower)
header_data += (";(***** Feed Rate: %%-%ds *****)\n" % (size-23)) % (altfeed)
if(altppm):
header_data += (";(***** Pulse Rate: %%-%ds *****)\n" % (size-24)) % (altppm)
header_data += ";(%s)\n" % ("*"*size)
header_data += ";(MSG,Starting layer '%s')\n\n" % originalLayerName
#Generate the GCode for this layer
if (curve['type'] == "vector"):
#Should the curves be drawn in inkscape?
if (self.options.drawCurves):
self.draw_curve(curve)
gcode += header_data+self.generate_gcode(curve, 0, laserPower, altfeed=altfeed, altppm=altppm)
elif (curve['type'] == "raster"):
gcode_raster += header_data+self.generate_raster_gcode(curve, laserPower, altfeed=altfeed)
if self.options.homeafter:
gcode += "\n\nG00 X0 Y0 F4000 ; home"
#Always raster before vector cutting.
gcode = gcode_raster+"\n\n"+gcode
return gcode
def effect(self):
global options
options = self.options
selected = self.selected.values()
root = self.document.getroot()
#See if the user has the document setup in mm or pixels.
try:
self.pageHeight = float(root.get("height", None))
except:
inkex.errormsg(("Please change your inkscape project units to be in pixels, not inches or mm. In Inkscape press ctrl+shift+d and change 'units' on the page tab to px. The option 'default units' can be set to mm or inch, these are the units displayed on your rulers."))
return
self.flipArcs = (self.options.Xscale*self.options.Yscale < 0)
self.currentTool = 0
self.filename = options.file.strip()
if (self.filename == "-1.0" or self.filename == ""):
inkex.errormsg(("Please select an output file name."))
return
if (not self.filename.lower().endswith(GCODE_EXTENSION)):
# Automatically append the correct extension
self.filename += GCODE_EXTENSION
logger.enabled = self.options.logging
logger.write("Laser script started")
logger.write("output file == %s" % self.options.file)
if len(selected)<=0:
inkex.errormsg(("This extension requires at least one selected path."))
return
dirExists = self.check_dir()
if (not dirExists):
return
gcode = self.header;
# Here it is assumed that one Inkscape base unit is 1/90 of an
# inch (this is true up to Inkscape 0.91, as indicated in
# http://wiki.inkscape.org/wiki/index.php/Units_In_Inkscape;
# then it should pass to the CSS standards 1/96)
if (self.options.unit == "mm"):
self.unitScale = 25.4 / 90.0
gcode += "G21 ; All units in mm\n"
elif (self.options.unit == "in"):
self.unitScale = 1.0 / 90.0
gcode += "G20 ; All units in in\n"
else:
inkex.errormsg(("You must choose mm or in"))
return
if not self.options.generate_not_parametric_code:
gcode += """
; Raster data will always precede vector data
; Default Cut Feedrate %i mm per minute
; Default Move Feedrate %i mm per minute
; Default Laser Intensity %i percent\n""" % (self.options.feed, self.options.Mfeed, self.options.laser)
if self.options.homebefore:
gcode += "G28 ; home all\n\n"
#if self.options.function == 'Curve':
data = self.effect_curve(selected)
if data:
gcode += data
if (self.options.double_sided_cutting):
gcode += "\n\n;(MSG,Please flip over material)\n\n"
# Include a tool change operation
gcode += self.tool_change()
logger.write("*** processing mirror image")
self.options.Yscale *= -1
self.flipArcs = not(self.flipArcs)
self.options.generate_not_parametric_code = True
self.pageHeight = 0
gcode += self.effect_curve(selected)
try:
f = open(self.options.directory+'/'+self.options.file, "w")
f.write(gcode + self.footer)
f.close()
except:
inkex.errormsg(("Can not write to specified file!"))
return
if (self.skipped > 0):
inkex.errormsg(("Warning: skipped %d object(s) because they were not paths (Vectors) or images (Raster). Please convert them to paths using the menu 'Path->Object To Path'" % self.skipped))
e = Gcode_tools()
e.affect()
inkex.errormsg("Finished processing.")
| giomasce/thlaser-inkscape-plugin | turnkeylaser.py | Python | gpl-2.0 | 60,842 | [
"VisIt"
] | e2773d9ad2d1804dc449870222cb0a0274ad2068c23facb273398c4b530070c4 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Contains parsers for all supported programs"""
# These import statements are added for the convenience of users...
# Rather than having to type:
# from cclib.parser.gaussianparser import Gaussian
# they can use:
# from cclib.parser import Gaussian
from .adfparser import ADF
from .gamessparser import GAMESS
from .gamessukparser import GAMESSUK
from .gaussianparser import Gaussian
from .jaguarparser import Jaguar
from .molproparser import Molpro
from .nwchemparser import NWChem
from .orcaparser import ORCA
from .psiparser import Psi
from .qchemparser import QChem
# This allow users to type:
# from cclib.parser import ccopen
from .ccopen import ccopen
from .data import ccData
| Clyde-fare/cclib | src/cclib/parser/__init__.py | Python | lgpl-2.1 | 1,190 | [
"ADF",
"GAMESS",
"Gaussian",
"Jaguar",
"Molpro",
"NWChem",
"ORCA",
"cclib"
] | ac623f2c208d31471874574f3ffedf7631a900234815d86d3fc326e609915139 |
"""
lwr_client
==========
This module contains logic for interfacing with an external LWR server.
------------------
Configuring Galaxy
------------------
Galaxy job runners are configured in Galaxy's ``job_conf.xml`` file. See ``job_conf.xml.sample_advanced``
in your Galaxy code base or on
`Bitbucket <https://bitbucket.org/galaxy/galaxy-dist/src/tip/job_conf.xml.sample_advanced?at=default>`_
for information on how to configure Galaxy to interact with the LWR.
Galaxy also supports an older, less rich configuration of job runners directly
in its main ``galaxy.ini`` file. The following section describes how to
configure Galaxy to communicate with the LWR in this legacy mode.
Legacy
------
A Galaxy tool can be configured to be executed remotely via LWR by
adding a line to the ``galaxy.ini`` file under the ``galaxy:tool_runners``
section with the format::
<tool_id> = lwr://http://<lwr_host>:<lwr_port>
As an example, if a host named remotehost is running the LWR server
application on port ``8913``, then the tool with id ``test_tool`` can
be configured to run remotely on remotehost by adding the following
line to ``galaxy.ini``::
test_tool = lwr://http://remotehost:8913
Remember this must be added after the ``[galaxy:tool_runners]`` header
in the ``universe.ini`` file.
"""
from .staging.down import finish_job
from .staging.up import submit_job
from .staging import ClientJobDescription
from .staging import LwrOutputs
from .staging import ClientOutputs
from .client import OutputNotFoundException
from .manager import build_client_manager
from .destination import url_to_destination_params
from .path_mapper import PathMapper
__all__ = [
build_client_manager,
OutputNotFoundException,
url_to_destination_params,
finish_job,
submit_job,
ClientJobDescription,
LwrOutputs,
ClientOutputs,
PathMapper,
]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/jobs/runners/lwr_client/__init__.py | Python | gpl-3.0 | 1,873 | [
"Galaxy"
] | 83cbb8e84f8de4492189dee1abbb90b85921e997f53e736c6b44e7f7a2c5c67b |
from __future__ import division, print_function; __metaclass__ = type
import os
import errno
import random
import time
import glob
import numpy as np
import west
from west.propagators import WESTPropagator
from west import Segment
from west.states import BasisState, InitialState
import simtk.openmm.openmm as openmm
import simtk.unit as units
import logging
log = logging.getLogger(__name__)
log.debug('loading module %r' % __name__)
pcoord_dtype = np.float32
class OpenMMPropagator(WESTPropagator):
def __init__(self, rc=None):
super(OpenMMPropagator, self).__init__(rc)
self.pcoord_dtype = pcoord_dtype
self.pcoord_ndim = 1
# Default platform properties
self.platform_properties = {'OpenCLPrecision': 'mixed',
'OpenCLPlatformIndex': '0',
'OpenCLDeviceIndex': '0',
'CudaPrecision': 'mixed',
'CudaDeviceIndex': '0'}
config = self.rc.config
# Validate configuration
for key in [('west', 'openmm', 'system', 'file'),
('west', 'openmm', 'integrator', 'file'),
('west', 'openmm', 'integrator', 'steps_per_tau'),
('west', 'openmm', 'integrator', 'steps_per_write'),
('west', 'openmm', 'platform', 'name'),
('west', 'data', 'data_refs', 'initial_state')]:
config.require(key)
self.initial_state_ref_template = config['west','data','data_refs','initial_state']
self.basis_state_ref_template = config['west','data','data_refs','basis_state']
system_xml_file = config['west', 'openmm', 'system', 'file']
self.integrator_xml_file = config['west', 'openmm', 'integrator', 'file']
self.steps_per_tau = config['west', 'openmm', 'integrator', 'steps_per_tau']
self.steps_per_write = config['west', 'openmm', 'integrator', 'steps_per_write']
self.nblocks = (self.steps_per_tau // self.steps_per_write) + 1
platform_name = config['west', 'openmm', 'platform', 'name'] or 'Reference'
# Set up OpenMM
with open(system_xml_file, 'r') as f:
# NOTE: calling the system self.system causes a namespace collision in the propagator
self.mmsystem = openmm.XmlSerializer.deserialize(f.read())
with open(self.integrator_xml_file, 'r') as f:
integrator = openmm.XmlSerializer.deserialize(f.read())
self.platform = openmm.Platform.getPlatformByName(platform_name)
self.temperature = integrator.getTemperature()
@staticmethod
def dist(x, y):
return np.sqrt(np.sum((x-y)**2))
@staticmethod
def makepath(template, template_args=None,
expanduser=True, expandvars=True, abspath=False, realpath=False):
template_args = template_args or {}
path = template.format(**template_args)
if expandvars: path = os.path.expandvars(path)
if expanduser: path = os.path.expanduser(path)
if realpath: path = os.path.realpath(path)
if abspath: path = os.path.abspath(path)
path = os.path.normpath(path)
return path
@staticmethod
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_pcoord(self, state):
if isinstance(state, BasisState):
template_args = {'basis_state': state}
bstate_data_ref = self.makepath(self.basis_state_ref_template, template_args)
print(bstate_data_ref)
coords = 10.0 * np.load(bstate_data_ref)
elif isinstance(state, InitialState):
template_args = {'initial_state': state}
istate_data_ref = self.makepath(self.initial_state_ref_template, template_args)
coords = 10.0 * np.load(istate_data_ref)
else:
raise TypeError('state must be BasisState or InitialState')
state.pcoord = self.dist(coords[0,:], coords[1,:])
print(state.pcoord)
@staticmethod
def load_parent_data(n_iter):
restart_files = glob.glob('traj_segs/iter_{:06d}_*.npz'.format(n_iter))
parent_coords = {}
parent_velocs = {}
for rf in restart_files:
data = np.load(rf)
for si, seg_id in enumerate(data['seg_ids']):
parent_coords[seg_id] = data['coords'][si]
parent_velocs[seg_id] = data['velocs'][si]
return parent_coords, parent_velocs
def propagate(self, segments):
platform_properties = {key: value for key, value in self.platform_properties.iteritems() if key.startswith(self.platform.getName())}
try:
process_id = os.environ['WM_PROCESS_INDEX']
if self.platform.getName() == 'OpenCL':
platform_properties['OpenCLDeviceIndex'] = process_id
elif self.platform.getName() == 'CUDA':
platform_properties['CudaDeviceIndex'] = process_id
elif self.platform.getName() == 'CPU':
platform_properties['CpuThreads'] = '1'
except KeyError:
process_id = 0
with open(self.integrator_xml_file, 'r') as f:
integrator = openmm.XmlSerializer.deserialize(f.read())
integrator.setRandomNumberSeed(random.randint(0, 2**16))
context = openmm.Context(self.mmsystem, integrator, self.platform, platform_properties)
if segments[0].n_iter > 1:
parent_coords, parent_velocs = self.load_parent_data(segments[0].n_iter - 1)
block_coordinates = np.empty((len(segments), self.mmsystem.getNumParticles(), 3))
block_velocities = np.empty((len(segments), self.mmsystem.getNumParticles(), 3))
block_seg_ids = np.empty(len(segments), dtype=np.int)
for si, segment in enumerate(segments):
starttime = time.time()
# Set up arrays to hold trajectory data for pcoords, coordinates and velocities
pcoords = np.empty((self.nblocks, 1))
pcoords[0] = segment.pcoord[0]
coordinates = np.empty((self.nblocks, self.mmsystem.getNumParticles(), 3))
velocities = np.empty((self.nblocks, self.mmsystem.getNumParticles(), 3))
# Get initial coordinates and velocities from restarts or initial state
if segment.initpoint_type == Segment.SEG_INITPOINT_CONTINUES:
# Get restart data
coordinates[0] = parent_coords[segment.parent_id]
velocities[0] = parent_velocs[segment.parent_id]
initial_coords = units.Quantity(parent_coords[segment.parent_id], units.nanometer)
initial_velocs = units.Quantity(parent_velocs[segment.parent_id], units.nanometer / units.picosecond)
context.setPositions(initial_coords)
context.setVelocities(initial_velocs)
elif segment.initpoint_type == Segment.SEG_INITPOINT_NEWTRAJ:
initial_state_id = segment.initial_state_id
basis_state_id = self.initial_states[initial_state_id].basis_state_id
assert basis_state_id in [0,1]
if basis_state_id == 0:
tag = '_a'
else:
tag = '_b'
basis_fname = os.path.join(os.environ['WEST_SIM_ROOT'], 'bstates', 'init_coords{}.npy'.format(tag))
initial_coords = units.Quantity(np.load(basis_fname), units.nanometer)
# Set up context for this segment
context.setPositions(initial_coords)
context.setVelocitiesToTemperature(self.temperature)
state = context.getState(getPositions=True, getVelocities=True)
coordinates[0] = state.getPositions(asNumpy=True)
velocities[0] = state.getVelocities(asNumpy=True)
# Run dynamics
for istep in xrange(1, self.nblocks):
integrator.step(self.steps_per_write)
state = context.getState(getPositions=True, getVelocities=True)
coordinates[istep] = state.getPositions(asNumpy=True)
velocities[istep] = state.getVelocities(asNumpy=True)
pcoords[istep] = 10.0 * self.dist(coordinates[istep,0,:], coordinates[istep,1,:])
# Check for system blowing up
assert pcoords[istep] < 12.0, 'pcoord dist: {}'.format(pcoords[istep])
assert coordinates[istep].max() < 20.0, 'max coord: {}'.format(coordinates[istep].max())
# Finalize segment trajectory
segment.pcoord = pcoords[...].astype(pcoord_dtype)
segment.status = Segment.SEG_STATUS_COMPLETE
block_coordinates[si] = coordinates[-1]
block_velocities[si] = velocities[-1]
block_seg_ids[si] = segment.seg_id
segment.walltime = time.time() - starttime
np.savez_compressed('traj_segs/iter_{:06d}_{:06d}.npz'.format(segments[0].n_iter, block_seg_ids[0]),
coords=block_coordinates,
velocs=block_velocities,
seg_ids=block_seg_ids)
return segments
| nrego/westpa | lib/examples/wca-dimer_openmm/we_custom/openmm_propagator.py | Python | gpl-3.0 | 9,399 | [
"OpenMM"
] | 06be87de16052591d01fa372dfb300add7691229a25b78c1cc83ce116537f4ab |
import neuron
if __name__ == '__main__':
    # Script entry point: run the calcium measurement routine from the
    # `neuron` package when executed directly (no-op on import).
    neuron.measure_ca2()
| lennart96/neurons | main.py | Python | isc | 67 | [
"NEURON"
] | 59c955167344c80964142a30e490e39886c774428d4fe30937bfff1222d29c93 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Cached networks to use with the population classes, as the only
variables being used is "nodes_ex" and "nodes_in" VERSION THAT WORKS.
"""
import numpy as np
import os
from glob import glob
if 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
from .gdf import GDF
import matplotlib.pyplot as plt
import h5py
from mpi4py import MPI
################# Initialization of MPI stuff ############################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
############## Functions #################################################
def remove_axis_junk(ax, which=('right', 'top')):
    """
    Remove axis lines from axes object that exist in list which.

    Parameters
    ----------
    ax : `matplotlib.axes.AxesSubplot` object
    which : sequence of str
        Entries in ['right', 'top', 'bottom', 'left']; spines named here
        are hidden. A tuple default avoids the mutable-default-argument
        pitfall (callers may still pass a list).

    Returns
    -------
    None
    """
    # Hide the selected spines by painting them with the 'none' color.
    for loc, spine in ax.spines.items():
        if loc in which:
            spine.set_color('none')
    # Keep ticks only on the remaining (bottom/left) axes.
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
################ Classes #################################################
class CachedNetwork(object):
    """
    Offline processing and storing of network spike events, used by other
    class objects in the package hybridLFPy.

    Parameters
    ----------
    simtime : float
        Simulation duration.
    dt : float,
        Simulation timestep size.
    spike_output_path : str
        Path to gdf/dat-files with spikes.
    label : str
        Prefix of spiking gdf/dat-files.
    ext : str
        File extension of gdf/dat-files.
    GIDs : dict
        dictionary keys are population names and value a list of length 2
        with first GID in population and population size
    X : list
        names of each network population
    autocollect : bool
        If True, class init will process gdf/dat files.
    cmap : str
        Name of colormap, must be in `dir(plt.cm)`.

    Returns
    -------
    `hybridLFPy.cachednetworks.CachedNetwork` object

    See also
    --------
    CachedFixedSpikesNetwork, CachedNoiseNetwork
    """

    def __init__(self,
                 simtime=1000.,
                 dt=0.1,
                 spike_output_path='spike_output_path',
                 label='spikes',
                 ext='gdf',
                 GIDs=None,
                 X=None,
                 autocollect=True,
                 skiprows=0,
                 cmap='Dark2',
                 ):
        """
        Offline processing and storing of network spike events, used by other
        class objects in the package `hybridLFPy`.

        Parameters
        ----------
        simtime : float
            Simulation duration.
        dt : float
            Simulation timestep size.
        spike_output_path : str
            Path to gdf-files with spikes.
        label : str
            Prefix of spiking gdf-files.
        ext : str
            File extension of gdf-files.
        GIDs : dict or None
            dictionary keys are population names and item a list with first
            GID in population and population size. ``None`` selects the
            default ``{'EX': [1, 400], 'IN': [401, 100]}``.
        X : list or None
            names of each network population. ``None`` selects ``['EX', 'IN']``.
        autocollect : bool
            If True, class init will process gdf files.
        skiprows : int
            Number of skipped first lines
        cmap : str
            Name of colormap, must be in dir(plt.cm).

        Returns
        -------
        `hybridLFPy.cachednetworks.CachedNetwork` object

        See also
        --------
        CachedFixedSpikesNetwork, CachedNoiseNetwork
        """
        # Avoid shared mutable default arguments; these fallbacks reproduce
        # the previous defaults exactly.
        if GIDs is None:
            GIDs = {'EX': [1, 400], 'IN': [401, 100]}
        if X is None:
            X = ['EX', 'IN']

        # Set some attributes
        self.simtime = simtime
        self.dt = dt
        self.spike_output_path = spike_output_path
        self.label = label
        self.ext = ext
        self.dbname = ':memory:'
        self.GIDs = GIDs
        self.X = X
        self.autocollect = autocollect
        self.skiprows = skiprows

        # Create a dictionary of nodes with proper layernames
        self.nodes = {}
        for pop in self.X:
            self.nodes[pop] = np.arange(self.GIDs[pop][1]) + self.GIDs[pop][0]

        # list population sizes
        self.N_X = np.array([self.GIDs[pop][1] for pop in self.X])

        if self.autocollect:
            # collect the gdf files
            self.collect_gdf()

        # Specify some plot colors used for each population; a thalamic 'TC'
        # population, if present, is drawn in black and excluded from the
        # colormap count.
        if 'TC' in self.X:
            numcolors = len(self.X) - 1
        else:
            numcolors = len(self.X)

        self.colors = []
        for i in range(numcolors):
            self.colors += [plt.get_cmap(cmap, numcolors)(i)]

        if 'TC' in self.X:
            self.colors = ['k'] + self.colors

    def collect_gdf(self):
        """
        Collect the gdf-files from network sim in folder `spike_output_path`
        into sqlite database, using the GDF-class.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        # Resync
        COMM.Barrier()

        # Raise Exception if there are no gdf files to be read
        if len(glob(os.path.join(self.spike_output_path,
                                 self.label + '*.' + self.ext))) == 0:
            raise Exception(
                'path to files contain no {}-files!'.format(self.ext))

        # create in-memory databases of spikes
        if not hasattr(self, 'dbs'):
            self.dbs = {}

        for X in self.X:
            # self.dbname is ':memory:'; os.path.join() with a single
            # argument was a no-op, so pass it directly.
            db = GDF(self.dbname,
                     debug=True, new_db=True)
            db.create(re=os.path.join(self.spike_output_path,
                                      '{0}*{1}*{2}'.format(self.label, X,
                                                           self.ext)),
                      index=True,
                      skiprows=self.skiprows)
            self.dbs.update({
                X: db
            })

        COMM.Barrier()

    def get_xy(self, xlim, fraction=1.):
        """
        Get pairs of node units and spike trains on specific time interval.

        Parameters
        ----------
        xlim : list of floats
            Spike time interval, e.g., [0., 1000.].
        fraction : float in [0, 1.]
            If less than one, sample a fraction of nodes in random order.

        Returns
        -------
        x : dict
            In `x` key-value entries are population name and neuron spike
            times.
        y : dict
            Where in `y` key-value entries are population name and neuron
            gid number.
        """
        x = {}
        y = {}

        for X, nodes in self.nodes.items():
            x[X] = np.array([])
            y[X] = np.array([])

            if fraction != 1:
                nodes = sorted(
                    np.random.permutation(nodes)[
                        :int(
                            nodes.size *
                            fraction)])

            spiketimes = self.dbs[X].select_neurons_interval(nodes, T=xlim)
            i = 0
            for times in spiketimes:
                x[X] = np.r_[x[X], times]
                y[X] = np.r_[y[X], np.zeros(times.size) + nodes[i]]
                i += 1

        return x, y

    def plot_raster(self, ax, xlim, x, y, pop_names=False,
                    markersize=20., alpha=1., legend=True,
                    marker='o', rasterized=True):
        """
        Plot network raster plot in subplot object.

        Parameters
        ----------
        ax : `matplotlib.axes.AxesSubplot` object
            plot axes
        xlim : list
            List of floats. Spike time interval, e.g., [0., 1000.].
        x : dict
            Key-value entries are population name and neuron spike times.
        y : dict
            Key-value entries are population name and neuron gid number.
        pop_names: bool
            If True, show population names on yaxis instead of gid number.
        markersize : float
            raster plot marker size
        alpha : float in [0, 1]
            transparency of marker
        legend : bool
            Switch on axes legends.
        marker : str
            marker symbol for matplotlib.pyplot.plot
        rasterized : bool
            if True, the scatter plot will be treated as a bitmap embedded in
            pdf file output

        Returns
        -------
        None
        """
        yoffset = [sum(self.N_X) if X == 'TC' else 0 for X in self.X]
        for i, X in enumerate(self.X):
            if y[X].size > 0:
                ax.plot(
                    x[X],
                    y[X] + yoffset[i],
                    marker,
                    markersize=markersize,
                    mfc=self.colors[i],
                    mec='none' if marker in '.ov><v^1234sp*hHDd'
                    else self.colors[i],
                    alpha=alpha,
                    label=X,
                    rasterized=rasterized,
                    clip_on=True)
        # don't draw anything for the may-be-quiet TC population
        N_X_sum = 0
        for i, X in enumerate(self.X):
            if y[X].size > 0:
                N_X_sum += self.N_X[i]

        ax.axis([xlim[0], xlim[1],
                 self.GIDs[self.X[0]][0], self.GIDs[self.X[0]][0] + N_X_sum])
        ax.set_ylim(ax.get_ylim()[::-1])
        ax.set_ylabel('cell id', labelpad=0)
        ax.set_xlabel('$t$ (ms)', labelpad=0)
        if legend:
            ax.legend()
        if pop_names:
            yticks = []
            yticklabels = []
            for i, X in enumerate(self.X):
                # BUGFIX: `y[X] != []` performed an element-wise NumPy
                # comparison whose truth value is ambiguous/deprecated;
                # test for non-empty arrays explicitly instead.
                if y[X].size > 0:
                    yticks.append(y[X].mean() + yoffset[i])
                    yticklabels.append(self.X[i])
            ax.set_yticks(yticks)
            ax.set_yticklabels(yticklabels)

        # Add some horizontal lines separating the populations
        for i, X in enumerate(self.X):
            if y[X].size > 0:
                ax.plot([xlim[0], xlim[1]],
                        [y[X].max() + yoffset[i], y[X].max() + yoffset[i]],
                        'k', lw=0.25)

    def plot_f_rate(
            self,
            ax,
            X,
            i,
            xlim,
            x,
            y,
            binsize=1,
            yscale='linear',
            plottype='fill_between',
            show_label=False,
            rasterized=False):
        """
        Plot network firing rate plot in subplot object.

        Parameters
        ----------
        ax : `matplotlib.axes.AxesSubplot` object.
        X : str
            Population name.
        i : int
            Population index in class attribute `X`.
        xlim : list of floats
            Spike time interval, e.g., [0., 1000.].
        x : dict
            Key-value entries are population name and neuron spike times.
        y : dict
            Key-value entries are population name and neuron gid number.
        yscale : 'str'
            Linear, log, or symlog y-axes in rate plot.
        plottype : str
            plot type string in `['fill_between', 'bar']`
        show_label : bool
            whether or not to show labels

        Returns
        -------
        None
        """
        bins = np.arange(xlim[0], xlim[1] + binsize, binsize)
        (hist, bins) = np.histogram(x[X], bins=bins)

        # Scale spike counts per bin to a per-neuron rate in s^-1
        # (1 ms bins; hence the factor 1000/N).
        if plottype == 'fill_between':
            ax.fill_between(bins[:-1],
                            hist * 1000. / self.N_X[i],
                            color=self.colors[i],
                            lw=0.5,
                            label=X,
                            rasterized=rasterized,
                            clip_on=False)
            ax.plot(bins[:-1], hist * 1000. / self.N_X[i],
                    color='k', lw=0.5, label=X, rasterized=rasterized,
                    clip_on=False)
        elif plottype == 'bar':
            ax.bar(bins[:-1], hist * 1000. / self.N_X[i],
                   color=self.colors[i], label=X, rasterized=rasterized,
                   linewidth=0.25, width=0.9, clip_on=False)
        else:
            mssg = "plottype={} not in ['fill_between', 'bar']".format(
                plottype)
            raise Exception(mssg)

        remove_axis_junk(ax)

        ax.axis(ax.axis('tight'))
        ax.set_yscale(yscale)
        ax.set_xlim(xlim[0], xlim[1])

        if show_label:
            ax.text(xlim[0] + .05 * (xlim[1] - xlim[0]), ax.axis()[3] * 1.5, X,
                    va='center', ha='left')

    def raster_plots(self, xlim=[0, 1000], markersize=1, alpha=1., marker='o'):
        """
        Pretty plot of the spiking output of each population as raster and
        rate.

        Parameters
        ----------
        xlim : list
            List of floats. Spike time interval, e.g., `[0., 1000.]`.
        markersize : float
            marker size for plot, see `matplotlib.pyplot.plot`
        alpha : float
            transparency for markers, see `matplotlib.pyplot.plot`
        marker : :mod:`A valid marker style <matplotlib.markers>`

        Returns
        -------
        fig : `matplotlib.figure.Figure` object
        """
        x, y = self.get_xy(xlim)

        fig = plt.figure()
        fig.subplots_adjust(left=0.12, hspace=0.15)

        ax0 = fig.add_subplot(211)

        self.plot_raster(ax0, xlim, x, y, markersize=markersize, alpha=alpha,
                         marker=marker)
        remove_axis_junk(ax0)
        ax0.set_title('spike raster')
        ax0.set_xlabel("")

        nrows = len(self.X)
        bottom = np.linspace(0.1, 0.45, nrows + 1)[::-1][1:]
        thickn = np.abs(np.diff(bottom))[0] * 0.9
        for i, layer in enumerate(self.X):
            ax1 = fig.add_axes([0.12, bottom[i], 0.78, thickn])

            self.plot_f_rate(ax1, layer, i, xlim, x, y, )

            if i == nrows - 1:
                ax1.set_xlabel('time (ms)')
            else:
                ax1.set_xticklabels([])

            # NOTE(review): index 4 looks hard-coded for a specific network
            # layout (label centered among ~9 populations) — confirm intent.
            if i == 4:
                ax1.set_ylabel(r'population rates ($s^{-1}$)')

            if i == 0:
                ax1.set_title(r'population firing rates ($s^{-1}$)')

        return fig
class CachedFixedSpikesNetwork(CachedNetwork):
    """
    Subclass of CachedNetwork.

    Fake nest output, where each cell in a subpopulation spike
    simultaneously, and each subpopulation is activated at times given in
    kwarg activationtimes.

    Parameters
    ----------
    activationtimes : list of floats
        Each entry set spike times of all cells in each population
    autocollect : bool
        whether or not to automatically gather gdf file output
    **kwargs : see parent class `hybridLFPy.cachednetworks.CachedNetwork`

    Returns
    -------
    `hybridLFPy.cachednetworks.CachedFixedSpikesNetwork` object

    See also
    --------
    CachedNetwork, CachedNoiseNetwork,
    """

    def __init__(
            self,
            activationtimes=[
                200,
                300,
                400,
                500,
                600,
                700,
                800,
                900,
                1000],
            autocollect=False,
            **kwargs):
        """
        Subclass of CachedNetwork

        Fake nest output, where each cell in a subpopulation spike
        simultaneously, and each subpopulation is activated at times given in
        kwarg activationtimes.

        Parameters
        ----------
        activationtimes : list
            Each entry set spike times of all cells in each population
        autocollect : bool
            whether or not to automatically gather gdf file output
        **kwargs : see parent class `hybridLFPy.cachednetworks.CachedNetwork`

        Returns
        -------
        `hybridLFPy.cachednetworks.CachedFixedSpikesNetwork` object

        See also
        --------
        CachedNetwork, CachedNoiseNetwork,
        """
        CachedNetwork.__init__(self, autocollect=autocollect, **kwargs)

        # Set some attributes
        self.activationtimes = activationtimes

        # One activation time is required per population; the original
        # message had a misplaced parenthesis inside the string.
        if len(activationtimes) != len(self.N_X):
            raise Exception('len(activationtimes) != len(self.N_X)')

        # Write one fake gdf file per population on the master rank only:
        # every cell in population i spikes once at activationtimes[i].
        if RANK == 0:
            for i, N in enumerate(self.N_X):
                nodes = self.nodes[self.X[i]]
                cell_spt = list(zip(nodes, [self.activationtimes[i]
                                            for x in range(nodes.size)]))
                cell_spt = np.array(cell_spt, dtype=[('a', int), ('b', float)])

                np.savetxt(
                    os.path.join(
                        self.spike_output_path,
                        self.label +
                        '_{}.{}'.format(
                            self.X[i],
                            self.ext)),
                    cell_spt,
                    fmt=[
                        '%i',
                        '%.1f'])

        # Resync (capitalized Barrier for consistency with the rest of
        # the module; behavior is identical)
        COMM.Barrier()

        # Collect the gdf files
        self.collect_gdf()
class CachedNoiseNetwork(CachedNetwork):
    """
    Subclass of CachedNetwork.

    Use Nest to generate N_X poisson-generators each with rate frate,
    and record every vector, and create database with spikes.

    Parameters
    ----------
    frate : list
        Rate of each layer, may be tuple (onset, rate, offset)
    autocollect : bool
        whether or not to automatically gather gdf file output
    **kwargs : see parent class `hybridLFPy.cachednetworks.CachedNetwork`

    Returns
    -------
    `hybridLFPy.cachednetworks.CachedNoiseNetwork` object

    See also
    --------
    CachedNetwork, CachedFixedSpikesNetwork
    """

    def __init__(self,
                 frate=dict(EX=5., IN=10.),
                 autocollect=False,
                 **kwargs):
        """
        Subclass of `CachedNetwork`.

        Use Nest to generate N_X poisson-generators each with rate frate,
        and record every vector, and create database with spikes.

        Parameters
        ----------
        frate : dict
            Rate of each layer, value may be tuple (onset, rate, offset).
        autocollect : bool
            whether or not to automatically gather gdf file output
        **kwargs : see parent class `hybridLFPy.cachednetworks.CachedNetwork`

        Returns
        -------
        `hybridLFPy.cachednetworks.CachedNoiseNetwork` object

        See also
        --------
        CachedNetwork, CachedFixedSpikesNetwork
        """
        CachedNetwork.__init__(self, autocollect=autocollect, **kwargs)

        """
        Putting import nest here, avoid making `nest` a mandatory
        `hybridLFPy` dependency.
        """
        import nest

        # set some attributes:
        self.frate = frate
        # one rate entry is required per population
        if len(self.frate.keys()) != self.N_X.size:
            raise Exception('self.frate.keys().size != self.N_X.size')

        # one NEST virtual process per MPI rank
        self.total_num_virtual_procs = SIZE

        # Reset nest kernel and set some kernel status variables, destroy old
        # nodes etc in the process
        nest.ResetKernel()

        # if dt is in powers of two, dt must be multiple of ms_per_tic
        # NOTE(review): tics_per_ms is fixed at 2**2/dt regardless of which
        # power of two dt is — presumably intentional, but confirm against
        # the NEST kernel resolution docs.
        if self.dt in 2**np.arange(-32., 0):
            nest.SetKernelStatus({
                "tics_per_ms": 2**2 / self.dt,
                "resolution": self.dt,
                "print_time": True,
                "overwrite_files": True,
                "total_num_virtual_procs": self.total_num_virtual_procs,
            })
        else:
            nest.SetKernelStatus({
                "resolution": self.dt,
                "print_time": True,
                "overwrite_files": True,
                "total_num_virtual_procs": self.total_num_virtual_procs,
            })

        # spike detectors write gdf files to disk, not to memory
        # (withtime/withgid/to_file are NEST 2.x-style parameters)
        nest.SetDefaults("spike_detector", {
            'withtime': True,
            'withgid': True,
            'to_file': True,
            'to_memory': False,
        })

        # Create some populations of parrot neurons that echo the input Poisson
        # spike times
        self.nodes = {}
        for i, N in enumerate(self.N_X):
            self.nodes[self.X[i]] = nest.Create('parrot_neuron', N)

        if os.path.isfile(os.path.join(self.spike_output_path, self.dbname)):
            mystring = os.path.join(self.spike_output_path, self.dbname)
            print('db %s exist, will not rerun sim or collect gdf!' % mystring)
        else:
            # Create on spike detector per population
            self.spikes = nest.Create("spike_detector", len(self.N_X))

            # set label per spike detector
            for spt, X in zip(self.spikes, self.X):
                nest.SetStatus([spt],
                               dict(label=os.path.join(self.spike_output_path,
                                                       self.label + '_' + X)))

            """ Create independent poisson spike trains with the some rate,
            but each layer population should really have different rates.
            """
            self.noise = []
            # for X, rate in self.frate.items():
            for X in self.X:
                rate = self.frate[X]
                # a tuple rate encodes (onset, rate, offset)
                if isinstance(rate, tuple):
                    self.noise.append(nest.Create("poisson_generator", 1,
                                                  {"start": rate[0],
                                                   "rate": rate[1],
                                                   "stop": rate[2]}))
                else:
                    self.noise.append(nest.Create("poisson_generator", 1,
                                                  {"rate": rate}))

            # Connect parrots and spike detector
            for X, spt in zip(self.X, self.spikes):
                nest.Connect(self.nodes[X], [spt],
                             syn_spec='static_synapse')

            # Connect noise generators and nodes
            for i, X in enumerate(self.X):
                nest.Connect(self.noise[i], self.nodes[X],
                             syn_spec='static_synapse')

            # Run simulation
            nest.Simulate(self.simtime)

            # sync
            COMM.Barrier()

            # Collect the gdf files
            self.collect_gdf()

        # Nodes need to be collected in np.ndarrays:
        for key in list(self.nodes.keys()):
            self.nodes[key] = np.array(self.nodes[key])
class CachedTopoNetwork(CachedNetwork):
    """
    Subclass of CachedNetwork adding per-neuron spatial positions, loaded
    either from an HDF5 file (``all_positions.h5``) or from plain-text
    position files produced by the network simulation.
    """

    def __init__(self,
                 autocollect=True,
                 label_positions='brunel-py-pos',
                 **kwargs):
        '''
        Parameters
        ----------
        autocollect : bool
            whether or not to automatically gather gdf file output
        label_positions : str
            file prefix of position txt files
        **kwargs :
            parameters for parent class hybridLFPy.CachedNetwork
        '''
        # initialize parent class
        CachedNetwork.__init__(self, autocollect=autocollect, **kwargs)

        # set class attributes
        self.label_positions = label_positions

        # load positions and set them as attributes
        self.positions = {}
        for X in self.X:
            fname = os.path.join(self.spike_output_path, 'all_positions.h5')
            if os.path.isfile(fname):
                f = h5py.File(fname, 'r')
                # set positions, units from mm to mum !!!!!!!!!!!!!!!!!!!!!!!!!
                # self.positions[X] = f[X][()][:, 1:] * 1E3
                self.positions[X] = np.c_[f[X]['x-position_mm'][()],
                                          f[X]['y-position_mm'][()]] * 1E3
                f.close()
            else:
                # fall back on per-rank text files; stack and sort by GID
                fnames = glob(
                    os.path.join(
                        self.spike_output_path,
                        label_positions +
                        '*{0}*.dat'.format(X)))
                for i, fname in enumerate(fnames):
                    if i == 0:
                        tmp_pos = np.loadtxt(fname, dtype=object)
                    else:
                        tmp_pos = np.vstack((tmp_pos,
                                             np.loadtxt(fname, dtype=object)))
                # sorting array
                argsort = np.argsort(tmp_pos[:, 0].astype(int))

                # set positions
                self.positions[X] = tmp_pos[argsort, 1:].astype(float)

    def plot_raster(self, ax, xlim, x, y, pop_names=False,
                    markersize=20., alpha=1., legend=True,
                    marker='o'):
        """
        Plot network raster plot in subplot object.

        Parameters
        ----------
        ax : `matplotlib.axes.AxesSubplot` object
            plot axes
        xlim : list
            List of floats. Spike time interval, e.g., [0., 1000.].
        x : dict
            Key-value entries are population name and neuron spike times.
        y : dict
            Key-value entries are population name and neuron gid number.
        pop_names: bool
            If True, show population names on yaxis instead of gid number.
        markersize : float
            raster plot marker size
        alpha : float in [0, 1]
            transparency of marker
        legend : bool
            Switch on axes legends.

        Returns
        -------
        None
        """
        for i, X in enumerate(self.X):
            ax.plot(x[X], y[X], marker,
                    markersize=markersize,
                    markerfacecolor=self.colors[i],
                    markeredgecolor=self.colors[i],
                    alpha=alpha,
                    label=X, rasterized=True,
                    clip_on=True)

        ax.axis([xlim[0], xlim[1], 0, self.N_X.sum()])
        ax.set_ylim(ax.get_ylim()[::-1])
        ax.set_ylabel('cell id', labelpad=0)
        ax.set_xlabel('$t$ (ms)', labelpad=0)
        if legend:
            ax.legend()
        if pop_names:
            yticks = []
            yticklabels = []
            for i, X in enumerate(self.X):
                # BUGFIX: `y[X] != []` performed an element-wise NumPy
                # comparison whose truth value is ambiguous/deprecated;
                # test for non-empty arrays explicitly instead.
                if y[X].size > 0:
                    yticks.append(y[X].mean())
                    yticklabels.append(self.X[i])
            ax.set_yticks(yticks)
            ax.set_yticklabels(yticklabels)

        # Add some horizontal lines separating the populations
        for X in self.X:
            if y[X].size > 0:
                ax.plot([xlim[0], xlim[1]], [y[X].max(), y[X].max()],
                        'k', lw=0.25)
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| espenhgn/hybridLFPy | hybridLFPy/cachednetworks.py | Python | gpl-3.0 | 26,644 | [
"NEURON"
] | f3ca59f567b68e468af5808aa20c6880814f8623c3ac880ec9684ceccd5c7fc6 |
# Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: Ling Thio <ling.thio@gmail.com>
from __future__ import print_function # Use print() instead of print
from flask import url_for
def test_page_urls(client):
    """Integration test walking the app's pages as anonymous, user and admin.

    The statements are order-dependent: each login/logout mutates the test
    client's session, so later assertions rely on the preceding requests.
    """
    # Visit home page
    response = client.get(url_for('core.home_page'))
    assert b'<h1>Home page</h1>' in response.data

    # Login as user and visit User page
    response = client.post(url_for('user.login'), follow_redirects=True,
                           data=dict(email='user@example.com', password='Password1'))
    assert b'<h1>Home page</h1>' in response.data
    response = client.get(url_for('core.user_page'))
    assert b'<h1>User page</h1>' in response.data

    # Edit User Profile page
    response = client.get(url_for('core.user_profile_page'))
    assert b'<h1>User Profile</h1>' in response.data
    # Submit the profile form, then confirm the user page still renders
    response = client.post(url_for('core.user_profile_page'), follow_redirects=True,
                           data=dict(first_name='User', last_name='User'))
    response = client.get(url_for('core.user_page'))
    assert b'<h1>User page</h1>' in response.data

    # Logout
    response = client.get(url_for('user.logout'), follow_redirects=True)
    assert b'<h1>Home page</h1>' in response.data

    # Login as admin and visit Admin page
    response = client.post(url_for('user.login'), follow_redirects=True,
                           data=dict(email='admin@example.com', password='Password1'))
    assert b'<h1>Home page</h1>' in response.data
    response = client.get(url_for('core.admin_page'))
    assert b'<h1>Admin page</h1>' in response.data

    # Logout
    response = client.get(url_for('user.logout'), follow_redirects=True)
    assert b'<h1>Home page</h1>' in response.data
| sanchousese/Flask-User-starter-app | tests/test_page_urls.py | Python | bsd-2-clause | 1,743 | [
"VisIt"
] | d91350b5174edae62c6c1f09ae9fc780dffe441108d2e0b593cb623d967f47ac |
# DONT CHANGE THESE !!
# Django-style choices: the first tuple element is presumably the value
# persisted in the database, so renaming it would orphan existing records
# — hence the warning above.
VISIT_REASON = [
    ('scheduled', '1. Scheduled visit/contact'),
    ('missed', '2. Missed Scheduled visit'),
    ('unscheduled', '3. Unscheduled visit at which lab samples or data are being submitted'),
]
| botswana-harvard/eit | eit/apps/eit_infant/choices.py | Python | gpl-3.0 | 231 | [
"VisIt"
] | 7e9a1fe1ab111e02699d79f0c5c8be47517db4bc478ce7c11402f675a7ace496 |
"""Test Axis VMD4 API.
pytest --cov-report term-missing --cov=axis.applications.vmd4 tests/applications/test_vmd4.py
"""
import json
import pytest
import respx
from axis.applications.vmd4 import Vmd4
from ..conftest import HOST
@pytest.fixture
def vmd4(axis_device) -> Vmd4:
    """Provide a Vmd4 application bound to the mock device's request method."""
    request_method = axis_device.vapix.request
    return Vmd4(request_method)
@respx.mock
@pytest.mark.asyncio
async def test_get_empty_configuration(vmd4):
    """Verify a getConfiguration reply without profiles yields no entries."""
    mock_route = respx.post(f"http://{HOST}:80/local/vmd/control.cgi").respond(
        json=response_get_configuration_empty,
    )
    await vmd4.update()

    assert mock_route.called
    last_request = mock_route.calls.last.request
    assert last_request.method == "POST"
    assert last_request.url.path == "/local/vmd/control.cgi"
    expected_payload = {
        "method": "getConfiguration",
        "apiVersion": "1.2",
        "context": "Axis library",
    }
    assert json.loads(last_request.content) == expected_payload

    assert len(vmd4.values()) == 0
@respx.mock
@pytest.mark.asyncio
async def test_get_configuration(vmd4):
    """Verify a populated getConfiguration reply is parsed into a profile."""
    respx.post(f"http://{HOST}:80/local/vmd/control.cgi").respond(
        json=response_get_configuration,
    )
    await vmd4.update()

    assert len(vmd4.values()) == 1

    # Avoid shadowing the fixture name: bind the parsed profile separately.
    profile = vmd4["Camera1Profile1"]
    assert profile.id == "Camera1Profile1"
    assert profile.name == "Profile 1"
    assert profile.camera == 1
    assert profile.uid == 1
    expected_triggers = [
        {
            "type": "includeArea",
            "data": [
                [-0.97, -0.97],
                [-0.97, 0.97],
                [0.97, 0.97],
                [0.97, -0.97],
            ],
        }
    ]
    assert profile.triggers == expected_triggers
    expected_filters = [
        {"data": 1, "active": True, "type": "timeShortLivedLimit"},
        {"data": 5, "active": True, "type": "distanceSwayingObject"},
        {"data": [5, 5], "active": True, "type": "sizePercentage"},
    ]
    assert profile.filters == expected_filters
@respx.mock
@pytest.mark.asyncio
async def test_get_configuration_error(vmd4):
    """Verify an empty/error reply leaves the application without entries.

    The underlying _request helper returns an empty dict on error.
    """
    respx.post(f"http://{HOST}:80/local/vmd/control.cgi").respond(json={})

    await vmd4.update()

    assert not vmd4.values()
# Canned getConfiguration reply: camera present but no VMD profiles defined.
response_get_configuration_empty = {
    "apiVersion": "1.4",
    "method": "getConfiguration",
    "context": "Axis library",
    "data": {
        "cameras": [{"id": 1, "rotation": 0, "active": True}],
        "configurationStatus": 26,
        "profiles": [],
    },
}

# Canned getConfiguration reply with one profile ("Camera1Profile1"):
# an include-area trigger plus three filters.
response_get_configuration = {
    "apiVersion": "1.4",
    "method": "getConfiguration",
    "context": "Axis library",
    "data": {
        "cameras": [{"id": 1, "rotation": 0, "active": True}],
        "configurationStatus": 2,
        "profiles": [
            {
                "filters": [
                    {"data": 1, "active": True, "type": "timeShortLivedLimit"},
                    {"data": 5, "active": True, "type": "distanceSwayingObject"},
                    {"data": [5, 5], "active": True, "type": "sizePercentage"},
                ],
                "camera": 1,
                "triggers": [
                    {
                        "type": "includeArea",
                        "data": [
                            [-0.97, -0.97],
                            [-0.97, 0.97],
                            [0.97, 0.97],
                            [0.97, -0.97],
                        ],
                    }
                ],
                "name": "Profile 1",
                "uid": 1,
            }
        ],
    },
}

# Canned error reply (unsupported API version); kept for error-path tests.
response_get_configuration_error = {
    "apiVersion": "1.1",
    "method": "getConfiguration",
    "context": "Axis library",
    "error": {
        "code": "2000",
        "message": "The requested version of the application is not supported.",
    },
}
"VMD"
] | 147b5eb3df075a6e6c5ede74778911f6642a362ca4982423c826c8161a4be93a |
import datetime
import json
import unittest
from unittest.mock import Mock
import pytz
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.http import Http404, HttpRequest
from django.test import Client, TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils import timezone, translation
from freezegun import freeze_time
from wagtail.core.models import (
Comment, Locale, Page, PageLogEntry, PageManager, ParentNotTranslatedError, Site,
get_page_models, get_translatable_models)
from wagtail.core.signals import page_published
from wagtail.tests.testapp.models import (
AbstractPage, Advert, AlwaysShowInMenusPage, BlogCategory, BlogCategoryBlogPage, BusinessChild,
BusinessIndex, BusinessNowherePage, BusinessSubIndex, CustomManager, CustomManagerPage,
CustomPageQuerySet, EventCategory, EventIndex, EventPage, EventPageSpeaker, GenericSnippetPage,
ManyToManyBlogPage, MTIBasePage, MTIChildPage, MyCustomPage, OneToOnePage,
PageWithExcludedCopyField, SimpleChildPage, SimplePage, SimpleParentPage, SingleEventPage,
SingletonPage, StandardIndex, StreamPage, TaggedGrandchildPage, TaggedPage)
from wagtail.tests.utils import WagtailTestUtils
def get_ct(model):
    """Return the Django ContentType for *model* (shorthand for these tests)."""
    return ContentType.objects.get_for_model(model)
class TestValidation(TestCase):
    """Tests for Page creation/validation: required title, slug
    auto-generation and uniqueness, and draft_title behavior."""
    fixtures = ['test.json']

    def test_can_create(self):
        """
        Check that basic page creation works
        """
        homepage = Page.objects.get(url_path='/home/')
        hello_page = SimplePage(title="Hello world", slug='hello-world', content="hello")
        homepage.add_child(instance=hello_page)

        # check that hello_page exists in the db
        retrieved_page = Page.objects.get(id=hello_page.id)
        self.assertEqual(retrieved_page.title, "Hello world")

    def test_title_is_required(self):
        """Saving a page with a missing or empty title must fail validation."""
        homepage = Page.objects.get(url_path='/home/')

        hello_page = SimplePage(slug='hello-world', content="hello")
        with self.assertRaises(ValidationError):
            homepage.add_child(instance=hello_page)

        hello_page = SimplePage(title="", slug='hello-world', content="hello")
        with self.assertRaises(ValidationError):
            homepage.add_child(instance=hello_page)

    def test_slug_is_autogenerated(self):
        """An omitted slug is derived from the title, de-duplicated with a suffix."""
        homepage = Page.objects.get(url_path='/home/')

        # slug should be auto-assigned to a slugified version of the title
        hello_page = SimplePage(title="Hello world", content="hello")
        homepage.add_child(instance=hello_page)

        retrieved_page = Page.objects.get(id=hello_page.id)
        self.assertEqual(retrieved_page.slug, 'hello-world')

        # auto-generated slug should receive a suffix to make it unique
        events_page = SimplePage(title="Events", content="hello")
        homepage.add_child(instance=events_page)

        retrieved_page = Page.objects.get(id=events_page.id)
        self.assertEqual(retrieved_page.slug, 'events-2')

    def test_slug_must_be_unique_within_parent(self):
        """A duplicate slug among siblings raises ValidationError."""
        homepage = Page.objects.get(url_path='/home/')

        events_page = SimplePage(title="Events", slug='events', content="hello")
        with self.assertRaises(ValidationError):
            homepage.add_child(instance=events_page)

    def test_slug_can_duplicate_other_sections(self):
        """Slugs only need to be unique among siblings, not site-wide."""
        homepage = Page.objects.get(url_path='/home/')

        # the Events section has a page with slug='christmas', but we still allow
        # it as a slug elsewhere
        christmas_page = SimplePage(title="Christmas", slug='christmas', content="hello")
        homepage.add_child(instance=christmas_page)
        self.assertTrue(Page.objects.filter(id=christmas_page.id).exists())

    @override_settings(WAGTAIL_ALLOW_UNICODE_SLUGS=True)
    def test_slug_generation_respects_unicode_setting_true(self):
        """With unicode slugs enabled, non-ASCII characters are preserved."""
        page = Page(title="A mööse bit me önce")
        Page.get_first_root_node().add_child(instance=page)
        self.assertEqual(page.slug, 'a-mööse-bit-me-önce')

    @override_settings(WAGTAIL_ALLOW_UNICODE_SLUGS=False)
    def test_slug_generation_respects_unicode_setting_false(self):
        """With unicode slugs disabled, characters are transliterated to ASCII."""
        page = Page(title="A mööse bit me önce")
        Page.get_first_root_node().add_child(instance=page)
        self.assertEqual(page.slug, 'a-moose-bit-me-once')

    def test_get_admin_display_title(self):
        """By default the admin display title mirrors draft_title."""
        homepage = Page.objects.get(url_path='/home/')
        self.assertEqual(homepage.draft_title, homepage.get_admin_display_title())

    def test_get_admin_display_title_with_blank_draft_title(self):
        # Display title should fall back on the live title if draft_title is blank;
        # this can happen if the page was created through fixtures or migrations that
        # didn't explicitly account for draft_title
        # (since draft_title doesn't get populated automatically on save in those cases)
        Page.objects.filter(url_path='/home/').update(title='live title', draft_title='')
        homepage = Page.objects.get(url_path='/home/')
        self.assertEqual(homepage.get_admin_display_title(), 'live title')

    def test_draft_title_is_autopopulated(self):
        """draft_title defaults to title on save, but an explicit value wins."""
        homepage = Page.objects.get(url_path='/home/')

        hello_page = SimplePage(title="Hello world", content="hello")
        homepage.add_child(instance=hello_page)
        retrieved_page = Page.objects.get(id=hello_page.id)
        self.assertEqual(retrieved_page.draft_title, "Hello world")

        hello_page = SimplePage(title="Hello world", draft_title="Hello world edited", content="hello")
        homepage.add_child(instance=hello_page)
        retrieved_page = Page.objects.get(id=hello_page.id)
        self.assertEqual(retrieved_page.draft_title, "Hello world edited")
@override_settings(ALLOWED_HOSTS=['localhost', 'events.example.com', 'about.example.com', 'unknown.site.com'])
class TestSiteRouting(TestCase):
fixtures = ['test.json']
def setUp(self):
self.default_site = Site.objects.get(is_default_site=True)
events_page = Page.objects.get(url_path='/home/events/')
about_page = Page.objects.get(url_path='/home/about-us/')
self.events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
self.alternate_port_events_site = Site.objects.create(
hostname='events.example.com',
root_page=events_page,
port='8765'
)
self.about_site = Site.objects.create(hostname='about.example.com', root_page=about_page)
self.alternate_port_default_site = Site.objects.create(hostname=self.default_site.hostname, port='8765', root_page=self.default_site.root_page)
self.unrecognised_port = '8000'
self.unrecognised_hostname = 'unknown.site.com'
def test_valid_headers_route_to_specific_site(self):
# requests with a known Host: header should be directed to the specific site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.events_site.hostname
request.META['SERVER_PORT'] = self.events_site.port
with self.assertNumQueries(1):
self.assertEqual(Site.find_for_request(request), self.events_site)
def test_ports_in_request_headers_are_respected(self):
# ports in the Host: header should be respected
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.alternate_port_events_site.hostname
request.META['SERVER_PORT'] = self.alternate_port_events_site.port
with self.assertNumQueries(1):
self.assertEqual(Site.find_for_request(request), self.alternate_port_events_site)
def test_unrecognised_host_header_routes_to_default_site(self):
# requests with an unrecognised Host: header should be directed to the default site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.unrecognised_hostname
request.META['SERVER_PORT'] = '80'
with self.assertNumQueries(1):
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_unrecognised_port_and_default_host_routes_to_default_site(self):
# requests to the default host on an unrecognised port should be directed to the default site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.default_site.hostname
request.META['SERVER_PORT'] = self.unrecognised_port
with self.assertNumQueries(1):
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_unrecognised_port_and_unrecognised_host_routes_to_default_site(self):
    """Neither hostname nor port is recognised -> fall back to the default site."""
    req = HttpRequest()
    req.path = '/'
    req.META['HTTP_HOST'] = self.unrecognised_hostname
    req.META['SERVER_PORT'] = self.unrecognised_port
    with self.assertNumQueries(1):
        self.assertEqual(Site.find_for_request(req), self.default_site)
def test_unrecognised_port_on_known_hostname_routes_there_if_no_ambiguity(self):
    """An unknown port still matches by hostname when only one site has that hostname."""
    req = HttpRequest()
    req.path = '/'
    req.META['HTTP_HOST'] = self.about_site.hostname
    req.META['SERVER_PORT'] = self.unrecognised_port
    # about_site's hostname is unique, so hostname alone resolves the request.
    with self.assertNumQueries(1):
        self.assertEqual(Site.find_for_request(req), self.about_site)
def test_unrecognised_port_on_known_hostname_routes_to_default_site_if_ambiguity(self):
    """An unknown port with a hostname shared by several sites routes to the default site."""
    req = HttpRequest()
    req.path = '/'
    # events_site's hostname is shared with alternate_port_events_site, so a
    # port that matches neither cannot be resolved unambiguously.
    req.META['HTTP_HOST'] = self.events_site.hostname
    req.META['SERVER_PORT'] = self.unrecognised_port
    with self.assertNumQueries(1):
        self.assertEqual(Site.find_for_request(req), self.default_site)
def test_port_in_http_host_header_is_ignored(self):
    """A ':port' suffix inside HTTP_HOST is discarded; SERVER_PORT decides the port."""
    req = HttpRequest()
    req.path = '/'
    host_with_port = "%s:%s" % (self.events_site.hostname, self.events_site.port)
    req.META['HTTP_HOST'] = host_with_port
    req.META['SERVER_PORT'] = self.alternate_port_events_site.port
    with self.assertNumQueries(1):
        self.assertEqual(Site.find_for_request(req), self.alternate_port_events_site)
class TestRouting(TestCase):
    """Tests for page URL generation (get_url_parts, full_url, url, relative_url,
    get_site) and request routing (Page.route / Page.serve) against the sites
    defined in the test fixture."""
    fixtures = ['test.json']
    # need to clear urlresolver caches before/after tests, because we override ROOT_URLCONF
    # in some tests here
    def setUp(self):
        from django.urls import clear_url_caches
        clear_url_caches()
    def tearDown(self):
        from django.urls import clear_url_caches
        clear_url_caches()
    def test_urls(self):
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        # Basic installation only has one site configured, so page.url will return local URLs
        self.assertEqual(
            homepage.get_url_parts(),
            (default_site.id, 'http://localhost', '/')
        )
        self.assertEqual(homepage.full_url, 'http://localhost/')
        self.assertEqual(homepage.url, '/')
        self.assertEqual(homepage.relative_url(default_site), '/')
        self.assertEqual(homepage.get_site(), default_site)
        self.assertEqual(
            christmas_page.get_url_parts(),
            (default_site.id, 'http://localhost', '/events/christmas/')
        )
        self.assertEqual(christmas_page.full_url, 'http://localhost/events/christmas/')
        self.assertEqual(christmas_page.url, '/events/christmas/')
        self.assertEqual(christmas_page.relative_url(default_site), '/events/christmas/')
        self.assertEqual(christmas_page.get_site(), default_site)
    def test_page_with_no_url(self):
        # The tree root sits above every site root, so it has no routable URL.
        root = Page.objects.get(url_path='/')
        default_site = Site.objects.get(is_default_site=True)
        self.assertEqual(root.get_url_parts(), None)
        self.assertEqual(root.full_url, None)
        self.assertEqual(root.url, None)
        self.assertEqual(root.relative_url(default_site), None)
        self.assertEqual(root.get_site(), None)
    @override_settings(ALLOWED_HOSTS=['localhost', 'testserver', 'events.example.com', 'second-events.example.com'])
    def test_urls_with_multiple_sites(self):
        events_page = Page.objects.get(url_path='/home/events/')
        events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
        # An underscore is not valid according to RFC 1034/1035
        # and will raise a DisallowedHost Exception
        second_events_site = Site.objects.create(
            hostname='second-events.example.com', root_page=events_page)
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        # with multiple sites, page.url will return full URLs to ensure that
        # they work across sites
        self.assertEqual(
            homepage.get_url_parts(),
            (default_site.id, 'http://localhost', '/')
        )
        self.assertEqual(homepage.full_url, 'http://localhost/')
        self.assertEqual(homepage.url, 'http://localhost/')
        self.assertEqual(homepage.relative_url(default_site), '/')
        self.assertEqual(homepage.relative_url(events_site), 'http://localhost/')
        self.assertEqual(homepage.get_site(), default_site)
        self.assertEqual(
            christmas_page.get_url_parts(),
            (events_site.id, 'http://events.example.com', '/christmas/')
        )
        self.assertEqual(christmas_page.full_url, 'http://events.example.com/christmas/')
        self.assertEqual(christmas_page.url, 'http://events.example.com/christmas/')
        self.assertEqual(christmas_page.relative_url(default_site), 'http://events.example.com/christmas/')
        self.assertEqual(christmas_page.relative_url(events_site), '/christmas/')
        self.assertEqual(christmas_page.get_site(), events_site)
        # Passing a request lets get_url_parts resolve against the request's site;
        # two sites share the same root page, so the matching one must win.
        request = HttpRequest()
        request.META['HTTP_HOST'] = events_site.hostname
        request.META['SERVER_PORT'] = events_site.port
        self.assertEqual(
            christmas_page.get_url_parts(request=request),
            (events_site.id, 'http://events.example.com', '/christmas/')
        )
        request2 = HttpRequest()
        request2.META['HTTP_HOST'] = second_events_site.hostname
        request2.META['SERVER_PORT'] = second_events_site.port
        self.assertEqual(
            christmas_page.get_url_parts(request=request2),
            (second_events_site.id, 'http://second-events.example.com', '/christmas/')
        )
    @override_settings(ROOT_URLCONF='wagtail.tests.non_root_urls')
    def test_urls_with_non_root_urlconf(self):
        # With wagtail_serve mounted at /site/ rather than /, all page URLs
        # should gain the /site/ prefix.
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        # Basic installation only has one site configured, so page.url will return local URLs
        self.assertEqual(
            homepage.get_url_parts(),
            (default_site.id, 'http://localhost', '/site/')
        )
        self.assertEqual(homepage.full_url, 'http://localhost/site/')
        self.assertEqual(homepage.url, '/site/')
        self.assertEqual(homepage.relative_url(default_site), '/site/')
        self.assertEqual(homepage.get_site(), default_site)
        self.assertEqual(
            christmas_page.get_url_parts(),
            (default_site.id, 'http://localhost', '/site/events/christmas/')
        )
        self.assertEqual(christmas_page.full_url, 'http://localhost/site/events/christmas/')
        self.assertEqual(christmas_page.url, '/site/events/christmas/')
        self.assertEqual(christmas_page.relative_url(default_site), '/site/events/christmas/')
        self.assertEqual(christmas_page.get_site(), default_site)
    @override_settings(ROOT_URLCONF='wagtail.tests.headless_urls')
    def test_urls_headless(self):
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        # The page should not be routable because wagtail_serve is not registered
        # However it is still associated with a site
        self.assertEqual(
            homepage.get_url_parts(),
            (default_site.id, None, None)
        )
        self.assertEqual(homepage.full_url, None)
        self.assertEqual(homepage.url, None)
    def test_request_routing(self):
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        request = HttpRequest()
        request.path = '/events/christmas/'
        (found_page, args, kwargs) = homepage.route(request, ['events', 'christmas'])
        self.assertEqual(found_page, christmas_page)
    def test_request_serving(self):
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        request = HttpRequest()
        request.user = AnonymousUser()
        request.META['HTTP_HOST'] = Site.objects.first().hostname
        response = christmas_page.serve(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context_data['self'], christmas_page)
        # confirm that the event_page.html template was used
        self.assertContains(response, '<h2>Event</h2>')
    def test_route_to_unknown_page_returns_404(self):
        homepage = Page.objects.get(url_path='/home/')
        request = HttpRequest()
        request.path = '/events/quinquagesima/'
        with self.assertRaises(Http404):
            homepage.route(request, ['events', 'quinquagesima'])
    def test_route_to_unpublished_page_returns_404(self):
        homepage = Page.objects.get(url_path='/home/')
        request = HttpRequest()
        request.path = '/events/tentative-unpublished-event/'
        with self.assertRaises(Http404):
            homepage.route(request, ['events', 'tentative-unpublished-event'])
    # Override CACHES so we don't generate any cache-related SQL queries (tests use DatabaseCache
    # otherwise) and so cache.get will always return None.
    @override_settings(CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}})
    @override_settings(ALLOWED_HOSTS=['dummy'])
    def test_request_scope_site_root_paths_cache(self):
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        # without a request, get_url should only issue 1 SQL query
        with self.assertNumQueries(1):
            self.assertEqual(homepage.get_url(), '/')
        # subsequent calls with the same page should generate no SQL queries
        with self.assertNumQueries(0):
            self.assertEqual(homepage.get_url(), '/')
        # subsequent calls with a different page will still generate 1 SQL query
        with self.assertNumQueries(1):
            self.assertEqual(christmas_page.get_url(), '/events/christmas/')
        # with a request, the first call to get_url should issue 1 SQL query
        request = HttpRequest()
        request.META['HTTP_HOST'] = "dummy"
        request.META['SERVER_PORT'] = "8888"
        # first call with a "blank" request issues an extra query for the Site.find_for_request() call
        with self.assertNumQueries(2):
            self.assertEqual(homepage.get_url(request=request), '/')
        # subsequent calls should issue no SQL queries
        with self.assertNumQueries(0):
            self.assertEqual(homepage.get_url(request=request), '/')
        # even if called on a different page
        with self.assertNumQueries(0):
            self.assertEqual(christmas_page.get_url(request=request), '/events/christmas/')
@override_settings(ROOT_URLCONF='wagtail.tests.urls_multilang',
                   LANGUAGE_CODE='en',
                   WAGTAIL_I18N_ENABLED=True,
                   LANGUAGES=[('en', "English"), ('en-us', "English (United States)"), ('fr', "French")],
                   WAGTAIL_CONTENT_LANGUAGES=[('en', "English"), ('fr', "French")])
class TestRoutingWithI18N(TestRouting):
    # This inherits from TestRouting so contains all the same test cases
    # Only the test cases that behave differently under internationalisation are overridden here
    def test_urls(self, expected_language_code='en'):
        """URLs gain a language-code prefix when i18n is enabled."""
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        # Basic installation only has one site configured, so page.url will return local URLs
        # self.assertEqual(
        #     homepage.get_url_parts(),
        #     (default_site.id, 'http://localhost', f'/{expected_language_code}/')
        # )
        self.assertEqual(homepage.full_url, f'http://localhost/{expected_language_code}/')
        self.assertEqual(homepage.url, f'/{expected_language_code}/')
        self.assertEqual(homepage.relative_url(default_site), f'/{expected_language_code}/')
        self.assertEqual(homepage.get_site(), default_site)
        self.assertEqual(
            christmas_page.get_url_parts(),
            (default_site.id, 'http://localhost', f'/{expected_language_code}/events/christmas/')
        )
        self.assertEqual(christmas_page.full_url, f'http://localhost/{expected_language_code}/events/christmas/')
        self.assertEqual(christmas_page.url, f'/{expected_language_code}/events/christmas/')
        self.assertEqual(christmas_page.relative_url(default_site), f'/{expected_language_code}/events/christmas/')
        self.assertEqual(christmas_page.get_site(), default_site)
    def test_urls_with_translation_activated(self):
        # This should have no effect as the URL is determined from the page's locale
        # and not the active locale
        with translation.override("fr"):
            self.test_urls()
    def test_urls_with_region_specific_translation_activated(self):
        # One exception to the above rule is when the active locale
        # is a more specific one to what the page was authored in
        # and the active locale is not in WAGTAIL_CONTENT_LANGUAGES
        # This is because, in this situation, the same page will be
        # served under both /en/ and /en-us/ prefixes
        with translation.override("en-us"):
            self.test_urls(expected_language_code='en-us')
    @override_settings(WAGTAIL_CONTENT_LANGUAGES=[
        ('en', "English"),
        ('en-us', "English (United States)"),
        ('fr', "French")
    ])
    def test_urls_with_region_specific_translation_activated_thats_in_wagtail_content_languages(self):
        # But, if en-us is also a content language, then this rule doesn't apply
        # because that page won't be served under /en-us/.
        with translation.override("en-us"):
            self.test_urls()
    def test_urls_with_language_not_in_wagtail_content_languages(self):
        # If the active locale doesn't map to anything in WAGTAIL_CONTENT_LANGUAGES,
        # URL prefixes should remain the same as the page's reported locale
        with translation.override("se"):
            self.test_urls()
    def test_urls_with_different_language_tree(self):
        """Pages translated into another locale get that locale's URL prefix."""
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        fr_locale = Locale.objects.create(language_code="fr")
        fr_homepage = homepage.copy_for_translation(fr_locale)
        fr_christmas_page = christmas_page.copy_for_translation(fr_locale, copy_parents=True)
        fr_christmas_page.slug = 'noel'
        fr_christmas_page.save(update_fields=['slug'])
        # Basic installation only has one site configured, so page.url will return local URLs
        self.assertEqual(
            fr_homepage.get_url_parts(),
            (default_site.id, 'http://localhost', '/fr/')
        )
        self.assertEqual(fr_homepage.full_url, 'http://localhost/fr/')
        self.assertEqual(fr_homepage.url, '/fr/')
        self.assertEqual(fr_homepage.relative_url(default_site), '/fr/')
        self.assertEqual(fr_homepage.get_site(), default_site)
        self.assertEqual(
            fr_christmas_page.get_url_parts(),
            (default_site.id, 'http://localhost', '/fr/events/noel/')
        )
        self.assertEqual(fr_christmas_page.full_url, 'http://localhost/fr/events/noel/')
        self.assertEqual(fr_christmas_page.url, '/fr/events/noel/')
        self.assertEqual(fr_christmas_page.relative_url(default_site), '/fr/events/noel/')
        self.assertEqual(fr_christmas_page.get_site(), default_site)
    @override_settings(ALLOWED_HOSTS=['localhost', 'testserver', 'events.example.com', 'second-events.example.com'])
    def test_urls_with_multiple_sites(self):
        """Same as the parent test, but all paths carry the /en/ prefix."""
        events_page = Page.objects.get(url_path='/home/events/')
        events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
        # An underscore is not valid according to RFC 1034/1035
        # and will raise a DisallowedHost Exception
        second_events_site = Site.objects.create(
            hostname='second-events.example.com', root_page=events_page)
        default_site = Site.objects.get(is_default_site=True)
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        # with multiple sites, page.url will return full URLs to ensure that
        # they work across sites
        self.assertEqual(
            homepage.get_url_parts(),
            (default_site.id, 'http://localhost', '/en/')
        )
        self.assertEqual(homepage.full_url, 'http://localhost/en/')
        self.assertEqual(homepage.url, 'http://localhost/en/')
        self.assertEqual(homepage.relative_url(default_site), '/en/')
        self.assertEqual(homepage.relative_url(events_site), 'http://localhost/en/')
        self.assertEqual(homepage.get_site(), default_site)
        self.assertEqual(
            christmas_page.get_url_parts(),
            (events_site.id, 'http://events.example.com', '/en/christmas/')
        )
        self.assertEqual(christmas_page.full_url, 'http://events.example.com/en/christmas/')
        self.assertEqual(christmas_page.url, 'http://events.example.com/en/christmas/')
        self.assertEqual(christmas_page.relative_url(default_site), 'http://events.example.com/en/christmas/')
        self.assertEqual(christmas_page.relative_url(events_site), '/en/christmas/')
        self.assertEqual(christmas_page.get_site(), events_site)
        request = HttpRequest()
        request.META['HTTP_HOST'] = events_site.hostname
        request.META['SERVER_PORT'] = events_site.port
        self.assertEqual(
            christmas_page.get_url_parts(request=request),
            (events_site.id, 'http://events.example.com', '/en/christmas/')
        )
        request2 = HttpRequest()
        request2.META['HTTP_HOST'] = second_events_site.hostname
        request2.META['SERVER_PORT'] = second_events_site.port
        self.assertEqual(
            christmas_page.get_url_parts(request=request2),
            (second_events_site.id, 'http://second-events.example.com', '/en/christmas/')
        )
    # Override CACHES so we don't generate any cache-related SQL queries (tests use DatabaseCache
    # otherwise) and so cache.get will always return None.
    @override_settings(CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}})
    @override_settings(ALLOWED_HOSTS=['dummy'])
    def test_request_scope_site_root_paths_cache(self):
        homepage = Page.objects.get(url_path='/home/')
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        # without a request, get_url should only issue 2 SQL queries
        with self.assertNumQueries(2):
            self.assertEqual(homepage.get_url(), '/en/')
        # subsequent calls with the same page should generate no SQL queries
        with self.assertNumQueries(0):
            self.assertEqual(homepage.get_url(), '/en/')
        # subsequent calls with a different page will still generate 2 SQL queries
        with self.assertNumQueries(2):
            self.assertEqual(christmas_page.get_url(), '/en/events/christmas/')
        # with a request, the first call to get_url should issue 1 SQL query
        request = HttpRequest()
        request.META['HTTP_HOST'] = "dummy"
        request.META['SERVER_PORT'] = "8888"
        # first call with a "blank" request issues an extra query for the Site.find_for_request() call
        with self.assertNumQueries(3):
            self.assertEqual(homepage.get_url(request=request), '/en/')
        # subsequent calls should issue no SQL queries
        with self.assertNumQueries(0):
            self.assertEqual(homepage.get_url(request=request), '/en/')
        # even if called on a different page
        with self.assertNumQueries(0):
            self.assertEqual(christmas_page.get_url(request=request), '/en/events/christmas/')
class TestServeView(TestCase):
    """Tests for serving pages through the test client (the wagtail_serve view)."""
    fixtures = ['test.json']
    def setUp(self):
        # Explicitly clear the cache of site root paths. Normally this would be kept
        # in sync by the Site.save logic, but this is bypassed when the database is
        # rolled back between tests using transactions.
        from django.core.cache import cache
        cache.delete('wagtail_site_root_paths')
        # also need to clear urlresolver caches before/after tests, because we override
        # ROOT_URLCONF in some tests here
        from django.urls import clear_url_caches
        clear_url_caches()
    def tearDown(self):
        from django.urls import clear_url_caches
        clear_url_caches()
    def test_serve(self):
        """A published page renders with its own template and context."""
        response = self.client.get('/events/christmas/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'tests/event_page.html')
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        self.assertEqual(response.context['self'], christmas_page)
        self.assertContains(response, '<h1>Christmas</h1>')
        self.assertContains(response, '<h2>Event</h2>')
    @override_settings(ROOT_URLCONF='wagtail.tests.non_root_urls')
    def test_serve_with_non_root_urls(self):
        """Pages are still served when wagtail_serve is mounted under /site/."""
        response = self.client.get('/site/events/christmas/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'tests/event_page.html')
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        self.assertEqual(response.context['self'], christmas_page)
        self.assertContains(response, '<h1>Christmas</h1>')
        self.assertContains(response, '<h2>Event</h2>')
    def test_serve_unknown_page_returns_404(self):
        response = self.client.get('/events/quinquagesima/')
        self.assertEqual(response.status_code, 404)
    def test_serve_unpublished_page_returns_404(self):
        response = self.client.get('/events/tentative-unpublished-event/')
        self.assertEqual(response.status_code, 404)
    @override_settings(ALLOWED_HOSTS=['localhost', 'events.example.com'])
    def test_serve_with_multiple_sites(self):
        """Requests are routed by Host header; a page on one site 404s on another."""
        events_page = Page.objects.get(url_path='/home/events/')
        Site.objects.create(hostname='events.example.com', root_page=events_page)
        response = self.client.get('/christmas/', HTTP_HOST='events.example.com')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'tests/event_page.html')
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        self.assertEqual(response.context['self'], christmas_page)
        self.assertContains(response, '<h1>Christmas</h1>')
        self.assertContains(response, '<h2>Event</h2>')
        # same request to the default host should return a 404
        c = Client()
        response = c.get('/christmas/', HTTP_HOST='localhost')
        self.assertEqual(response.status_code, 404)
    def test_serve_with_custom_context_name(self):
        """Setting context_object_name exposes the page under that extra key."""
        # Fix: the original assignment mutated the EventPage class attribute
        # without restoring it, leaking 'event_page' into every later test
        # using EventPage. Save the previous value and restore it on cleanup.
        # (getattr with a None default covers the case where the attribute is
        # only inherited from Page — presumably None; restoring None keeps the
        # effective behavior identical.)
        previous = getattr(EventPage, 'context_object_name', None)
        EventPage.context_object_name = 'event_page'
        self.addCleanup(setattr, EventPage, 'context_object_name', previous)
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        response = self.client.get('/events/christmas/')
        # Context should contain context_object_name key along with standard page keys
        self.assertEqual(response.context['event_page'], christmas_page)
        self.assertEqual(response.context['page'], christmas_page)
        self.assertEqual(response.context['self'], christmas_page)
    def test_serve_with_custom_context(self):
        response = self.client.get('/events/')
        self.assertEqual(response.status_code, 200)
        # should render the whole page
        self.assertContains(response, '<h1>Events</h1>')
        # response should contain data from the custom 'events' context variable
        self.assertContains(response, '<a href="/events/christmas/">Christmas</a>')
    def test_ajax_response(self):
        response = self.client.get('/events/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)
        # should only render the content of includes/event_listing.html, not the whole page
        self.assertNotContains(response, '<h1>Events</h1>')
        self.assertContains(response, '<a href="/events/christmas/">Christmas</a>')
    def test_before_serve_hook(self):
        response = self.client.get('/events/', HTTP_USER_AGENT='GoogleBot')
        self.assertContains(response, 'bad googlebot no cookie')
class TestStaticSitePaths(TestCase):
    """Tests for Page.get_static_site_paths on simple and paginated pages."""
    def setUp(self):
        self.root_page = Page.objects.get(id=1)
        # A small three-page tree for the simple path tests.
        self.home_page = self.root_page.add_child(
            instance=SimplePage(title="Homepage", slug="home2", content="hello"))
        self.about_page = self.home_page.add_child(
            instance=SimplePage(title="About us", slug="about", content="hello"))
        self.contact_page = self.home_page.add_child(
            instance=SimplePage(title="Contact", slug="contact", content="hello"))
        # An event index with 20 child events, for the custom-paths test.
        self.event_index = self.root_page.add_child(
            instance=EventIndex(title="Events", slug="events"))
        for n in range(20):
            event = EventPage(
                title="Event " + str(n),
                slug="event" + str(n),
                location='the moon',
                audience='public',
                cost='free',
                date_from='2001-01-01',
            )
            self.event_index.add_child(instance=event)
    def test_local_static_site_paths(self):
        # A leaf page exposes only its own path.
        self.assertEqual(list(self.about_page.get_static_site_paths()), ['/'])
    def test_child_static_site_paths(self):
        # A parent page includes its children's paths.
        self.assertEqual(
            list(self.home_page.get_static_site_paths()),
            ['/', '/about/', '/contact/'],
        )
    def test_custom_static_site_paths(self):
        # Expect: the index itself, one path per page of results (5),
        # and one path per event page (20). Order is not significant.
        expected = ['/']
        expected += ['/' + str(i + 1) + '/' for i in range(5)]
        expected += ['/event' + str(i) + '/' for i in range(20)]
        self.assertEqual(
            sorted(self.event_index.get_static_site_paths()),
            sorted(expected),
        )
class TestMovePage(TestCase):
    """Moving a page should rewrite tree fields for it and for its descendants."""
    fixtures = ['test.json']
    def test_move_page(self):
        target = SimplePage.objects.get(url_path='/home/about-us/')
        events_index = EventIndex.objects.get(url_path='/home/events/')
        events_index.move(target, pos='last-child')
        # Reload from the database to see the rewritten url_path/depth fields.
        events_index = EventIndex.objects.get(id=events_index.id)
        self.assertEqual(events_index.url_path, '/home/about-us/events/')
        self.assertEqual(events_index.depth, 4)
        self.assertEqual(events_index.get_parent().id, target.id)
        # Descendants of the moved page must have been rewritten as well.
        christmas = events_index.get_children().get(slug='christmas')
        self.assertEqual(christmas.url_path, '/home/about-us/events/christmas/')
        self.assertEqual(christmas.depth, 5)
class TestPrevNextSiblings(TestCase):
    """Tests for Page.get_next_siblings / Page.get_prev_siblings."""
    fixtures = ['test.json']
    def test_get_next_siblings(self):
        christmas = Page.objects.get(url_path='/home/events/christmas/')
        following = christmas.get_next_siblings()
        self.assertTrue(following.filter(url_path='/home/events/final-event/').exists())
    def test_get_next_siblings_inclusive(self):
        christmas = Page.objects.get(url_path='/home/events/christmas/')
        # With inclusive=True, the page itself must come first.
        self.assertEqual(christmas.get_next_siblings(inclusive=True).first(), christmas)
    def test_get_prev_siblings(self):
        final_event = Page.objects.get(url_path='/home/events/final-event/')
        preceding = final_event.get_prev_siblings()
        self.assertTrue(preceding.filter(url_path='/home/events/christmas/').exists())
        # With inclusive=True, the page itself must come first.
        self.assertEqual(final_event.get_prev_siblings(inclusive=True).first(), final_event)
class TestLiveRevision(TestCase):
    """Tests that publish/unpublish/copy keep Page.live_revision and the
    first_published_at / last_published_at timestamps in sync. Each test
    asserts both the USE_TZ (aware, UTC) and naive-datetime code paths."""
    fixtures = ['test.json']
    @freeze_time("2017-01-01 12:00:00")
    def test_publish_method_will_set_live_revision(self):
        page = Page.objects.get(id=2)
        revision = page.save_revision()
        revision.publish()
        page.refresh_from_db()
        self.assertEqual(page.live_revision, revision)
        if settings.USE_TZ:
            self.assertEqual(
                page.last_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
            )
            # first_published_at should not change
            self.assertEqual(
                page.first_published_at, datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
            )
        else:
            self.assertEqual(
                # interpret the "2017-01-01 12:00:00" in freeze_time above as a naive local date
                page.last_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0)
            )
            # first_published_at should not change
            self.assertEqual(
                # convert the "2014-01-01T12:00:00.000Z" in the test fixture to a naive local time
                page.first_published_at, timezone.make_naive(datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.utc))
            )
    @freeze_time("2017-01-01 12:00:00")
    def test_unpublish_method_will_clean_live_revision(self):
        page = Page.objects.get(id=2)
        revision = page.save_revision()
        revision.publish()
        page.refresh_from_db()
        page.unpublish()
        page.refresh_from_db()
        self.assertIsNone(page.live_revision)
        # first_published_at / last_published_at should remain unchanged on unpublish
        if settings.USE_TZ:
            self.assertEqual(
                page.first_published_at, datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
            )
            self.assertEqual(
                page.last_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
            )
        else:
            self.assertEqual(
                # convert the "2014-01-01T12:00:00.000Z" in the test fixture to a naive local time
                page.first_published_at, timezone.make_naive(datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.utc))
            )
            self.assertEqual(
                # interpret the "2017-01-01 12:00:00" in freeze_time above as a naive local date
                page.last_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0)
            )
    @freeze_time("2017-01-01 12:00:00")
    def test_copy_method_with_keep_live_will_update_live_revision(self):
        about_us = SimplePage.objects.get(url_path='/home/about-us/')
        revision = about_us.save_revision()
        revision.publish()
        new_about_us = about_us.copy(keep_live=True, update_attrs={'title': "New about us", 'slug': 'new-about-us'})
        self.assertIsNotNone(new_about_us.live_revision)
        self.assertNotEqual(about_us.live_revision, new_about_us.live_revision)
        # first_published_at / last_published_at should reflect the current time,
        # not the source page's publish dates, since the copied page is being published
        # for the first time
        if settings.USE_TZ:
            self.assertEqual(new_about_us.first_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0, tzinfo=pytz.utc))
            self.assertEqual(new_about_us.last_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0, tzinfo=pytz.utc))
        else:
            self.assertEqual(new_about_us.first_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0))
            self.assertEqual(new_about_us.last_published_at, datetime.datetime(2017, 1, 1, 12, 0, 0))
    def test_copy_method_without_keep_live_will_not_update_live_revision(self):
        about_us = SimplePage.objects.get(url_path='/home/about-us/')
        revision = about_us.save_revision()
        revision.publish()
        about_us.refresh_from_db()
        self.assertIsNotNone(about_us.live_revision)
        new_about_us = about_us.copy(keep_live=False, update_attrs={'title': "New about us", 'slug': 'new-about-us'})
        self.assertIsNone(new_about_us.live_revision)
        # first_published_at / last_published_at should be blank, because the copied article
        # has not been published
        self.assertIsNone(new_about_us.first_published_at)
        self.assertIsNone(new_about_us.last_published_at)
    @freeze_time("2017-01-01 12:00:00")
    def test_publish_with_future_go_live_does_not_set_live_revision(self):
        about_us = SimplePage.objects.get(url_path='/home/about-us/')
        # Schedule publication a year after the frozen "now".
        if settings.USE_TZ:
            about_us.go_live_at = datetime.datetime(2018, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
        else:
            about_us.go_live_at = datetime.datetime(2018, 1, 1, 12, 0, 0)
        revision = about_us.save_revision()
        revision.publish()
        about_us.refresh_from_db()
        self.assertFalse(about_us.live)
        self.assertIsNone(about_us.live_revision)
        # first_published_at / last_published_at should remain unchanged
        if settings.USE_TZ:
            self.assertEqual(
                about_us.first_published_at, datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
            )
            self.assertEqual(
                about_us.last_published_at, datetime.datetime(2014, 2, 1, 12, 0, 0, tzinfo=pytz.utc)
            )
        else:
            self.assertEqual(
                about_us.first_published_at, timezone.make_naive(datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.utc))
            )
            self.assertEqual(
                about_us.last_published_at, timezone.make_naive(datetime.datetime(2014, 2, 1, 12, 0, 0, tzinfo=pytz.utc))
            )
class TestPageGetSpecific(TestCase):
    """Tests for Page.get_specific and the .specific / .specific_deferred
    cached properties, including which non-field attributes are copied to
    the specific instance and how many queries each access costs."""
    fixtures = ['test.json']
    def setUp(self):
        super().setUp()
        self.page = Page.objects.get(url_path="/home/about-us/")
        # Arbitrary non-field attributes, used to check which of them get
        # copied over to the specific instance.
        self.page.foo = 'ABC'
        self.page.bar = {'key': 'value'}
        self.page.baz = 999
    def test_default(self):
        # Field values are fetched from the database, hence the query
        with self.assertNumQueries(1):
            result = self.page.get_specific()
        # The returned instance is the correct type
        self.assertIsInstance(result, SimplePage)
        # Generic page field values can be accessed for free
        with self.assertNumQueries(0):
            self.assertEqual(result.id, self.page.id)
            self.assertEqual(result.title, self.page.title)
        # Specific model fields values are available without additional queries
        with self.assertNumQueries(0):
            self.assertTrue(result.content)
        # All non-field attributes should have been copied over...
        for attr in ('foo', 'bar', 'baz'):
            with self.subTest(attribute=attr):
                self.assertIs(getattr(result, attr), getattr(self.page, attr))
    def test_deferred(self):
        # Field values are NOT fetched from the database, hence no query
        with self.assertNumQueries(0):
            result = self.page.get_specific(deferred=True)
        # The returned instance is the correct type
        self.assertIsInstance(result, SimplePage)
        # Generic page field values can be accessed for free
        with self.assertNumQueries(0):
            self.assertEqual(result.id, self.page.id)
            self.assertEqual(result.title, self.page.title)
        # But, specific model fields values are NOT available without additional queries
        with self.assertNumQueries(1):
            self.assertTrue(result.content)
        # All non-field attributes should have been copied over...
        for attr in ('foo', 'bar', 'baz'):
            with self.subTest(attribute=attr):
                self.assertIs(getattr(result, attr), getattr(self.page, attr))
    def test_copy_attrs(self):
        result = self.page.get_specific(copy_attrs=['foo', 'bar'])
        # foo and bar should have been copied over
        self.assertIs(result.foo, self.page.foo)
        self.assertIs(result.bar, self.page.bar)
        # but baz should not have been
        self.assertFalse(hasattr(result, 'baz'))
    def test_copy_attrs_with_empty_list(self):
        result = self.page.get_specific(copy_attrs=())
        # No non-field attributes should have been copied over...
        for attr in ('foo', 'bar', 'baz'):
            with self.subTest(attribute=attr):
                self.assertFalse(hasattr(result, attr))
    def test_copy_attrs_exclude(self):
        result = self.page.get_specific(copy_attrs_exclude=['baz'])
        # foo and bar should have been copied over
        self.assertIs(result.foo, self.page.foo)
        self.assertIs(result.bar, self.page.bar)
        # but baz should not have been
        self.assertFalse(hasattr(result, 'baz'))
    def test_copy_attrs_exclude_with_empty_list(self):
        result = self.page.get_specific(copy_attrs_exclude=())
        # All non-field attributes should have been copied over...
        for attr in ('foo', 'bar', 'baz'):
            with self.subTest(attribute=attr):
                self.assertIs(getattr(result, attr), getattr(self.page, attr))
    def test_specific_cached_property(self):
        # invoking several times to demonstrate that field values
        # are fetched only once from the database, and each time the
        # same object is returned
        with self.assertNumQueries(1):
            result = self.page.specific
            result_2 = self.page.specific
            result_3 = self.page.specific
        self.assertIs(result, result_2)
        self.assertIs(result, result_3)
        self.assertIsInstance(result, SimplePage)
        # Specific model fields values are available without additional queries
        with self.assertNumQueries(0):
            self.assertTrue(result.content)
    def test_specific_deferred_cached_property(self):
        # invoking several times to demonstrate that the property
        # returns the same object (without any queries)
        with self.assertNumQueries(0):
            result = self.page.specific_deferred
            result_2 = self.page.specific_deferred
            result_3 = self.page.specific_deferred
        self.assertIs(result, result_2)
        self.assertIs(result, result_3)
        self.assertIsInstance(result, SimplePage)
        # Specific model fields values are not available without additional queries
        with self.assertNumQueries(1):
            self.assertTrue(result.content)
class TestCopyPage(TestCase):
    """Tests for ``Page.copy()``.

    Covers: field/attr copying, child objects (including relations defined
    on the ``Page`` superclass), parental M2M relations, revisions (and the
    options suppressing them), recursive copies, tags, generic foreign keys,
    excluded fields, and the ``page_published`` signal behaviour.
    """
    fixtures = ['test.json']

    def test_copy_page_copies(self):
        about_us = SimplePage.objects.get(url_path='/home/about-us/')
        # Copy it
        new_about_us = about_us.copy(update_attrs={'title': "New about us", 'slug': 'new-about-us'})
        # Check that new_about_us is correct
        self.assertIsInstance(new_about_us, SimplePage)
        self.assertEqual(new_about_us.title, "New about us")
        self.assertEqual(new_about_us.slug, 'new-about-us')
        # Check that new_about_us is a different page
        self.assertNotEqual(about_us.id, new_about_us.id)
        # Check that the url path was updated
        self.assertEqual(new_about_us.url_path, '/home/new-about-us/')

    def test_copy_page_copies_child_objects(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the speakers were copied
        self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")
        # Check that the speakers weren't removed from old page
        self.assertEqual(christmas_event.speakers.count(), 1, "Child objects were removed from the original page")
        # Check that advert placements were also copied (there's a gotcha here, since the advert_placements
        # relation is defined on Page, not EventPage)
        self.assertEqual(
            new_christmas_event.advert_placements.count(), 1, "Child objects defined on the superclass weren't copied"
        )
        self.assertEqual(
            christmas_event.advert_placements.count(),
            1,
            "Child objects defined on the superclass were removed from the original page"
        )

    def test_copy_page_copies_parental_relations(self):
        """Test that a page will be copied with parental many to many relations intact."""
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        summer_category = EventCategory.objects.create(name='Summer')
        holiday_category = EventCategory.objects.create(name='Holidays')
        # add parental many to many relations
        christmas_event.categories = (summer_category, holiday_category)
        christmas_event.save()
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # check that original event is untouched
        self.assertEqual(
            christmas_event.categories.count(),
            2,
            "Child objects (parental many to many) defined on the superclass were removed from the original page"
        )
        # check that parental many to many are copied
        self.assertEqual(
            new_christmas_event.categories.count(),
            2,
            "Child objects (parental many to many) weren't copied"
        )
        # check that the original and copy are related to the same categories
        self.assertEqual(
            new_christmas_event.categories.all().in_bulk(),
            christmas_event.categories.all().in_bulk()
        )

    def test_copy_page_does_not_copy_comments(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        christmas_event.wagtail_admin_comments = [Comment(text='test', user=christmas_event.owner)]
        christmas_event.save()
        # Copy the page; comments are expected to be excluded from the
        # copy by default (no exclude_fields argument needed)
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the comments weren't removed from old page
        self.assertEqual(christmas_event.wagtail_admin_comments.count(), 1, "Comments were removed from the original page")
        # Check that comments were NOT copied over
        self.assertFalse(
            new_christmas_event.wagtail_admin_comments.exists(),
            "Comments were copied"
        )

    def test_copy_page_does_not_copy_child_objects_if_accessor_name_in_exclude_fields(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        # Copy the page as in `test_copy_page_copies_child_objects()``, but using exclude_fields
        # to prevent 'advert_placements' from being copied to the new version
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'},
            exclude_fields=['advert_placements']
        )
        # Check that the speakers were copied
        self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")
        # Check that the speakers weren't removed from old page
        self.assertEqual(christmas_event.speakers.count(), 1, "Child objects were removed from the original page")
        # Check that advert placements were NOT copied over, but were not removed from the old page
        self.assertFalse(
            new_christmas_event.advert_placements.exists(),
            "Child objects were copied despite accessor_name being specified in `exclude_fields`"
        )
        self.assertEqual(
            christmas_event.advert_placements.count(),
            1,
            "Child objects defined on the superclass were removed from the original page"
        )

    def test_copy_page_with_process_child_object_supplied(self):
        # We'll provide this when copying and test that it gets called twice:
        # Once for the single speaker, and another for the single advert_placement
        modify_child = Mock()
        old_event = EventPage.objects.get(url_path='/home/events/christmas/')
        # Create a child event
        child_event = old_event.copy(update_attrs={'title': "Child christmas event", 'slug': 'child-christmas-event'})
        child_event.move(old_event, pos='last-child')
        new_event = old_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'},
            process_child_object=modify_child,
            recursive=True,
        )
        # The method should have been called with these arguments when copying
        # the advert placement
        relationship = EventPage._meta.get_field('advert_placements')
        child_object = new_event.advert_placements.get()
        modify_child.assert_any_call(old_event, new_event, relationship, child_object)
        # And again with these arguments when copying the speaker.
        # The reverse relation is registered under its related_name
        # 'speakers' (see `new_event.speakers` below); looking up the
        # singular 'speaker' would raise FieldDoesNotExist.
        relationship = EventPage._meta.get_field('speakers')
        child_object = new_event.speakers.get()
        modify_child.assert_any_call(old_event, new_event, relationship, child_object)
        # Check that process_child_object was run on the child event page as well
        new_child_event = new_event.get_children().get().specific
        child_object = new_child_event.speakers.get()
        modify_child.assert_any_call(child_event, new_child_event, relationship, child_object)

    def test_copy_page_copies_revisions(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        christmas_event.save_revision()
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the revisions were copied
        # Copying creates a new revision so we're expecting the new page to have two revisions
        self.assertEqual(new_christmas_event.revisions.count(), 2)
        # Check that the revisions weren't removed from old page
        self.assertEqual(christmas_event.revisions.count(), 1, "Revisions were removed from the original page")
        # Check that the attributes were updated in the latest revision
        latest_revision = new_christmas_event.get_latest_revision_as_page()
        self.assertEqual(latest_revision.title, "New christmas event")
        self.assertEqual(latest_revision.slug, 'new-christmas-event')
        # get_latest_revision_as_page might bypass the revisions table if it determines
        # that there are no draft edits since publish - so retrieve it explicitly from the
        # revision data, to ensure it's been updated there too
        latest_revision = new_christmas_event.get_latest_revision().as_page_object()
        self.assertEqual(latest_revision.title, "New christmas event")
        self.assertEqual(latest_revision.slug, 'new-christmas-event')
        # Check that the ids within the revision were updated correctly
        new_revision = new_christmas_event.revisions.first()
        new_revision_content = json.loads(new_revision.content_json)
        self.assertEqual(new_revision_content['pk'], new_christmas_event.id)
        self.assertEqual(new_revision_content['speakers'][0]['page'], new_christmas_event.id)
        # Also, check that the child objects in the new revision are given new IDs
        old_speakers_ids = set(christmas_event.speakers.values_list('id', flat=True))
        new_speakers_ids = set(speaker['pk'] for speaker in new_revision_content['speakers'])
        self.assertFalse(
            old_speakers_ids.intersection(new_speakers_ids),
            "Child objects in revisions were not given a new primary key"
        )

    def test_copy_page_copies_revisions_and_doesnt_submit_for_moderation(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        christmas_event.save_revision(submitted_for_moderation=True)
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the old revision is still submitted for moderation
        self.assertTrue(christmas_event.revisions.order_by('created_at').first().submitted_for_moderation)
        # Check that the new revision is not submitted for moderation
        self.assertFalse(new_christmas_event.revisions.order_by('created_at').first().submitted_for_moderation)

    def test_copy_page_copies_revisions_and_doesnt_change_created_at(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        christmas_event.save_revision(submitted_for_moderation=True)
        # Set the created_at of the revision to a time in the past
        revision = christmas_event.get_latest_revision()
        revision.created_at = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
        revision.save()
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the created_at time is the same
        christmas_event_created_at = christmas_event.revisions.order_by('created_at').first().created_at
        new_christmas_event_created_at = new_christmas_event.revisions.order_by('created_at').first().created_at
        self.assertEqual(christmas_event_created_at, new_christmas_event_created_at)

    def test_copy_page_copies_revisions_and_doesnt_schedule(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        if settings.USE_TZ:
            christmas_event.save_revision(
                approved_go_live_at=datetime.datetime(2014, 9, 16, 9, 12, 00, tzinfo=pytz.utc)
            )
        else:
            christmas_event.save_revision(
                approved_go_live_at=datetime.datetime(2014, 9, 16, 9, 12, 00)
            )
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the old revision is still scheduled
        if settings.USE_TZ:
            self.assertEqual(
                christmas_event.revisions.order_by('created_at').first().approved_go_live_at,
                datetime.datetime(2014, 9, 16, 9, 12, 00, tzinfo=pytz.utc)
            )
        else:
            self.assertEqual(
                christmas_event.revisions.order_by('created_at').first().approved_go_live_at,
                datetime.datetime(2014, 9, 16, 9, 12, 00)
            )
        # Check that the new revision is not scheduled
        self.assertEqual(new_christmas_event.revisions.order_by('created_at').first().approved_go_live_at, None)

    def test_copy_page_doesnt_copy_revisions_if_told_not_to_do_so(self):
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        christmas_event.save_revision()
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}, copy_revisions=False
        )
        # Check that the revisions weren't copied
        # Copying creates a new revision so we're expecting the new page to have one revision
        self.assertEqual(new_christmas_event.revisions.count(), 1)
        # Check that the revisions weren't removed from old page
        self.assertEqual(christmas_event.revisions.count(), 1, "Revisions were removed from the original page")

    def test_copy_page_copies_child_objects_with_nonspecific_class(self):
        # Get christmas page as Page instead of EventPage
        christmas_event = Page.objects.get(url_path='/home/events/christmas/')
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
        )
        # Check that the type of the new page is correct
        self.assertIsInstance(new_christmas_event, EventPage)
        # Check that the speakers were copied
        self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")

    def test_copy_page_copies_recursively(self):
        events_index = EventIndex.objects.get(url_path='/home/events/')
        # Copy it
        new_events_index = events_index.copy(
            recursive=True, update_attrs={'title': "New events index", 'slug': 'new-events-index'}
        )
        # Get christmas event
        old_christmas_event = events_index.get_children().filter(slug='christmas').first()
        new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
        # Check that the event exists in both places
        self.assertNotEqual(new_christmas_event, None, "Child pages weren't copied")
        self.assertNotEqual(old_christmas_event, None, "Child pages were removed from original page")
        # Check that the url path was updated
        self.assertEqual(new_christmas_event.url_path, '/home/new-events-index/christmas/')

    def test_copy_page_copies_recursively_with_child_objects(self):
        events_index = EventIndex.objects.get(url_path='/home/events/')
        # Copy it
        new_events_index = events_index.copy(
            recursive=True, update_attrs={'title': "New events index", 'slug': 'new-events-index'}
        )
        # Get christmas event
        old_christmas_event = events_index.get_children().filter(slug='christmas').first()
        new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
        # Check that the speakers were copied
        self.assertEqual(new_christmas_event.specific.speakers.count(), 1, "Child objects weren't copied")
        # Check that the speakers weren't removed from old page
        self.assertEqual(
            old_christmas_event.specific.speakers.count(), 1, "Child objects were removed from the original page"
        )

    def test_copy_page_copies_recursively_with_revisions(self):
        events_index = EventIndex.objects.get(url_path='/home/events/')
        old_christmas_event = events_index.get_children().filter(slug='christmas').first().specific
        old_christmas_event.save_revision()
        # Copy it
        new_events_index = events_index.copy(
            recursive=True, update_attrs={'title': "New events index", 'slug': 'new-events-index'}
        )
        # Get christmas event
        new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
        # Check that the revisions were copied
        # Copying creates a new revision so we're expecting the new page to have two revisions
        self.assertEqual(new_christmas_event.specific.revisions.count(), 2)
        # Check that the revisions weren't removed from old page
        self.assertEqual(
            old_christmas_event.specific.revisions.count(), 1, "Revisions were removed from the original page"
        )

    def test_copy_page_copies_recursively_but_doesnt_copy_revisions_if_told_not_to_do_so(self):
        events_index = EventIndex.objects.get(url_path='/home/events/')
        old_christmas_event = events_index.get_children().filter(slug='christmas').first()
        old_christmas_event.save_revision()
        # Copy it
        new_events_index = events_index.copy(
            recursive=True,
            update_attrs={'title': "New events index", 'slug': 'new-events-index'},
            copy_revisions=False
        )
        # Get christmas event
        new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
        # Check that the revisions weren't copied
        # Copying creates a new revision so we're expecting the new page to have one revision
        self.assertEqual(new_christmas_event.specific.revisions.count(), 1)
        # Check that the revisions weren't removed from old page
        self.assertEqual(
            old_christmas_event.specific.revisions.count(), 1, "Revisions were removed from the original page"
        )

    def test_copy_page_copies_recursively_to_the_same_tree(self):
        events_index = EventIndex.objects.get(url_path='/home/events/')
        old_christmas_event = events_index.get_children().filter(slug='christmas').first().specific
        old_christmas_event.save_revision()
        with self.assertRaises(Exception) as exception:
            events_index.copy(
                recursive=True, update_attrs={'title': "New events index", 'slug': 'new-events-index'}, to=events_index
            )
        self.assertEqual(str(exception.exception), "You cannot copy a tree branch recursively into itself")

    def test_copy_page_updates_user(self):
        event_moderator = get_user_model().objects.get(email='eventmoderator@example.com')
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        christmas_event.save_revision()
        # Copy it
        new_christmas_event = christmas_event.copy(
            update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'},
            user=event_moderator,
        )
        # Check that the owner has been updated
        self.assertEqual(new_christmas_event.owner, event_moderator)
        # Check that the user on the last revision is correct
        self.assertEqual(new_christmas_event.get_latest_revision().user, event_moderator)

    def test_copy_multi_table_inheritance(self):
        saint_patrick_event = SingleEventPage.objects.get(url_path='/home/events/saint-patrick/')
        # Copy it
        new_saint_patrick_event = saint_patrick_event.copy(update_attrs={'slug': 'new-saint-patrick'})
        # Check that new_saint_patrick_event is correct
        self.assertIsInstance(new_saint_patrick_event, SingleEventPage)
        self.assertEqual(new_saint_patrick_event.excerpt, saint_patrick_event.excerpt)
        # Check that new_saint_patrick_event is a different page, including parents from both EventPage and Page
        self.assertNotEqual(saint_patrick_event.id, new_saint_patrick_event.id)
        self.assertNotEqual(saint_patrick_event.eventpage_ptr.id, new_saint_patrick_event.eventpage_ptr.id)
        self.assertNotEqual(
            saint_patrick_event.eventpage_ptr.page_ptr.id,
            new_saint_patrick_event.eventpage_ptr.page_ptr.id
        )
        # Check that the url path was updated
        self.assertEqual(new_saint_patrick_event.url_path, '/home/events/new-saint-patrick/')
        # Check that both parent instance exists
        self.assertIsInstance(EventPage.objects.get(id=new_saint_patrick_event.id), EventPage)
        self.assertIsInstance(Page.objects.get(id=new_saint_patrick_event.id), Page)

    def test_copy_page_copies_tags(self):
        # create and publish a TaggedPage under Events
        event_index = Page.objects.get(url_path='/home/events/')
        tagged_page = TaggedPage(title='My tagged page', slug='my-tagged-page')
        tagged_page.tags.add('wagtail', 'bird')
        event_index.add_child(instance=tagged_page)
        tagged_page.save_revision().publish()
        old_tagged_item_ids = [item.id for item in tagged_page.tagged_items.all()]
        # there should be two items here, with defined (truthy) IDs
        self.assertEqual(len(old_tagged_item_ids), 2)
        self.assertTrue(all(old_tagged_item_ids))
        # copy to underneath homepage
        homepage = Page.objects.get(url_path='/home/')
        new_tagged_page = tagged_page.copy(to=homepage)
        self.assertNotEqual(tagged_page.id, new_tagged_page.id)
        # new page should also have two tags
        new_tagged_item_ids = [item.id for item in new_tagged_page.tagged_items.all()]
        self.assertEqual(len(new_tagged_item_ids), 2)
        self.assertTrue(all(new_tagged_item_ids))
        # new tagged_item IDs should differ from old ones
        self.assertTrue(all([
            item_id not in old_tagged_item_ids
            for item_id in new_tagged_item_ids
        ]))

    def test_copy_subclassed_page_copies_tags(self):
        # create and publish a TaggedGrandchildPage under Events
        event_index = Page.objects.get(url_path='/home/events/')
        sub_tagged_page = TaggedGrandchildPage(title='My very special tagged page', slug='my-special-tagged-page')
        sub_tagged_page.tags.add('wagtail', 'bird')
        event_index.add_child(instance=sub_tagged_page)
        sub_tagged_page.save_revision().publish()
        old_tagged_item_ids = [item.id for item in sub_tagged_page.tagged_items.all()]
        # there should be two items here, with defined (truthy) IDs
        self.assertEqual(len(old_tagged_item_ids), 2)
        self.assertTrue(all(old_tagged_item_ids))
        # copy to underneath homepage
        homepage = Page.objects.get(url_path='/home/')
        new_sub_tagged_page = sub_tagged_page.copy(to=homepage)
        self.assertNotEqual(sub_tagged_page.id, new_sub_tagged_page.id)
        # new page should also have two tags
        new_tagged_item_ids = [item.id for item in new_sub_tagged_page.tagged_items.all()]
        self.assertEqual(len(new_tagged_item_ids), 2)
        self.assertTrue(all(new_tagged_item_ids))
        # new tagged_item IDs should differ from old ones
        self.assertTrue(all([
            item_id not in old_tagged_item_ids
            for item_id in new_tagged_item_ids
        ]))

    def test_copy_page_with_m2m_relations(self):
        # create and publish a ManyToManyBlogPage under Events
        event_index = Page.objects.get(url_path='/home/events/')
        category = BlogCategory.objects.create(name='Birds')
        advert = Advert.objects.create(url='http://www.heinz.com/', text="beanz meanz heinz")
        blog_page = ManyToManyBlogPage(title='My blog page', slug='my-blog-page')
        event_index.add_child(instance=blog_page)
        blog_page.adverts.add(advert)
        BlogCategoryBlogPage.objects.create(category=category, page=blog_page)
        blog_page.save_revision().publish()
        # copy to underneath homepage
        homepage = Page.objects.get(url_path='/home/')
        new_blog_page = blog_page.copy(to=homepage)
        # M2M relations are not formally supported, so for now we're only interested in
        # the copy operation as a whole succeeding, rather than the child objects being copied
        self.assertNotEqual(blog_page.id, new_blog_page.id)

    def test_copy_page_with_generic_foreign_key(self):
        # create and publish a GenericSnippetPage under Events
        event_index = Page.objects.get(url_path='/home/events/')
        advert = Advert.objects.create(url='http://www.heinz.com/', text="beanz meanz heinz")
        page = GenericSnippetPage(title='My snippet page', slug='my-snippet-page')
        page.snippet_content_object = advert
        event_index.add_child(instance=page)
        page.save_revision().publish()
        # copy to underneath homepage
        homepage = Page.objects.get(url_path='/home/')
        new_page = page.copy(to=homepage)
        self.assertNotEqual(page.id, new_page.id)
        self.assertEqual(new_page.snippet_content_object, advert)

    def test_copy_page_with_o2o_relation(self):
        event_index = Page.objects.get(url_path='/home/events/')
        page = OneToOnePage(title='My page', slug='my-page')
        event_index.add_child(instance=page)
        homepage = Page.objects.get(url_path='/home/')
        new_page = page.copy(to=homepage)
        self.assertNotEqual(page.id, new_page.id)

    def test_copy_page_with_additional_excluded_fields(self):
        homepage = Page.objects.get(url_path='/home/')
        page = homepage.add_child(instance=PageWithExcludedCopyField(
            title='Discovery',
            slug='disco',
            content='NCC-1031',
            special_field='Context is for Kings'))
        new_page = page.copy(to=homepage, update_attrs={'slug': 'disco-2'})
        self.assertEqual(page.title, new_page.title)
        self.assertNotEqual(page.id, new_page.id)
        self.assertNotEqual(page.path, new_page.path)
        # special_field is in the list to be excluded
        self.assertNotEqual(page.special_field, new_page.special_field)

    def test_copy_page_with_excluded_parental_and_child_relations(self):
        """Test that a page will be copied with parental and child relations removed if excluded."""
        try:
            # modify excluded fields for this test
            EventPage.exclude_fields_in_copy = ['advert_placements', 'categories', 'signup_link']
            # set up data
            christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
            summer_category = EventCategory.objects.create(name='Summer')
            holiday_category = EventCategory.objects.create(name='Holidays')
            # add URL (to test excluding a basic field)
            christmas_event.signup_link = "https://christmas-is-awesome.com/rsvp"
            # add parental many to many relations
            christmas_event.categories = (summer_category, holiday_category)
            christmas_event.save()
            # Copy it
            new_christmas_event = christmas_event.copy(
                update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'}
            )
            # check that the signup_link was NOT copied
            self.assertEqual(christmas_event.signup_link, "https://christmas-is-awesome.com/rsvp")
            self.assertEqual(new_christmas_event.signup_link, '')
            # check that original event is untouched
            self.assertEqual(
                christmas_event.categories.count(),
                2,
                "Child objects (parental many to many) defined on the superclass were removed from the original page"
            )
            # check that parental many to many are NOT copied
            self.assertEqual(
                new_christmas_event.categories.count(),
                0,
                "Child objects (parental many to many) were copied but should be excluded"
            )
            # check that child objects on original event were left untouched
            self.assertEqual(
                christmas_event.advert_placements.count(),
                1,
                "Child objects defined on the original superclass were edited when copied"
            )
            # check that child objects were NOT copied
            self.assertEqual(
                new_christmas_event.advert_placements.count(),
                0,
                "Child objects defined on the superclass were copied and should not be"
            )
        finally:
            # reset excluded fields for future tests
            EventPage.exclude_fields_in_copy = []

    def test_copy_unsaved_page(self):
        """Test that unsaved page will not be copied."""
        new_page = SimplePage(slug='testpurp', title='testpurpose')
        with self.assertRaises(RuntimeError):
            new_page.copy()

    def test_copy_published_emits_signal(self):
        """Test that copying of a published page emits a page_published signal."""
        christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
        signal_fired = False
        signal_page = None

        def page_published_handler(sender, instance, **kwargs):
            nonlocal signal_fired
            nonlocal signal_page
            signal_fired = True
            signal_page = instance

        page_published.connect(page_published_handler)
        try:
            copy_page = christmas_page.copy(
                update_attrs={'title': "New christmas", 'slug': 'new-christmas'},
            )
        finally:
            # always disconnect, so the receiver doesn't leak into other tests
            page_published.disconnect(page_published_handler)
        self.assertTrue(signal_fired)
        self.assertEqual(signal_page, copy_page)

    def test_copy_unpublished_not_emits_signal(self):
        """Test that copying of an unpublished page not emits a page_published signal."""
        homepage = Page.objects.get(url_path='/home/')
        homepage.live = False
        homepage.save()
        signal_fired = False

        def page_published_handler(sender, instance, **kwargs):
            nonlocal signal_fired
            signal_fired = True

        page_published.connect(page_published_handler)
        try:
            homepage.copy(update_attrs={'slug': 'new_slug'})
        finally:
            # always disconnect, so the receiver doesn't leak into other tests
            page_published.disconnect(page_published_handler)
        self.assertFalse(signal_fired)

    def test_copy_keep_live_false_not_emits_signal(self):
        """Test that copying of a live page with keep_live=False not emits a page_published signal."""
        homepage = Page.objects.get(url_path='/home/')
        signal_fired = False

        def page_published_handler(sender, instance, **kwargs):
            nonlocal signal_fired
            signal_fired = True

        page_published.connect(page_published_handler)
        try:
            homepage.copy(
                keep_live=False,
                update_attrs={'slug': 'new_slug'}
            )
        finally:
            # always disconnect, so the receiver doesn't leak into other tests
            page_published.disconnect(page_published_handler)
        self.assertFalse(signal_fired)

    def test_copy_alias_page(self):
        about_us = SimplePage.objects.get(url_path='/home/about-us/')
        about_us_alias = about_us.create_alias(update_slug='about-us-alias')
        about_us_alias_copy = about_us_alias.copy(update_attrs={
            'slug': 'about-us-alias-copy'
        })
        self.assertIsInstance(about_us_alias_copy, SimplePage)
        self.assertEqual(about_us_alias_copy.slug, 'about-us-alias-copy')
        self.assertNotEqual(about_us_alias_copy.id, about_us.id)
        self.assertEqual(about_us_alias_copy.url_path, '/home/about-us-alias-copy/')
        # The copy should just be a copy of the original page, not an alias
        self.assertIsNone(about_us_alias_copy.alias_of)
class TestCreateAlias(TestCase):
fixtures = ['test.json']
def test_create_alias(self):
about_us = SimplePage.objects.get(url_path='/home/about-us/')
# Set a different draft title, aliases are not supposed to
# have a different draft_title because they don't have revisions.
# This should be corrected when copying
about_us.draft_title = 'Draft title'
about_us.save(update_fields=['draft_title'])
# Copy it
new_about_us = about_us.create_alias(update_slug='new-about-us')
# Check that new_about_us is correct
self.assertIsInstance(new_about_us, SimplePage)
self.assertEqual(new_about_us.slug, 'new-about-us')
# Draft title should be changed to match the live title
self.assertEqual(new_about_us.draft_title, 'About us')
# Check that new_about_us is a different page
self.assertNotEqual(about_us.id, new_about_us.id)
# Check that the url path was updated
self.assertEqual(new_about_us.url_path, '/home/new-about-us/')
# Check that the alias_of field was filled in
self.assertEqual(new_about_us.alias_of, about_us)
def test_create_alias_copies_child_objects(self):
christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
# Copy it
new_christmas_event = christmas_event.create_alias(update_slug='new-christmas-event')
# Check that the speakers were copied
self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")
# Check that the speakers weren't removed from old page
self.assertEqual(christmas_event.speakers.count(), 1, "Child objects were removed from the original page")
# Check that advert placements were also copied (there's a gotcha here, since the advert_placements
# relation is defined on Page, not EventPage)
self.assertEqual(
new_christmas_event.advert_placements.count(), 1, "Child objects defined on the superclass weren't copied"
)
self.assertEqual(
christmas_event.advert_placements.count(),
1,
"Child objects defined on the superclass were removed from the original page"
)
def test_create_alias_copies_parental_relations(self):
"""Test that a page will be copied with parental many to many relations intact."""
christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
summer_category = EventCategory.objects.create(name='Summer')
holiday_category = EventCategory.objects.create(name='Holidays')
# add parental many to many relations
christmas_event.categories = (summer_category, holiday_category)
christmas_event.save()
# Copy it
new_christmas_event = christmas_event.create_alias(update_slug='new-christmas-event')
# check that original eventt is untouched
self.assertEqual(
christmas_event.categories.count(),
2,
"Child objects (parental many to many) defined on the superclass were removed from the original page"
)
# check that parental many to many are copied
self.assertEqual(
new_christmas_event.categories.count(),
2,
"Child objects (parental many to many) weren't copied"
)
# check that the original and copy are related to the same categories
self.assertEqual(
new_christmas_event.categories.all().in_bulk(),
christmas_event.categories.all().in_bulk()
)
def test_create_alias_doesnt_copy_revisions(self):
christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
christmas_event.save_revision()
# Copy it
new_christmas_event = christmas_event.create_alias(update_slug='new-christmas-event')
# Check that no revisions were created
self.assertEqual(new_christmas_event.revisions.count(), 0)
    def test_create_alias_copies_child_objects_with_nonspecific_class(self):
        """Aliasing via a generic Page reference must still produce the specific page type
        and copy its child objects."""
        # Get christmas page as Page instead of EventPage
        christmas_event = Page.objects.get(url_path='/home/events/christmas/')
        # Copy it
        new_christmas_event = christmas_event.create_alias(update_slug='new-christmas-event')
        # Check that the type of the new page is correct
        self.assertIsInstance(new_christmas_event, EventPage)
        # Check that the speakers were copied
        self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")
    def test_create_alias_copies_recursively(self):
        """recursive=True should alias the whole subtree, with each child created as an
        alias of its corresponding original."""
        events_index = EventIndex.objects.get(url_path='/home/events/')
        # Copy it
        new_events_index = events_index.create_alias(recursive=True, update_slug='new-events-index')
        # Get christmas event
        old_christmas_event = events_index.get_children().filter(slug='christmas').first()
        new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
        # Check that the event exists in both places
        self.assertNotEqual(new_christmas_event, None, "Child pages weren't copied")
        self.assertNotEqual(old_christmas_event, None, "Child pages were removed from original page")
        # Check that the url path was updated
        self.assertEqual(new_christmas_event.url_path, '/home/new-events-index/christmas/')
        # Check that the children were also created as aliases
        self.assertEqual(new_christmas_event.alias_of, old_christmas_event)
    def test_create_alias_copies_recursively_with_child_objects(self):
        """Child objects (speakers) on descendant pages must be copied during a recursive alias."""
        events_index = EventIndex.objects.get(url_path='/home/events/')
        # Copy it
        new_events_index = events_index.create_alias(recursive=True, update_slug='new-events-index')
        # Get christmas event
        old_christmas_event = events_index.get_children().filter(slug='christmas').first()
        new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
        # Check that the speakers were copied
        self.assertEqual(new_christmas_event.specific.speakers.count(), 1, "Child objects weren't copied")
        # Check that the speakers weren't removed from old page
        self.assertEqual(
            old_christmas_event.specific.speakers.count(), 1, "Child objects were removed from the original page"
        )
    def test_create_alias_doesnt_copy_recursively_to_the_same_tree(self):
        """Recursively aliasing a branch into itself must be rejected with an error."""
        events_index = EventIndex.objects.get(url_path='/home/events/')
        old_christmas_event = events_index.get_children().filter(slug='christmas').first().specific
        old_christmas_event.save_revision()
        with self.assertRaises(Exception) as exception:
            events_index.create_alias(recursive=True, parent=events_index)
        self.assertEqual(str(exception.exception), "You cannot copy a tree branch recursively into itself")
def test_create_alias_updates_user(self):
event_moderator = get_user_model().objects.get(email='eventmoderator@example.com')
christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
christmas_event.save_revision()
# Copy it
new_christmas_event = christmas_event.create_alias(update_slug='new-christmas-event', user=event_moderator)
# Check that the owner has been updated
self.assertEqual(new_christmas_event.owner, event_moderator)
    def test_create_alias_multi_table_inheritance(self):
        """Aliasing a page that is two MTI levels deep (SingleEventPage -> EventPage -> Page)
        must create fresh rows at every inheritance level."""
        saint_patrick_event = SingleEventPage.objects.get(url_path='/home/events/saint-patrick/')
        # Copy it
        new_saint_patrick_event = saint_patrick_event.create_alias(update_slug='new-saint-patrick')
        # Check that new_saint_patrick_event is correct
        self.assertIsInstance(new_saint_patrick_event, SingleEventPage)
        self.assertEqual(new_saint_patrick_event.excerpt, saint_patrick_event.excerpt)
        # Check that new_saint_patrick_event is a different page, including parents from both EventPage and Page
        self.assertNotEqual(saint_patrick_event.id, new_saint_patrick_event.id)
        self.assertNotEqual(saint_patrick_event.eventpage_ptr.id, new_saint_patrick_event.eventpage_ptr.id)
        self.assertNotEqual(
            saint_patrick_event.eventpage_ptr.page_ptr.id,
            new_saint_patrick_event.eventpage_ptr.page_ptr.id
        )
        # Check that the url path was updated
        self.assertEqual(new_saint_patrick_event.url_path, '/home/events/new-saint-patrick/')
        # Check that both parent instances exist
        self.assertIsInstance(EventPage.objects.get(id=new_saint_patrick_event.id), EventPage)
        self.assertIsInstance(Page.objects.get(id=new_saint_patrick_event.id), Page)
def test_create_alias_copies_tags(self):
# create and publish a TaggedPage under Events
event_index = Page.objects.get(url_path='/home/events/')
tagged_page = TaggedPage(title='My tagged page', slug='my-tagged-page')
tagged_page.tags.add('wagtail', 'bird')
event_index.add_child(instance=tagged_page)
tagged_page.save_revision().publish()
old_tagged_item_ids = [item.id for item in tagged_page.tagged_items.all()]
# there should be two items here, with defined (truthy) IDs
self.assertEqual(len(old_tagged_item_ids), 2)
self.assertTrue(all(old_tagged_item_ids))
# copy to underneath homepage
homepage = Page.objects.get(url_path='/home/')
new_tagged_page = tagged_page.create_alias(parent=homepage)
self.assertNotEqual(tagged_page.id, new_tagged_page.id)
# new page should also have two tags
new_tagged_item_ids = [item.id for item in new_tagged_page.tagged_items.all()]
self.assertEqual(len(new_tagged_item_ids), 2)
self.assertTrue(all(new_tagged_item_ids))
# new tagged_item IDs should differ from old ones
self.assertTrue(all([
item_id not in old_tagged_item_ids
for item_id in new_tagged_item_ids
]))
    def test_create_alias_with_m2m_relations(self):
        """Aliasing a page with plain (non-parental) M2M relations should not raise."""
        # create and publish a ManyToManyBlogPage under Events
        event_index = Page.objects.get(url_path='/home/events/')
        category = BlogCategory.objects.create(name='Birds')
        advert = Advert.objects.create(url='http://www.heinz.com/', text="beanz meanz heinz")
        blog_page = ManyToManyBlogPage(title='My blog page', slug='my-blog-page')
        event_index.add_child(instance=blog_page)
        blog_page.adverts.add(advert)
        BlogCategoryBlogPage.objects.create(category=category, page=blog_page)
        blog_page.save_revision().publish()
        # copy to underneath homepage
        homepage = Page.objects.get(url_path='/home/')
        new_blog_page = blog_page.create_alias(parent=homepage)
        # M2M relations are not formally supported, so for now we're only interested in
        # the copy operation as a whole succeeding, rather than the child objects being copied
        self.assertNotEqual(blog_page.id, new_blog_page.id)
    def test_create_alias_with_generic_foreign_key(self):
        """A GenericForeignKey on the source page should point at the same target object
        on the alias."""
        # create and publish a GenericSnippetPage under Events
        event_index = Page.objects.get(url_path='/home/events/')
        advert = Advert.objects.create(url='http://www.heinz.com/', text="beanz meanz heinz")
        page = GenericSnippetPage(title='My snippet page', slug='my-snippet-page')
        page.snippet_content_object = advert
        event_index.add_child(instance=page)
        page.save_revision().publish()
        # copy to underneath homepage
        homepage = Page.objects.get(url_path='/home/')
        new_page = page.create_alias(parent=homepage)
        self.assertNotEqual(page.id, new_page.id)
        # the alias should reference the same snippet instance as the source
        self.assertEqual(new_page.snippet_content_object, advert)
def test_create_alias_with_o2o_relation(self):
event_index = Page.objects.get(url_path='/home/events/')
page = OneToOnePage(title='My page', slug='my-page')
event_index.add_child(instance=page)
homepage = Page.objects.get(url_path='/home/')
new_page = page.create_alias(parent=homepage)
self.assertNotEqual(page.id, new_page.id)
    @unittest.expectedFailure
    def test_create_alias_with_additional_excluded_fields(self):
        # NOTE(review): marked expectedFailure — presumably create_alias does not honour
        # exclude_fields_in_copy yet; confirm against the Page copy/alias implementation
        homepage = Page.objects.get(url_path='/home/')
        page = homepage.add_child(instance=PageWithExcludedCopyField(
            title='Discovery',
            slug='disco',
            content='NCC-1031',
            special_field='Context is for Kings'))
        new_page = page.create_alias(parent=homepage, update_slug='disco-2')
        self.assertEqual(page.title, new_page.title)
        self.assertNotEqual(page.id, new_page.id)
        self.assertNotEqual(page.path, new_page.path)
        # special_field is in the list to be excluded
        self.assertNotEqual(page.special_field, new_page.special_field)
    @unittest.expectedFailure
    def test_create_alias_with_excluded_parental_and_child_relations(self):
        """Test that a page will be copied with parental and child relations removed if excluded."""
        # NOTE(review): marked expectedFailure — presumably exclude_fields_in_copy is not
        # applied by create_alias; confirm before relying on this behaviour
        try:
            # modify excluded fields for this test
            EventPage.exclude_fields_in_copy = ['advert_placements', 'categories', 'signup_link']
            # set up data
            christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
            summer_category = EventCategory.objects.create(name='Summer')
            holiday_category = EventCategory.objects.create(name='Holidays')
            # add URL (to test excluding a basic field)
            christmas_event.signup_link = "https://christmas-is-awesome.com/rsvp"
            # add parental many to many relations
            christmas_event.categories = (summer_category, holiday_category)
            christmas_event.save()
            # Copy it
            new_christmas_event = christmas_event.create_alias(update_slug='new-christmas-event')
            # check that the signup_link was NOT copied
            self.assertEqual(christmas_event.signup_link, "https://christmas-is-awesome.com/rsvp")
            self.assertEqual(new_christmas_event.signup_link, '')
            # check that original event is untouched
            self.assertEqual(
                christmas_event.categories.count(),
                2,
                "Child objects (parental many to many) defined on the superclass were removed from the original page"
            )
            # check that parental many to many are NOT copied
            self.assertEqual(
                new_christmas_event.categories.count(),
                0,
                "Child objects (parental many to many) were copied but should be excluded"
            )
            # check that child objects on original event were left untouched
            self.assertEqual(
                christmas_event.advert_placements.count(),
                1,
                "Child objects defined on the original superclass were edited when copied"
            )
            # check that child objects were NOT copied
            self.assertEqual(
                new_christmas_event.advert_placements.count(),
                0,
                "Child objects defined on the superclass were copied and should not be"
            )
        finally:
            # reset excluded fields for future tests
            EventPage.exclude_fields_in_copy = []
class TestUpdateAliases(TestCase):
    """Tests for Page.update_aliases(), which pushes changes on a source page out to
    its aliases (including aliases of aliases)."""
    fixtures = ['test.json']
    def test_update_aliases(self):
        """Content and child objects propagate to aliases only when update_aliases() is called."""
        event_page = EventPage.objects.get(url_path='/home/events/christmas/')
        alias = event_page.create_alias(update_slug='new-event-page')
        alias_alias = alias.create_alias(update_slug='new-event-page-2')
        # Update the title and add a speaker
        event_page.title = "Updated title"
        event_page.draft_title = "A different draft title"
        event_page.speakers.add(EventPageSpeaker(
            first_name="Ted",
            last_name="Crilly",
        ))
        event_page.save()
        # Nothing should've happened yet
        alias.refresh_from_db()
        alias_alias.refresh_from_db()
        self.assertEqual(alias.title, "Christmas")
        self.assertEqual(alias_alias.title, "Christmas")
        self.assertEqual(alias.speakers.count(), 1)
        self.assertEqual(alias_alias.speakers.count(), 1)
        PageLogEntry.objects.all().delete()
        event_page.update_aliases()
        # Check that the aliases have been updated
        alias.refresh_from_db()
        alias_alias.refresh_from_db()
        self.assertEqual(alias.title, "Updated title")
        self.assertEqual(alias_alias.title, "Updated title")
        self.assertEqual(alias.speakers.count(), 2)
        self.assertEqual(alias_alias.speakers.count(), 2)
        # Draft titles shouldn't update as alias pages do not have drafts
        self.assertEqual(alias.draft_title, "Updated title")
        self.assertEqual(alias_alias.draft_title, "Updated title")
        # Check log entries were created
        self.assertTrue(PageLogEntry.objects.filter(page=alias, action='wagtail.publish').exists())
        self.assertTrue(PageLogEntry.objects.filter(page=alias_alias, action='wagtail.publish').exists())
    def test_update_aliases_publishes_drafts(self):
        """Aliases created while the source is unpublished go live once the source
        publishes and update_aliases() runs."""
        event_page = EventPage.objects.get(url_path='/home/events/christmas/')
        # Unpublish the event page so that the aliases will be created in draft
        event_page.live = False
        event_page.has_unpublished_changes = True
        event_page.save(clean=False)
        alias = event_page.create_alias(update_slug='new-event-page')
        alias_alias = alias.create_alias(update_slug='new-event-page-2')
        self.assertFalse(alias.live)
        self.assertFalse(alias_alias.live)
        # Publish the event page
        event_page.live = True
        event_page.has_unpublished_changes = False
        event_page.save(clean=False)
        # Nothing should've happened yet
        alias.refresh_from_db()
        alias_alias.refresh_from_db()
        self.assertFalse(alias.live)
        self.assertFalse(alias_alias.live)
        PageLogEntry.objects.all().delete()
        event_page.update_aliases()
        # Check that the aliases have been updated
        alias.refresh_from_db()
        alias_alias.refresh_from_db()
        self.assertTrue(alias.live)
        self.assertTrue(alias_alias.live)
        # Check log entries were created
        self.assertTrue(PageLogEntry.objects.filter(page=alias, action='wagtail.publish').exists())
        self.assertTrue(PageLogEntry.objects.filter(page=alias_alias, action='wagtail.publish').exists())
class TestCopyForTranslation(TestCase):
    """Tests for Page.copy_for_translation(), which duplicates a page into another
    locale while keeping the same translation_key."""
    fixtures = ['test.json']
    def setUp(self):
        # English source pages and a French locale to translate into
        self.en_homepage = Page.objects.get(url_path='/home/').specific
        self.en_eventindex = EventIndex.objects.get(url_path='/home/events/')
        self.en_eventpage = EventPage.objects.get(url_path='/home/events/christmas/')
        self.root_page = self.en_homepage.get_parent()
        self.fr_locale = Locale.objects.create(language_code="fr")
    def test_copy_homepage(self):
        fr_homepage = self.en_homepage.copy_for_translation(self.fr_locale)
        self.assertNotEqual(self.en_homepage.id, fr_homepage.id)
        self.assertEqual(fr_homepage.locale, self.fr_locale)
        self.assertEqual(fr_homepage.translation_key, self.en_homepage.translation_key)
        # At the top level, the language code should be appended to the slug
        self.assertEqual(fr_homepage.slug, "home-fr")
        # Translation must be in draft
        self.assertFalse(fr_homepage.live)
        self.assertTrue(fr_homepage.has_unpublished_changes)
        # Check log
        log_entry = PageLogEntry.objects.get(action='wagtail.copy_for_translation')
        self.assertEqual(log_entry.data['source_locale']['language_code'], 'en')
        self.assertEqual(log_entry.data['page']['locale']['language_code'], 'fr')
        self.assertEqual(log_entry.message, "Copied for translation from Root (English)")
    def test_copy_homepage_slug_exists(self):
        # This test is the same as test_copy_homepage, but we will create another page with
        # the slug "home-fr" before translating. copy_for_translation should pick a different slug
        self.root_page.add_child(instance=SimplePage(title="Old french homepage", slug="home-fr", content="Test content"))
        fr_homepage = self.en_homepage.copy_for_translation(self.fr_locale)
        self.assertEqual(fr_homepage.slug, "home-fr-1")
    def test_copy_childpage(self):
        # Create translated homepage manually
        fr_homepage = self.root_page.add_child(instance=Page(
            title="french homepage",
            slug="home-fr",
            locale=self.fr_locale,
            translation_key=self.en_homepage.translation_key
        ))
        fr_eventindex = self.en_eventindex.copy_for_translation(self.fr_locale)
        self.assertNotEqual(self.en_eventindex.id, fr_eventindex.id)
        self.assertEqual(fr_eventindex.locale, self.fr_locale)
        self.assertEqual(fr_eventindex.translation_key, self.en_eventindex.translation_key)
        # Check that the fr event index was created under the fr homepage
        self.assertEqual(fr_eventindex.get_parent(), fr_homepage)
        # The slug should be the same when copying to another tree
        self.assertEqual(self.en_eventindex.slug, fr_eventindex.slug)
        # Check log
        log_entry = PageLogEntry.objects.get(action='wagtail.copy_for_translation')
        self.assertEqual(log_entry.data['source_locale']['language_code'], 'en')
        self.assertEqual(log_entry.data['page']['locale']['language_code'], 'fr')
        self.assertEqual(log_entry.message, "Copied for translation from Welcome to the Wagtail test site! (English)")
    def test_copy_childpage_without_parent(self):
        # This test is the same as test_copy_childpage but we won't create the parent page first
        with self.assertRaises(ParentNotTranslatedError):
            self.en_eventindex.copy_for_translation(self.fr_locale)
    def test_copy_childpage_with_copy_parents(self):
        # This time we will set copy_parents
        fr_eventindex = self.en_eventindex.copy_for_translation(self.fr_locale, copy_parents=True)
        self.assertNotEqual(self.en_eventindex.id, fr_eventindex.id)
        self.assertEqual(fr_eventindex.locale, self.fr_locale)
        self.assertEqual(fr_eventindex.translation_key, self.en_eventindex.translation_key)
        self.assertEqual(self.en_eventindex.slug, fr_eventindex.slug)
        # This should create the homepage as well
        fr_homepage = fr_eventindex.get_parent()
        self.assertNotEqual(self.en_homepage.id, fr_homepage.id)
        self.assertEqual(fr_homepage.locale, self.fr_locale)
        self.assertEqual(fr_homepage.translation_key, self.en_homepage.translation_key)
        self.assertEqual(fr_homepage.slug, "home-fr")
    def test_copy_page_with_translatable_child_objects(self):
        # Create translated homepage and event index manually
        fr_homepage = self.root_page.add_child(instance=Page(
            title="french homepage",
            slug="home-fr",
            locale=self.fr_locale,
            translation_key=self.en_homepage.translation_key
        ))
        fr_homepage.add_child(instance=EventIndex(
            title="Events",
            slug="events",
            locale=self.fr_locale,
            translation_key=self.en_eventindex.translation_key
        ))
        # Add an award to the speaker
        # TODO: Nested child objects not supported by page copy
        en_speaker = self.en_eventpage.speakers.get()
        # en_award = EventPageSpeakerAward.objects.create(
        #     speaker=en_speaker,
        #     name="Golden Globe"
        # )
        fr_eventpage = self.en_eventpage.copy_for_translation(self.fr_locale)
        # Check that the speakers and awards were copied for translation properly
        fr_speaker = fr_eventpage.speakers.get()
        self.assertEqual(fr_speaker.locale, self.fr_locale)
        self.assertEqual(fr_speaker.translation_key, en_speaker.translation_key)
        self.assertEqual(list(fr_speaker.get_translations()), [en_speaker])
        # TODO: Nested child objects not supported by page copy
        # fr_award = fr_speaker.awards.get()
        # self.assertEqual(fr_award.locale, self.fr_locale)
        # self.assertEqual(fr_award.translation_key, en_award.translation_key)
        # self.assertEqual(list(fr_award.get_translations()), [en_award])
    def test_copies_missing_parents_as_aliases(self):
        fr_eventpage = self.en_eventpage.copy_for_translation(self.fr_locale, copy_parents=True)
        fr_eventindex = fr_eventpage.get_parent()
        # Check parent is a translation of its English original
        self.assertEqual(fr_eventindex.locale, self.fr_locale)
        self.assertEqual(fr_eventindex.translation_key, self.en_eventindex.translation_key)
        # Check parent is also an alias of its English original
        self.assertEqual(fr_eventindex.alias_of, self.en_eventindex)
class TestSubpageTypeBusinessRules(TestCase, WagtailTestUtils):
    """Tests for the parent/subpage type restriction machinery
    (allowed_subpage_models, allowed_parent_page_models, can_exist_under,
    can_create_at, can_move_to, and max_count limits)."""
    def test_allowed_subpage_models(self):
        # SimplePage does not define any restrictions on subpage types
        # SimplePage is a valid subpage of SimplePage
        self.assertIn(SimplePage, SimplePage.allowed_subpage_models())
        # BusinessIndex is a valid subpage of SimplePage
        self.assertIn(BusinessIndex, SimplePage.allowed_subpage_models())
        # BusinessSubIndex is not valid, because it explicitly omits SimplePage from parent_page_types
        self.assertNotIn(BusinessSubIndex, SimplePage.allowed_subpage_models())
        # BusinessChild has an empty subpage_types list, so does not allow anything
        self.assertNotIn(SimplePage, BusinessChild.allowed_subpage_models())
        self.assertNotIn(BusinessIndex, BusinessChild.allowed_subpage_models())
        self.assertNotIn(BusinessSubIndex, BusinessChild.allowed_subpage_models())
        # BusinessSubIndex only allows BusinessChild as subpage type
        self.assertNotIn(SimplePage, BusinessSubIndex.allowed_subpage_models())
        self.assertIn(BusinessChild, BusinessSubIndex.allowed_subpage_models())
    def test_allowed_parent_page_models(self):
        # SimplePage does not define any restrictions on parent page types
        # SimplePage is a valid parent page of SimplePage
        self.assertIn(SimplePage, SimplePage.allowed_parent_page_models())
        # BusinessChild cannot be a parent of anything
        self.assertNotIn(BusinessChild, SimplePage.allowed_parent_page_models())
        # BusinessNowherePage does not allow anything as a parent
        self.assertNotIn(SimplePage, BusinessNowherePage.allowed_parent_page_models())
        self.assertNotIn(StandardIndex, BusinessNowherePage.allowed_parent_page_models())
        # BusinessSubIndex only allows BusinessIndex as a parent
        self.assertNotIn(SimplePage, BusinessSubIndex.allowed_parent_page_models())
        self.assertIn(BusinessIndex, BusinessSubIndex.allowed_parent_page_models())
    def test_can_exist_under(self):
        self.assertTrue(SimplePage.can_exist_under(SimplePage()))
        # StandardIndex should only be allowed under a Page
        self.assertTrue(StandardIndex.can_exist_under(Page()))
        self.assertFalse(StandardIndex.can_exist_under(SimplePage()))
        # The Business pages are quite restrictive in their structure
        self.assertTrue(BusinessSubIndex.can_exist_under(BusinessIndex()))
        self.assertTrue(BusinessChild.can_exist_under(BusinessIndex()))
        self.assertTrue(BusinessChild.can_exist_under(BusinessSubIndex()))
        self.assertFalse(BusinessSubIndex.can_exist_under(SimplePage()))
        self.assertFalse(BusinessSubIndex.can_exist_under(BusinessSubIndex()))
        self.assertFalse(BusinessChild.can_exist_under(SimplePage()))
    def test_can_create_at(self):
        # Pages are not `is_creatable`, and should not be creatable
        self.assertFalse(Page.can_create_at(Page()))
        # SimplePage can be created under a simple page
        self.assertTrue(SimplePage.can_create_at(SimplePage()))
        # StandardIndex can be created under a Page, but not a SimplePage
        self.assertTrue(StandardIndex.can_create_at(Page()))
        self.assertFalse(StandardIndex.can_create_at(SimplePage()))
        # The Business pages are quite restrictive in their structure
        self.assertTrue(BusinessSubIndex.can_create_at(BusinessIndex()))
        self.assertTrue(BusinessChild.can_create_at(BusinessIndex()))
        self.assertTrue(BusinessChild.can_create_at(BusinessSubIndex()))
        self.assertFalse(BusinessChild.can_create_at(SimplePage()))
        self.assertFalse(BusinessSubIndex.can_create_at(SimplePage()))
    def test_can_create_at_with_max_count_per_parent_limited_to_one(self):
        root_page = Page.objects.get(url_path='/home/')
        # Create 2 parent pages for our limited page model
        parent1 = root_page.add_child(instance=SimpleParentPage(title='simple parent', slug='simple-parent'))
        parent2 = root_page.add_child(instance=SimpleParentPage(title='simple parent', slug='simple-parent-2'))
        # Add a child page to one of the pages (assert just to be sure)
        self.assertTrue(SimpleChildPage.can_create_at(parent1))
        parent1.add_child(instance=SimpleChildPage(title='simple child', slug='simple-child'))
        # We already have a `SimpleChildPage` as a child of `parent1`, and since it is limited
        # to have only 1 child page, we cannot create another one. However, we should still be able
        # to create an instance for this page at a different location (as child of `parent2`)
        self.assertFalse(SimpleChildPage.can_create_at(parent1))
        self.assertTrue(SimpleChildPage.can_create_at(parent2))
    def test_can_move_to(self):
        self.assertTrue(SimplePage().can_move_to(SimplePage()))
        # StandardIndex should only be allowed under a Page
        self.assertTrue(StandardIndex().can_move_to(Page()))
        self.assertFalse(StandardIndex().can_move_to(SimplePage()))
        # The Business pages are quite restrictive in their structure
        self.assertTrue(BusinessSubIndex().can_move_to(BusinessIndex()))
        self.assertTrue(BusinessChild().can_move_to(BusinessIndex()))
        self.assertTrue(BusinessChild().can_move_to(BusinessSubIndex()))
        self.assertFalse(BusinessChild().can_move_to(SimplePage()))
        self.assertFalse(BusinessSubIndex().can_move_to(SimplePage()))
    def test_singleton_page_creation(self):
        root_page = Page.objects.get(url_path='/home/')
        # A single singleton page should be creatable
        self.assertTrue(SingletonPage.can_create_at(root_page))
        # Create a singleton page
        root_page.add_child(instance=SingletonPage(
            title='singleton', slug='singleton'))
        # A second singleton page should not be creatable
        self.assertFalse(SingletonPage.can_create_at(root_page))
class TestIssue735(TestCase):
    """
    Issue 735 reports that URL paths of child pages are not
    updated correctly when slugs of parent pages are updated
    """
    fixtures = ['test.json']
    def test_child_urls_updated_on_parent_publish(self):
        parent_index = Page.objects.get(url_path='/home/events/')
        child_event = EventPage.objects.get(url_path='/home/events/christmas/')
        # Rename the parent's slug via a published revision
        parent_index.slug = 'old-events'
        parent_index.save_revision().publish()
        # Reload the child: its url_path must reflect the parent's new slug
        refreshed_child = EventPage.objects.get(id=child_event.id)
        self.assertEqual(refreshed_child.url_path, '/home/old-events/christmas/')
class TestIssue756(TestCase):
    """
    Issue 756 reports that the latest_revision_created_at
    field was getting clobbered whenever a revision was published
    """
    def test_publish_revision_doesnt_remove_latest_revision_created_at(self):
        # Saving a revision stamps latest_revision_created_at on the page
        new_revision = Page.objects.get(id=1).save_revision()
        self.assertIsNotNone(Page.objects.get(id=1).latest_revision_created_at)
        # Publishing that revision must not wipe the timestamp
        new_revision.publish()
        self.assertIsNotNone(Page.objects.get(id=1).latest_revision_created_at)
class TestIssue1216(TestCase):
    """
    Test that url paths greater than 255 characters are supported
    """
    fixtures = ['test.json']
    def test_url_path_can_exceed_255_characters(self):
        event_index = Page.objects.get(url_path='/home/events/')
        christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
        # Change the christmas_event slug first - this way, we test that the process for
        # updating child url paths also handles >255 character paths correctly
        new_christmas_slug = "christmas-%s-christmas" % ("0123456789" * 20)
        christmas_event.slug = new_christmas_slug
        christmas_event.save_revision().publish()
        # Change the event index slug and publish it
        # (each slug alone is well over 200 characters, so the combined url_path exceeds 255)
        new_event_index_slug = "events-%s-events" % ("0123456789" * 20)
        event_index.slug = new_event_index_slug
        event_index.save_revision().publish()
        # Check that the url path updated correctly
        new_christmas_event = EventPage.objects.get(id=christmas_event.id)
        expected_url_path = "/home/%s/%s/" % (new_event_index_slug, new_christmas_slug)
        self.assertEqual(new_christmas_event.url_path, expected_url_path)
class TestIsCreatable(TestCase):
    """Tests for the Page.is_creatable flag and its interaction with get_page_models()."""
    def test_is_creatable_default(self):
        """By default, pages should be creatable"""
        self.assertTrue(SimplePage.is_creatable)
        self.assertIn(SimplePage, get_page_models())
    def test_is_creatable_false(self):
        """Page types should be able to disable their creation"""
        self.assertFalse(MTIBasePage.is_creatable)
        # non-creatable pages should still appear in the get_page_models list
        self.assertIn(MTIBasePage, get_page_models())
    def test_is_creatable_not_inherited(self):
        """
        is_creatable should not be inherited in the normal manner, and should
        default to True unless set otherwise
        """
        self.assertTrue(MTIChildPage.is_creatable)
        self.assertIn(MTIChildPage, get_page_models())
    def test_abstract_pages(self):
        """
        Abstract models should not be creatable
        """
        self.assertFalse(AbstractPage.is_creatable)
        self.assertNotIn(AbstractPage, get_page_models())
class TestDeferredPageClasses(TestCase):
    def test_deferred_page_classes_are_not_registered(self):
        """
        In Django <1.10, a call to `defer` such as `SimplePage.objects.defer('content')`
        will dynamically create a subclass of SimplePage. Ensure that these subclasses
        are not registered in the get_page_models() list
        """
        # Force evaluation of the deferred queryset so any dynamic subclass gets created
        for _ in SimplePage.objects.defer('content'):
            pass
        registered_subclasses = [model for model in get_page_models() if issubclass(model, SimplePage)]
        self.assertEqual(registered_subclasses, [SimplePage])
class TestPageManager(TestCase):
    """Tests that Page and its subclasses use PageManager (or a developer-supplied
    override) as their default manager."""
    def test_page_manager(self):
        """
        Assert that the Page class uses PageManager
        """
        self.assertIs(type(Page.objects), PageManager)
    def test_page_subclass_manager(self):
        """
        Assert that Page subclasses get a PageManager without having to do
        anything special. MTI subclasses do *not* inherit their parents Manager
        by default.
        """
        self.assertIs(type(SimplePage.objects), PageManager)
    def test_custom_page_manager(self):
        """
        Subclasses should be able to override their default Manager, and
        Wagtail should respect this. It is up to the developer to ensure their
        custom Manager inherits from PageManager.
        """
        self.assertIs(type(CustomManagerPage.objects), CustomManager)
    def test_custom_page_queryset(self):
        """
        Managers that are constructed from a custom PageQuerySet
        (via PageManager.from_queryset(CustomPageQuerySet)) should return
        querysets of that type
        """
        self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet)
        self.assertIs(type(CustomManagerPage.objects.about_spam()), CustomPageQuerySet)
        self.assertIs(type(CustomManagerPage.objects.all().about_spam()), CustomPageQuerySet)
        self.assertIs(type(CustomManagerPage.objects.about_spam().all()), CustomPageQuerySet)
    def test_abstract_base_page_manager(self):
        """
        Abstract base classes should be able to override their default Manager,
        and Wagtail should respect this. It is up to the developer to ensure
        their custom Manager inherits from PageManager.
        """
        self.assertIs(type(MyCustomPage.objects), CustomManager)
class TestIssue2024(TestCase):
    """
    This tests that deleting a content type can't delete any Page objects.
    """
    fixtures = ['test.json']
    def test_delete_content_type(self):
        event_index = Page.objects.get(url_path='/home/events/')
        # Delete the content type
        event_index_content_type = event_index.content_type
        event_index_content_type.delete()
        # Fetch the page again, it should still exist
        event_index = Page.objects.get(url_path='/home/events/')
        # Check that the content_type changed to Page (the generic fallback),
        # rather than the page row being cascade-deleted
        self.assertEqual(event_index.content_type, ContentType.objects.get_for_model(Page))
class TestMakePreviewRequest(TestCase):
fixtures = ['test.json']
    def test_make_preview_request_for_accessible_page(self):
        """make_preview_request() should fabricate a complete, WSGI-compliant GET request
        for the page's own URL on its site's host."""
        event_index = Page.objects.get(url_path='/home/events/')
        response = event_index.make_preview_request()
        self.assertEqual(response.status_code, 200)
        request = response.context_data['request']
        # request should have the correct path and hostname for this page
        self.assertEqual(request.path, '/events/')
        self.assertEqual(request.META['HTTP_HOST'], 'localhost')
        # check other env vars required by the WSGI spec
        self.assertEqual(request.META['REQUEST_METHOD'], 'GET')
        self.assertEqual(request.META['SCRIPT_NAME'], '')
        self.assertEqual(request.META['PATH_INFO'], '/events/')
        self.assertEqual(request.META['SERVER_NAME'], 'localhost')
        self.assertEqual(request.META['SERVER_PORT'], 80)
        self.assertEqual(request.META['SERVER_PROTOCOL'], 'HTTP/1.1')
        self.assertEqual(request.META['wsgi.version'], (1, 0))
        self.assertEqual(request.META['wsgi.url_scheme'], 'http')
        self.assertIn('wsgi.input', request.META)
        self.assertIn('wsgi.errors', request.META)
        self.assertIn('wsgi.multithread', request.META)
        self.assertIn('wsgi.multiprocess', request.META)
        self.assertIn('wsgi.run_once', request.META)
    def test_make_preview_request_for_accessible_page_https(self):
        """A site configured on port 443 should produce a preview request with the
        https URL scheme and SERVER_PORT 443."""
        Site.objects.update(port=443)
        event_index = Page.objects.get(url_path='/home/events/')
        response = event_index.make_preview_request()
        self.assertEqual(response.status_code, 200)
        request = response.context_data['request']
        # request should have the correct path and hostname for this page
        self.assertEqual(request.path, '/events/')
        self.assertEqual(request.META['HTTP_HOST'], 'localhost')
        # check other env vars required by the WSGI spec
        self.assertEqual(request.META['REQUEST_METHOD'], 'GET')
        self.assertEqual(request.META['SCRIPT_NAME'], '')
        self.assertEqual(request.META['PATH_INFO'], '/events/')
        self.assertEqual(request.META['SERVER_NAME'], 'localhost')
        self.assertEqual(request.META['SERVER_PORT'], 443)
        self.assertEqual(request.META['SERVER_PROTOCOL'], 'HTTP/1.1')
        self.assertEqual(request.META['wsgi.version'], (1, 0))
        self.assertEqual(request.META['wsgi.url_scheme'], 'https')
        self.assertIn('wsgi.input', request.META)
        self.assertIn('wsgi.errors', request.META)
        self.assertIn('wsgi.multithread', request.META)
        self.assertIn('wsgi.multiprocess', request.META)
        self.assertIn('wsgi.run_once', request.META)
def test_make_preview_request_for_accessible_page_non_standard_port(self):
Site.objects.update(port=8888)
event_index = Page.objects.get(url_path='/home/events/')
response = event_index.make_preview_request()
self.assertEqual(response.status_code, 200)
request = response.context_data['request']
# request should have the correct path and hostname for this page
self.assertEqual(request.path, '/events/')
self.assertEqual(request.META['HTTP_HOST'], 'localhost:8888')
# check other env vars required by the WSGI spec
self.assertEqual(request.META['REQUEST_METHOD'], 'GET')
self.assertEqual(request.META['SCRIPT_NAME'], '')
self.assertEqual(request.META['PATH_INFO'], '/events/')
self.assertEqual(request.META['SERVER_NAME'], 'localhost')
self.assertEqual(request.META['SERVER_PORT'], 8888)
self.assertEqual(request.META['SERVER_PROTOCOL'], 'HTTP/1.1')
self.assertEqual(request.META['wsgi.version'], (1, 0))
self.assertEqual(request.META['wsgi.url_scheme'], 'http')
self.assertIn('wsgi.input', request.META)
self.assertIn('wsgi.errors', request.META)
self.assertIn('wsgi.multithread', request.META)
self.assertIn('wsgi.multiprocess', request.META)
self.assertIn('wsgi.run_once', request.META)
def test_make_preview_request_for_accessible_page_with_original_request(self):
event_index = Page.objects.get(url_path='/home/events/')
original_headers = {
'REMOTE_ADDR': '192.168.0.1',
'HTTP_X_FORWARDED_FOR': '192.168.0.2,192.168.0.3',
'HTTP_COOKIE': "test=1;blah=2",
'HTTP_USER_AGENT': "Test Agent",
'HTTP_AUTHORIZATION': "Basic V2FndGFpbDpXYWd0YWlsCg==",
}
factory = RequestFactory(**original_headers)
original_request = factory.get('/home/events/')
response = event_index.make_preview_request(original_request)
self.assertEqual(response.status_code, 200)
request = response.context_data['request']
# request should have the all the special headers we set in original_request
self.assertEqual(request.META['REMOTE_ADDR'], original_request.META['REMOTE_ADDR'])
self.assertEqual(request.META['HTTP_X_FORWARDED_FOR'], original_request.META['HTTP_X_FORWARDED_FOR'])
self.assertEqual(request.META['HTTP_COOKIE'], original_request.META['HTTP_COOKIE'])
self.assertEqual(request.META['HTTP_USER_AGENT'], original_request.META['HTTP_USER_AGENT'])
self.assertEqual(request.META['HTTP_AUTHORIZATION'], original_request.META['HTTP_AUTHORIZATION'])
# check other env vars required by the WSGI spec
self.assertEqual(request.META['REQUEST_METHOD'], 'GET')
self.assertEqual(request.META['SCRIPT_NAME'], '')
self.assertEqual(request.META['PATH_INFO'], '/events/')
self.assertEqual(request.META['SERVER_NAME'], 'localhost')
self.assertEqual(request.META['SERVER_PORT'], 80)
self.assertEqual(request.META['SERVER_PROTOCOL'], 'HTTP/1.1')
self.assertEqual(request.META['wsgi.version'], (1, 0))
self.assertEqual(request.META['wsgi.url_scheme'], 'http')
self.assertIn('wsgi.input', request.META)
self.assertIn('wsgi.errors', request.META)
self.assertIn('wsgi.multithread', request.META)
self.assertIn('wsgi.multiprocess', request.META)
self.assertIn('wsgi.run_once', request.META)
@override_settings(ALLOWED_HOSTS=['production.example.com'])
def test_make_preview_request_for_inaccessible_page_should_use_valid_host(self):
root_page = Page.objects.get(url_path='/')
response = root_page.make_preview_request()
self.assertEqual(response.status_code, 200)
request = response.context_data['request']
# in the absence of an actual Site record where we can access this page,
# make_preview_request should still provide a hostname that Django's host header
# validation won't reject
self.assertEqual(request.META['HTTP_HOST'], 'production.example.com')
@override_settings(ALLOWED_HOSTS=['*'])
def test_make_preview_request_for_inaccessible_page_with_wildcard_allowed_hosts(self):
root_page = Page.objects.get(url_path='/')
response = root_page.make_preview_request()
self.assertEqual(response.status_code, 200)
request = response.context_data['request']
# '*' is not a valid hostname, so ensure that we replace it with something sensible
self.assertNotEqual(request.META['HTTP_HOST'], '*')
def test_is_previewable(self):
event_index = Page.objects.get(url_path='/home/events/')
stream_page = StreamPage(title='stream page', body=[('text', 'hello')])
event_index.add_child(instance=stream_page)
plain_stream_page = Page.objects.get(id=stream_page.id)
# StreamPage sets preview_modes to an empty list, so stream_page is not previewable
with self.assertNumQueries(0):
self.assertFalse(stream_page.is_previewable())
# is_previewable should also cope with being called on a base Page object, at the
# cost of an extra query to access the specific object
with self.assertNumQueries(1):
self.assertFalse(plain_stream_page.is_previewable())
# event_index is a plain Page object, but we should recognise that preview_modes
# has not been overridden on EventIndexPage and avoid the extra query
with self.assertNumQueries(0):
self.assertTrue(event_index.is_previewable())
class TestShowInMenusDefaultOption(TestCase):
"""
This tests that a page model can define the default for 'show_in_menus'
"""
fixtures = ['test.json']
def test_show_in_menus_default(self):
# Create a page that does not have the default init
page = Page(
title='My Awesome Page', slug='my-awesome-page')
# Check that the page instance creates with show_in_menu as False
self.assertFalse(page.show_in_menus)
def test_show_in_menus_default_override(self):
# Create a page that does have the default init
page = AlwaysShowInMenusPage(
title='My Awesome Page', slug='my-awesome-page')
# Check that the page instance creates with show_in_menu as True
self.assertTrue(page.show_in_menus)
class TestPageWithContentJSON(TestCase):
fixtures = ['test.json']
def test_with_content_json_preserves_values(self):
original_page = SimplePage.objects.get(url_path='/home/about-us/')
eventpage_content_type = ContentType.objects.get_for_model(EventPage)
# Take a json representation of the page and update it
# with some alternative values
content = json.loads(original_page.to_json())
content.update(
title='About them',
draft_title='About them',
slug='about-them',
url_path='/home/some-section/about-them/',
pk=original_page.pk + 999,
numchild=original_page.numchild + 999,
depth=original_page.depth + 999,
path=original_page.path + 'ABCDEF',
content='<p>They are not as good</p>',
first_published_at="2000-01-01T00:00:00Z",
last_published_at="2000-01-01T00:00:00Z",
live=not original_page.live,
locked=True,
locked_by=1,
locked_at="2000-01-01T00:00:00Z",
has_unpublished_changes=not original_page.has_unpublished_changes,
content_type=eventpage_content_type.id,
show_in_menus=not original_page.show_in_menus,
owner=1
)
# Convert values back to json and pass them to with_content_json()
# to get an updated version of the page
content_json = json.dumps(content)
updated_page = original_page.with_content_json(content_json)
# The following attributes values should have changed
for attr_name in ('title', 'slug', 'content', 'url_path', 'show_in_menus'):
self.assertNotEqual(
getattr(original_page, attr_name),
getattr(updated_page, attr_name)
)
# The following attribute values should have been preserved,
# despite new values being provided in content_json
for attr_name in (
'pk', 'path', 'depth', 'numchild', 'content_type', 'draft_title',
'live', 'has_unpublished_changes', 'owner', 'locked', 'locked_by', 'locked_at',
'latest_revision_created_at', 'first_published_at',
):
self.assertEqual(
getattr(original_page, attr_name),
getattr(updated_page, attr_name)
)
# The url_path should reflect the new slug value, but the
# rest of the path should have remained unchanged
self.assertEqual(updated_page.url_path, '/home/about-them/')
class TestUnpublish(TestCase):
fixtures = ['test.json']
def test_unpublish_doesnt_call_full_clean_before_save(self):
root_page = Page.objects.get(id=1)
home_page = root_page.add_child(
instance=SimplePage(title="Homepage", slug="home2", content="hello")
)
# Empty the content - bypassing validation which would otherwise prevent it
home_page.save(clean=False)
# This shouldn't fail with a ValidationError.
home_page.unpublish()
def test_unpublish_also_unpublishes_aliases(self):
event_page = EventPage.objects.get(url_path='/home/events/christmas/')
alias = event_page.create_alias(update_slug='new-event-page')
alias_alias = alias.create_alias(update_slug='new-event-page-2')
self.assertTrue(event_page.live)
self.assertTrue(alias.live)
self.assertTrue(alias_alias.live)
PageLogEntry.objects.all().delete()
# Unpublish the event page
event_page.unpublish()
alias.refresh_from_db()
alias_alias.refresh_from_db()
self.assertFalse(event_page.live)
self.assertFalse(alias.live)
self.assertFalse(alias_alias.live)
# Check log entries were created for the aliases
self.assertTrue(PageLogEntry.objects.filter(page=alias, action='wagtail.unpublish').exists())
self.assertTrue(PageLogEntry.objects.filter(page=alias_alias, action='wagtail.unpublish').exists())
class TestCachedContentType(TestCase):
"""Tests for Page.cached_content_type"""
def setUp(self):
root_page = Page.objects.first()
self.page = root_page.add_child(
instance=SimplePage(title="Test1", slug="test1", content="test")
)
self.specific_page_ctype = ContentType.objects.get_for_model(SimplePage)
def test_golden_path(self):
"""
The return value should match the value you'd get
if fetching the ContentType from the database,
and shouldn't trigger any database queries when
the ContentType is already in memory.
"""
with self.assertNumQueries(0):
result = self.page.cached_content_type
self.assertEqual(
result, ContentType.objects.get(id=self.page.content_type_id)
)
class TestGetTranslatableModels(TestCase):
def test_get_translatable_models(self):
translatable_models = get_translatable_models()
# Only root translatable models should be included by default
self.assertNotIn(EventPage, translatable_models)
self.assertIn(Page, translatable_models)
self.assertIn(EventPageSpeaker, translatable_models)
self.assertNotIn(Site, translatable_models)
self.assertNotIn(Advert, translatable_models)
def test_get_translatable_models_include_subclasses(self):
translatable_models = get_translatable_models(include_subclasses=True)
self.assertIn(EventPage, translatable_models)
self.assertIn(Page, translatable_models)
self.assertIn(EventPageSpeaker, translatable_models)
self.assertNotIn(Site, translatable_models)
self.assertNotIn(Advert, translatable_models)
class TestDefaultLocale(TestCase):
def setUp(self):
self.root_page = Page.objects.first()
def test_default_locale(self):
page = self.root_page.add_child(
instance=SimplePage(title="Test1", slug="test1", content="test")
)
self.assertEqual(page.locale, self.root_page.locale)
def test_override_default_locale(self):
fr_locale = Locale.objects.create(language_code="fr")
page = self.root_page.add_child(
instance=SimplePage(title="Test1", slug="test1", content="test", locale=fr_locale)
)
self.assertEqual(page.locale, fr_locale)
def test_always_defaults_to_parent_locale(self):
fr_locale = Locale.objects.create(language_code="fr")
fr_page = self.root_page.add_child(
instance=SimplePage(title="Test1", slug="test1", content="test", locale=fr_locale)
)
page = fr_page.add_child(
instance=SimplePage(title="Test1", slug="test1", content="test")
)
self.assertEqual(page.locale, fr_locale)
class TestLocalized(TestCase):
fixtures = ['test.json']
def setUp(self):
self.fr_locale = Locale.objects.create(language_code="fr")
self.event_page = Page.objects.get(url_path='/home/events/christmas/')
self.fr_event_page = self.event_page.copy_for_translation(self.fr_locale, copy_parents=True)
self.fr_event_page.title = 'Noël'
self.fr_event_page.save(update_fields=['title'])
self.fr_event_page.save_revision().publish()
def test_localized_same_language(self):
self.assertEqual(self.event_page.localized, self.event_page)
self.assertEqual(self.event_page.localized_draft, self.event_page)
def test_localized_different_language(self):
with translation.override("fr"):
self.assertEqual(self.event_page.localized, self.fr_event_page.page_ptr)
self.assertEqual(self.event_page.localized_draft, self.fr_event_page.page_ptr)
def test_localized_different_language_unpublished(self):
# We shouldn't autolocalize if the translation is unpublished
self.fr_event_page.unpublish()
self.fr_event_page.save()
with translation.override("fr"):
self.assertEqual(self.event_page.localized, self.event_page)
self.assertEqual(self.event_page.localized_draft, self.fr_event_page.page_ptr)
def test_localized_with_non_content_active_locale(self):
# if active locale does not have a Locale record, use default locale
with translation.override("de"):
self.assertEqual(self.event_page.localized, self.event_page)
self.assertEqual(self.fr_event_page.localized, self.event_page.specific)
self.assertEqual(self.event_page.localized_draft, self.event_page)
self.assertEqual(self.fr_event_page.localized_draft, self.event_page.specific)
def test_localized_with_missing_default_locale(self):
# if neither active locale nor default language code have a Locale record, return self
# Change the 'en' locale to 'pl', so that no locale record for LANGUAGE_CODE exists.
# This replicates a scenario where a site was originally built with LANGUAGE_CODE='pl'
# but subsequently changed to LANGUAGE_CODE='en' (a change which was not reflected in
# the database).
en_locale = Locale.objects.get(language_code="en")
en_locale.language_code = "pl"
en_locale.save()
with translation.override("de"):
self.assertEqual(self.event_page.localized, self.event_page)
self.assertEqual(self.fr_event_page.localized, self.fr_event_page)
self.assertEqual(self.event_page.localized_draft, self.event_page)
self.assertEqual(self.fr_event_page.localized_draft, self.fr_event_page)
| torchbox/wagtail | wagtail/core/tests/test_page_model.py | Python | bsd-3-clause | 137,322 | [
"MOOSE"
] | 26ab83435b1fd1dab72f4d0321d724b895d16fa9428c0f5ad14c536153cccf2e |
#-----------------------------------------------------------------
# _ast_gen.py
#
# Generates the AST Node classes from a specification given in
# a configuration file
#
# The design of this module was inspired by astgen.py from the
# Python 2.5 code-base.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# License: BSD
#-----------------------------------------------------------------
import pprint
from string import Template
class ASTCodeGenerator(object):
def __init__(self, cfg_filename='_c_ast.cfg'):
""" Initialize the code generator from a configuration
file.
"""
self.cfg_filename = cfg_filename
self.node_cfg = [NodeCfg(name, contents)
for (name, contents) in self.parse_cfgfile(cfg_filename)]
def generate(self, file=None):
""" Generates the code into file, an open file buffer.
"""
src = Template(_PROLOGUE_COMMENT).substitute(
cfg_filename=self.cfg_filename)
src += _PROLOGUE_CODE
for node_cfg in self.node_cfg:
src += node_cfg.generate_source() + '\n\n'
file.write(src)
def parse_cfgfile(self, filename):
""" Parse the configuration file and yield pairs of
(name, contents) for each node.
"""
with open(filename, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
colon_i = line.find(':')
lbracket_i = line.find('[')
rbracket_i = line.find(']')
if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i:
raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line))
name = line[:colon_i]
val = line[lbracket_i + 1:rbracket_i]
vallist = [v.strip() for v in val.split(',')] if val else []
yield name, vallist
class NodeCfg(object):
""" Node configuration.
name: node name
contents: a list of contents - attributes and child nodes
See comment at the top of the configuration file for details.
"""
def __init__(self, name, contents):
self.name = name
self.all_entries = []
self.attr = []
self.child = []
self.seq_child = []
for entry in contents:
clean_entry = entry.rstrip('*')
self.all_entries.append(clean_entry)
if entry.endswith('**'):
self.seq_child.append(clean_entry)
elif entry.endswith('*'):
self.child.append(clean_entry)
else:
self.attr.append(entry)
def generate_source(self):
src = self._gen_init()
src += '\n' + self._gen_children()
src += '\n' + self._gen_attr_names()
return src
def _gen_init(self):
src = "class %s(Node):\n" % self.name
if self.all_entries:
args = ', '.join(self.all_entries)
slots = ', '.join("'{0}'".format(e) for e in self.all_entries)
slots += ", 'coord', '__weakref__'"
arglist = '(self, %s, coord=None)' % args
else:
slots = "'coord', '__weakref__'"
arglist = '(self, coord=None)'
src += " __slots__ = (%s)\n" % slots
src += " def __init__%s:\n" % arglist
for name in self.all_entries + ['coord']:
src += " self.%s = %s\n" % (name, name)
return src
def _gen_children(self):
src = ' def children(self):\n'
if self.all_entries:
src += ' nodelist = []\n'
for child in self.child:
src += (
' if self.%(child)s is not None:' +
' nodelist.append(("%(child)s", self.%(child)s))\n') % (
dict(child=child))
for seq_child in self.seq_child:
src += (
' for i, child in enumerate(self.%(child)s or []):\n'
' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % (
dict(child=seq_child))
src += ' return tuple(nodelist)\n'
else:
src += ' return ()\n'
return src
def _gen_attr_names(self):
src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')'
return src
_PROLOGUE_COMMENT = \
r'''#-----------------------------------------------------------------
# ** ATTENTION **
# This code was automatically generated from the file:
# $cfg_filename
#
# Do not modify it directly. Modify the configuration file and
# run the generator again.
# ** ** *** ** **
#
# pycparser: c_ast.py
#
# AST Node classes.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# License: BSD
#-----------------------------------------------------------------
'''
_PROLOGUE_CODE = r'''
import sys
class Node(object):
__slots__ = ()
""" Abstract base class for AST nodes.
"""
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
Do you want the coordinates of each Node to be
displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c_name, c in node.children():
self.visit(c)
'''
if __name__ == "__main__":
import sys
ast_gen = ASTCodeGenerator('_c_ast.cfg')
ast_gen.generate(open('c_ast.py', 'w'))
| Widiot/simpleblog | venv/lib/python3.5/site-packages/pycparser/_ast_gen.py | Python | mit | 8,675 | [
"VisIt"
] | e6e25129ee9b7c193262093ebfdadbff6e5eba071205787d4d3a4dc4878da210 |
"""
Test Display Options
"""
__RCSID__ = "$Id$"
import unittest
import thread
from DIRAC.FrameworkSystem.private.standardLogging.test.TestLoggingBase import Test_Logging, gLogger, cleaningLog
class Test_DisplayOptions(Test_Logging):
"""
Test the creation of subloggers and their properties
"""
def setUp(self):
super(Test_DisplayOptions, self).setUp()
self.filename = '/tmp/logtmp.log'
with open(self.filename, "w"):
pass
def test_00setShowHeaders(self):
"""
Set the headers
"""
gLogger.showHeaders(False)
gLogger.notice('message', 'varmessage')
self.assertEqual("message varmessage\n", self.buffer.getvalue())
self.buffer.truncate(0)
gLogger.showHeaders(True)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.buffer.truncate(0)
def test_01setShowThreadIDs(self):
"""
Set the thread ID
"""
gLogger.showThreadIDs(False)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.buffer.truncate(0)
gLogger.showThreadIDs(True)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertIn(str(thread.get_ident()), logstring1)
self.buffer.truncate(0)
def test_02setShowThreadIDsHeaders(self):
"""
Create a subsubsublogger and create a logrecord
"""
gLogger.showHeaders(False)
gLogger.showThreadIDs(False)
gLogger.notice('message')
self.assertEqual("message\n", self.buffer.getvalue())
self.buffer.truncate(0)
gLogger.showHeaders(False)
gLogger.showThreadIDs(True)
gLogger.notice('message')
self.assertEqual("message\n", self.buffer.getvalue())
self.buffer.truncate(0)
gLogger.showHeaders(True)
gLogger.showThreadIDs(False)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.buffer.truncate(0)
gLogger.showHeaders(True)
gLogger.showThreadIDs(True)
gLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertIn(str(thread.get_ident()), logstring1)
self.buffer.truncate(0)
def test_03setSubLogShowHeaders(self):
"""
Create a sublogger and set it its own Header option.
"""
sublog = gLogger.getSubLogger('sublog')
sublog.setLevel('notice')
sublog.showHeaders(False)
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublogNOTICE:message\n")
def test_04SubLogShowHeadersChange(self):
"""
Create a sublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog2')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
self.assertEqual(self.buffer.getvalue(), "message\n")
def test_05setSubLoggLoggerShowHeaders(self):
"""
Create a sublogger, set its Header option and the Header option of the gLogger.
Show that its Header option do not follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog3')
sublog.setLevel('notice')
sublog.showHeaders(False)
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(True)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog3NOTICE:message\n")
def test_06setSubLoggLoggerShowHeadersInverse(self):
"""
Create a sublogger, set the Header option of the gLogger and its Header option.
Show that the gLogger Header option do not follow the change of its child Header option.
"""
sublog = gLogger.getSubLogger('sublog4')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(True)
sublog.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog4NOTICE:message\n")
def test_07subLogShowHeadersChange(self):
"""
Create a subsublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog5')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\nmessage\n")
self.assertEqual(self.buffer.getvalue(), "message\n")
def test_07subLogShowHeadersChangeSetSubLogger(self):
"""
Create a subsublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog6')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.showHeaders(False)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message\nmessage\n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog6/subsublogNOTICE:message\n")
def test_09subLogShowHeadersChangeSetSubLogger(self):
"""
Create a subsublogger and set its Header option and show that
its Header option do not follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog7')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackends(['file'], {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.showHeaders(False)
subsublog.showHeaders(True)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertIn("UTC Framework/sublog7/subsublog NOTICE: message\nmessage\n", message)
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog7/subsublogNOTICE:message\n")
def test_10gLoggerShowHeadersChange2Times(self):
"""
Create a sublogger with a file backend and change the Header option of gLogger 2 times
in order to verify the propagation.
"""
sublog = gLogger.getSubLogger('sublog8')
sublog.registerBackends(['file'], {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual("message\n", message)
gLogger.showHeaders(True)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertIn("UTC Framework/sublog8 NOTICE: message\n", message)
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test_DisplayOptions)
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
| chaen/DIRAC | FrameworkSystem/private/standardLogging/test/Test_DisplayOptions.py | Python | gpl-3.0 | 9,182 | [
"DIRAC"
] | 0bca2175b2eb282e96250288a156eb8f326fefa599b0971e73d74e930a8b09ce |
from asap3 import *
from ase.lattice.cubic import FaceCenteredCubic
from asap3.testtools import ReportTest
atoms = FaceCenteredCubic(size=(5,5,5), symbol="Cu")
pot = EMT()
atoms.set_calculator(pot)
for i in (1, 2):
print "*** Pass", i
ReportTest("Energy required", pot.calculation_required(atoms, ["energy"]), 1, 0)
ReportTest("Forces required", pot.calculation_required(atoms, ["forces"]), 1, 0)
ReportTest("Stress required", pot.calculation_required(atoms, ["stress"]), 1, 0)
ReportTest("Magmom required", pot.calculation_required(atoms, ["magmoms"]), 1, 0)
e = atoms.get_potential_energy()
ReportTest("Energy not required", pot.calculation_required(atoms, ["energy"]), 0, 0)
ReportTest("Forces required (II)", pot.calculation_required(atoms, ["forces"]), 1, 0)
f = atoms.get_forces()
ReportTest("Energy not required (II)", pot.calculation_required(atoms, ["energy"]), 0, 0)
ReportTest("Forces not required", pot.calculation_required(atoms, ["forces"]), 0, 0)
ReportTest("Energy or forces not required",
pot.calculation_required(atoms, ["energy", "forces"]), 0, 0)
ReportTest("Energy or stress required",
pot.calculation_required(atoms, ["energy", "stress"]), 1, 0)
s = atoms.get_stress()
ReportTest("Stress not required", pot.calculation_required(atoms, ["stress"]), 0, 0)
r = atoms.get_positions()
r[0,0] += 0.1
atoms.set_positions(r)
ReportTest.Summary()
| auag92/n2dm | Asap-3.8.4/Test/CalculationRequired.py | Python | mit | 1,467 | [
"ASE"
] | 2210897ccdeaed86efd7b824384440014254ab35b487d5851acb5301d3907c13 |
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import shutil
import networkx as nx
from pymatgen.analysis.chemenv.connectivity.environment_nodes import (
EnvironmentNode,
get_environment_node,
)
from pymatgen.util.testing import PymatgenTest
try:
import bson # type: ignore # Ignore bson import for mypy
except ModuleNotFoundError:
bson = None
json_files_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"json_test_files",
)
class EnvironmentNodesTest(PymatgenTest):
    """Unit tests for EnvironmentNode equality, dict round-tripping and printing."""

    def test_equal(self):
        struct = self.get_structure("SiO2")
        node = EnvironmentNode(central_site=struct[0], i_central_site=0, ce_symbol="T:4")

        # same site index, different central site: equal but not everything-equal
        same_index = EnvironmentNode(central_site=struct[2], i_central_site=0, ce_symbol="T:4")
        assert node == same_index
        assert not node.everything_equal(same_index)

        # different site index: not equal at all
        other_index = EnvironmentNode(central_site=struct[0], i_central_site=3, ce_symbol="T:4")
        assert node != other_index
        assert not node.everything_equal(other_index)

        # same site/index, different coordination environment: equal but not everything-equal
        other_env = EnvironmentNode(central_site=struct[0], i_central_site=0, ce_symbol="O:6")
        assert node == other_env
        assert not node.everything_equal(other_env)

        # identical construction: equal in every respect
        twin = EnvironmentNode(central_site=struct[0], i_central_site=0, ce_symbol="T:4")
        assert node == twin
        assert node.everything_equal(twin)

    def test_as_dict(self):
        struct = self.get_structure("SiO2")
        node = EnvironmentNode(central_site=struct[2], i_central_site=2, ce_symbol="T:4")

        # round-trip through the plain dict representation
        round_tripped = EnvironmentNode.from_dict(node.as_dict())
        assert node.everything_equal(round_tripped)

        # round-trip through BSON encoding when bson is installed
        if bson is not None:
            encoded = bson.BSON.encode(node.as_dict())
            from_bson = EnvironmentNode.from_dict(encoded.decode())
            assert node.everything_equal(from_bson)

    def test_str(self):
        struct = self.get_structure("SiO2")
        node = EnvironmentNode(central_site=struct[2], i_central_site=2, ce_symbol="T:4")
        assert str(node) == "Node #2 Si (T:4)"
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    import unittest

    unittest.main()
| gmatteo/pymatgen | pymatgen/analysis/chemenv/connectivity/tests/test_environment_nodes.py | Python | mit | 2,091 | [
"pymatgen"
] | 0bdaf89461decb4e7fb22601e0a20fa87b252984448531428cfe55a28d1bc118 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from scipy.special import gamma
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
    """Initialize the random and np.random generators.

    x: int seed
    """
    # seed both the stdlib and the NumPy generators with the same value
    for seed_func in (random.seed, np.random.seed):
        seed_func(x)
def Odds(p):
    """Computes odds for a given probability.

    Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.

    Note: when p=1, the formula for odds divides by zero, which is
    normally undefined. But I think it is reasonable to define Odds(1)
    to be infinity, so that's what this function does.

    p: float 0-1

    Returns: float odds
    """
    return float('inf') if p == 1 else p / (1 - p)
def Probability(o):
    """Computes the probability corresponding to given odds.

    Example: o=2 means 2:1 odds in favor, or 2/3 probability

    o: float odds, strictly positive

    Returns: float probability
    """
    total = o + 1
    return o / total
def Probability2(yes, no):
    """Computes the probability corresponding to given odds.

    Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.

    yes, no: int or float odds in favor
    """
    total = yes + no
    return yes / total
class Interpolator(object):
    """Represents a mapping between sorted sequences; performs linear interp.

    Attributes:
        xs: sorted list
        ys: sorted list
    """

    def __init__(self, xs, ys):
        self.xs = xs
        self.ys = ys

    def Lookup(self, x):
        """Looks up x and returns the corresponding value of y."""
        return self._Bisect(x, self.xs, self.ys)

    def Reverse(self, y):
        """Looks up y and returns the corresponding value of x."""
        return self._Bisect(y, self.ys, self.xs)

    def _Bisect(self, x, xs, ys):
        """Linear interpolation of ys at position x, clamped to the table range."""
        # clamp to the endpoints when x falls outside the table
        if x <= xs[0]:
            return ys[0]
        if x >= xs[-1]:
            return ys[-1]
        hi = bisect.bisect(xs, x)
        lo = hi - 1
        frac = 1.0 * (x - xs[lo]) / (xs[hi] - xs[lo])
        return ys[lo] + frac * 1.0 * (ys[hi] - ys[lo])
# When we plot Hist, Pmf and Cdf objects, they don't appear in
# the legend unless we override the default label.
DEFAULT_LABEL = '_nolegend_'  # matplotlib's sentinel for "omit from legend"
class _DictWrapper(object):
    """An object that contains a dictionary."""

    def __init__(self, obj=None, label=None):
        """Initializes the distribution.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        label: string label
        """
        self.label = label if label is not None else DEFAULT_LABEL
        self.d = {}

        # flag whether the distribution is under a log transform
        self.log = False

        if obj is None:
            return

        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.label = label if label is not None else obj.label

        if isinstance(obj, dict):
            self.d.update(obj.items())
        elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.d.update(obj.Items())
        elif isinstance(obj, pandas.Series):
            # Series.iteritems was removed in pandas 2.0; items() is equivalent.
            self.d.update(obj.value_counts().items())
        else:
            # finally, treat it like a list
            self.d.update(Counter(obj))

        if len(self) > 0 and isinstance(self, Pmf):
            self.Normalize()

    def __hash__(self):
        return id(self)

    def __str__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s)' % (cls, str(self.d))
        else:
            return self.label

    def __repr__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s)' % (cls, repr(self.d))
        else:
            return '%s(%s, %s)' % (cls, repr(self.d), repr(self.label))

    def __eq__(self, other):
        try:
            return self.d == other.d
        except AttributeError:
            return False

    def __len__(self):
        return len(self.d)

    def __iter__(self):
        return iter(self.d)

    def iterkeys(self):
        """Returns an iterator over keys."""
        return iter(self.d)

    def __contains__(self, value):
        return value in self.d

    def __getitem__(self, value):
        return self.d.get(value, 0)

    def __setitem__(self, value, prob):
        self.d[value] = prob

    def __delitem__(self, value):
        del self.d[value]

    def Copy(self, label=None):
        """Returns a copy.

        Make a shallow copy of d.  If you want a deep copy of d,
        use copy.deepcopy on the whole object.

        label: string label for the new Hist

        returns: new _DictWrapper with the same type
        """
        new = copy.copy(self)
        new.d = copy.copy(self.d)
        new.label = label if label is not None else self.label
        return new

    def Scale(self, factor):
        """Multiplies the values by a factor.

        factor: what to multiply by

        Returns: new object
        """
        new = self.Copy()
        new.d.clear()

        for val, prob in self.Items():
            new.Set(val * factor, prob)
        return new

    def Log(self, m=None):
        """Log transforms the probabilities.

        Removes values with probability 0.

        Normalizes so that the largest logprob is 0.
        """
        if self.log:
            raise ValueError("Pmf/Hist already under a log transform")
        self.log = True

        if m is None:
            m = self.MaxLike()

        # iterate over a snapshot: Remove() mutates the dict, and mutating a
        # dict during iteration over its items() view raises RuntimeError
        for x, p in list(self.d.items()):
            if p:
                self.Set(x, math.log(p / m))
            else:
                self.Remove(x)

    def Exp(self, m=None):
        """Exponentiates the probabilities.

        m: how much to shift the ps before exponentiating

        If m is None, normalizes so that the largest prob is 1.
        """
        if not self.log:
            raise ValueError("Pmf/Hist not under a log transform")
        self.log = False

        if m is None:
            m = self.MaxLike()

        for x, p in self.d.items():
            self.Set(x, math.exp(p - m))

    def GetDict(self):
        """Gets the dictionary."""
        return self.d

    def SetDict(self, d):
        """Sets the dictionary."""
        self.d = d

    def Values(self):
        """Gets an unsorted sequence of values.

        Note: one source of confusion is that the keys of this
        dictionary are the values of the Hist/Pmf, and the
        values of the dictionary are frequencies/probabilities.
        """
        return self.d.keys()

    def Items(self):
        """Gets an unsorted sequence of (value, freq/prob) pairs."""
        return self.d.items()

    def SortedItems(self):
        """Gets a sorted sequence of (value, freq/prob) pairs.

        If items are unsortable, the result is unsorted.
        """
        def isnan(x):
            try:
                return math.isnan(x)
            except TypeError:
                return False

        if any(isnan(x) for x in self.Values()):
            msg = 'Keys contain NaN, may not sort correctly.'
            logging.warning(msg)

        try:
            return sorted(self.d.items())
        except TypeError:
            return self.d.items()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        Note: options are ignored

        Returns:
            tuple of (sorted value sequence, freq/prob sequence)
        """
        return zip(*self.SortedItems())

    def MakeCdf(self, label=None):
        """Makes a Cdf."""
        label = label if label is not None else self.label
        return Cdf(self, label=label)

    def Print(self):
        """Prints the values and freqs/probs in ascending order."""
        for val, prob in self.SortedItems():
            print(val, prob)

    def Set(self, x, y=0):
        """Sets the freq/prob associated with the value x.

        Args:
            x: number value
            y: number freq or prob
        """
        self.d[x] = y

    def Incr(self, x, term=1):
        """Increments the freq/prob associated with the value x.

        Args:
            x: number value
            term: how much to increment by
        """
        self.d[x] = self.d.get(x, 0) + term

    def Mult(self, x, factor):
        """Scales the freq/prob associated with the value x.

        Args:
            x: number value
            factor: how much to multiply by
        """
        self.d[x] = self.d.get(x, 0) * factor

    def Remove(self, x):
        """Removes a value.

        Throws an exception if the value is not there.

        Args:
            x: value to remove
        """
        del self.d[x]

    def Total(self):
        """Returns the total of the frequencies/probabilities in the map."""
        total = sum(self.d.values())
        return total

    def MaxLike(self):
        """Returns the largest frequency/probability in the map."""
        return max(self.d.values())

    def Largest(self, n=10):
        """Returns the largest n values, with frequency/probability.

        n: number of items to return
        """
        return sorted(self.d.items(), reverse=True)[:n]

    def Smallest(self, n=10):
        """Returns the smallest n values, with frequency/probability.

        n: number of items to return
        """
        return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
    """Represents a histogram, which is a map from values to frequencies.

    Values can be any hashable type; frequencies are integer counters.
    """
    def Freq(self, x):
        """Gets the frequency associated with the value x.

        Args:
            x: number value

        Returns:
            int frequency
        """
        return self.d.get(x, 0)

    def Freqs(self, xs):
        """Gets frequencies for a sequence of values."""
        return list(map(self.Freq, xs))

    def IsSubset(self, other):
        """Checks whether the values in this histogram are a subset of
        the values in the given histogram."""
        return all(freq <= other.Freq(val) for val, freq in self.Items())

    def Subtract(self, other):
        """Subtracts the values in the given histogram from this histogram."""
        for value, count in other.Items():
            self.Incr(value, -count)
class Pmf(_DictWrapper):
    """Represents a probability mass function.

    Values can be any hashable type; probabilities are floating-point.
    Pmfs are not necessarily normalized.
    """

    def Prob(self, x, default=0):
        """Gets the probability associated with the value x.

        Args:
            x: number value
            default: value to return if the key is not there

        Returns:
            float probability
        """
        return self.d.get(x, default)

    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]

    def Percentile(self, percentage):
        """Computes a percentile of a given Pmf.

        Note: this is not super efficient. If you are planning
        to compute more than a few percentiles, compute the Cdf.

        percentage: float 0-100

        returns: value from the Pmf
        """
        p = percentage / 100
        total = 0
        for val, prob in sorted(self.Items()):
            total += prob
            if total >= p:
                return val

    def ProbGreater(self, x):
        """Probability that a sample from this Pmf exceeds x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbGreater(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val > x]
            return sum(t)

    def ProbLess(self, x):
        """Probability that a sample from this Pmf is less than x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbLess(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val < x]
            return sum(t)

    def ProbEqual(self, x):
        """Probability that a sample from this Pmf is exactly x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbEqual(self, x)
        else:
            return self[x]

    # NOTE: I've decided to remove the magic comparators because they
    # have the side-effect of making Pmf sortable, but in fact they
    # don't support sorting.

    def Normalize(self, fraction=1):
        """Normalizes this PMF so the sum of all probs is fraction.

        Args:
            fraction: what the total should be after normalization

        Returns: the total probability before normalizing
        """
        if self.log:
            raise ValueError("Normalize: Pmf is under a log transform")

        total = self.Total()
        if total == 0:
            raise ValueError('Normalize: total probability is zero.')

        factor = fraction / total
        for x in self.d:
            self.d[x] *= factor

        return total

    def Random(self):
        """Chooses a random element from this PMF.

        Note: this is not very efficient.  If you plan to call
        this more than a few times, consider converting to a CDF.

        Returns:
            float value from the Pmf
        """
        target = random.random()
        total = 0
        for x, p in self.d.items():
            total += p
            if total >= target:
                return x

        # we shouldn't get here
        raise ValueError('Random: Pmf might not be normalized.')

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int length of the sample
        returns: NumPy array
        """
        return self.MakeCdf().Sample(n)

    def Mean(self):
        """Computes the mean of a PMF.

        Returns:
            float mean
        """
        return sum(p * x for x, p in self.Items())

    def Median(self):
        """Computes the median of a PMF.

        Returns:
            float median
        """
        return self.MakeCdf().Percentile(50)

    def Var(self, mu=None):
        """Computes the variance of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float variance
        """
        if mu is None:
            mu = self.Mean()

        return sum(p * (x-mu)**2 for x, p in self.Items())

    def Expect(self, func):
        """Computes the expectation of func(x).

        Returns:
            expectation
        """
        # builtin sum: np.sum over a generator expression is deprecated and
        # does not reliably reduce the generated values
        return sum(p * func(x) for x, p in self.Items())

    def Std(self, mu=None):
        """Computes the standard deviation of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float standard deviation
        """
        var = self.Var(mu)
        return math.sqrt(var)

    def Mode(self):
        """Returns the value with the highest probability.

        Returns: float probability
        """
        _, val = max((prob, val) for val, prob in self.Items())
        return val

    # The mode of a posterior is the maximum aposteori probability (MAP)
    MAP = Mode

    # If the distribution contains likelihoods only, the peak is the
    # maximum likelihood estimator.  (Alias: the original code also defined
    # an identical MaximumLikelihood method that shadowed this alias.)
    MaximumLikelihood = Mode

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        cdf = self.MakeCdf()
        return cdf.CredibleInterval(percentage)

    def __add__(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.AddPmf(other)
        except AttributeError:
            return self.AddConstant(other)

    __radd__ = __add__

    def AddPmf(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf[v1 + v2] += p1 * p2
        return pmf

    def AddConstant(self, other):
        """Computes the Pmf of the sum a constant and values from self.

        other: a number

        returns: new Pmf
        """
        if other == 0:
            return self.Copy()

        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 + other, p1)
        return pmf

    def __sub__(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        try:
            return self.SubPmf(other)
        except AttributeError:
            return self.AddConstant(-other)

    def SubPmf(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 - v2, p1 * p2)
        return pmf

    def __mul__(self, other):
        """Computes the Pmf of the product of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        try:
            return self.MulPmf(other)
        except AttributeError:
            return self.MulConstant(other)

    def MulPmf(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 * v2, p1 * p2)
        return pmf

    def MulConstant(self, other):
        """Computes the Pmf of the product of a constant and values from self.

        other: a number

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 * other, p1)
        return pmf

    def __div__(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        try:
            return self.DivPmf(other)
        except AttributeError:
            return self.MulConstant(1/other)

    __truediv__ = __div__

    def DivPmf(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 / v2, p1 * p2)
        return pmf

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.MakeCdf()
        cdf.ps **= k
        return cdf
class Joint(Pmf):
    """Represents a joint distribution.

    The values are sequences (usually tuples)
    """

    def Marginal(self, i, label=None):
        """Gets the marginal distribution of the indicated variable.

        i: index of the variable we want

        Returns: Pmf
        """
        marginal = Pmf(label=label)
        for vs, prob in self.Items():
            marginal.Incr(vs[i], prob)
        return marginal

    def Conditional(self, i, j, val, label=None):
        """Gets the conditional distribution of the indicated variable.

        Distribution of vs[i], conditioned on vs[j] = val.

        i: index of the variable we want
        j: which variable is conditioned on
        val: the value the jth variable has to have

        Returns: Pmf
        """
        conditional = Pmf(label=label)
        for vs, prob in self.Items():
            if vs[j] == val:
                conditional.Incr(vs[i], prob)
        conditional.Normalize()
        return conditional

    def MaxLikeInterval(self, percentage=90):
        """Returns the maximum-likelihood credible interval.

        If percentage=90, computes a 90% CI containing the values
        with the highest likelihoods.

        percentage: float between 0 and 100

        Returns: list of values from the suite
        """
        interval = []
        cumulative = 0
        # rank (prob, val) pairs so the most likely values come first
        ranked = sorted(((prob, val) for val, prob in self.Items()), reverse=True)
        for prob, val in ranked:
            interval.append(val)
            cumulative += prob
            if cumulative >= percentage / 100:
                break
        return interval
def MakeJoint(pmf1, pmf2):
    """Joint distribution of values from pmf1 and pmf2.

    Assumes that the PMFs represent independent random variables.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        Joint pmf of value pairs
    """
    joint = Joint()
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            # independence: P(v1, v2) = P(v1) * P(v2)
            joint[v1, v2] = p1 * p2
    return joint
def MakeHistFromList(t, label=None):
    """Makes a histogram from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        label: string label for this histogram

    Returns:
        Hist object
    """
    hist = Hist(t, label=label)
    return hist
def MakeHistFromDict(d, label=None):
    """Makes a histogram from a map from values to frequencies.

    Args:
        d: dictionary that maps values to frequencies
        label: string label for this histogram

    Returns:
        Hist object
    """
    return Hist(d, label=label)
def MakePmfFromList(t, label=None):
    """Makes a PMF from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        label: string label for this PMF

    Returns:
        Pmf object
    """
    pmf = Pmf(t, label=label)
    return pmf
def MakePmfFromDict(d, label=None):
    """Makes a PMF from a map from values to probabilities.

    Args:
        d: dictionary that maps values to probabilities
        label: string label for this PMF

    Returns:
        Pmf object
    """
    pmf = Pmf(d, label=label)
    return pmf
def MakePmfFromItems(t, label=None):
    """Makes a PMF from a sequence of value-probability pairs

    Args:
        t: sequence of value-probability pairs
        label: string label for this PMF

    Returns:
        Pmf object
    """
    # convert the pair sequence to a dict before building the Pmf
    d = dict(t)
    return Pmf(d, label=label)
def MakePmfFromHist(hist, label=None):
    """Makes a normalized PMF from a Hist object.

    Args:
        hist: Hist object
        label: string label

    Returns:
        Pmf object
    """
    label = hist.label if label is None else label
    return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
    """Make a mixture distribution.

    Args:
      metapmf: Pmf that maps from Pmfs to probs.
      label: string label for the new Pmf.

    Returns: Pmf object.
    """
    mix = Pmf(label=label)
    for inner_pmf, p_outer in metapmf.Items():
        for val, p_inner in inner_pmf.Items():
            # weight each inner probability by the outer mixture weight
            mix.Incr(val, p_outer * p_inner)
    return mix
def MakeUniformPmf(low, high, n):
    """Make a uniform Pmf.

    low: lowest value (inclusive)
    high: highest value (inclusize)
    n: number of values
    """
    pmf = Pmf()
    for x in np.linspace(low, high, n):
        pmf[x] = 1
    pmf.Normalize()
    return pmf
class Cdf:
    """Represents a cumulative distribution function.

    Attributes:
        xs: sequence of values
        ps: sequence of probabilities
        label: string used as a graph label.
    """

    def __init__(self, obj=None, ps=None, label=None):
        """Initializes.

        If ps is provided, obj must be the corresponding list of values.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        ps: list of cumulative probabilities
        label: string label
        """
        self.label = label if label is not None else DEFAULT_LABEL

        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            if not label:
                self.label = label if label is not None else obj.label

        if obj is None:
            # caller does not provide obj, make an empty Cdf
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            if ps is not None:
                logging.warning("Cdf: can't pass ps without also passing xs.")
            return
        else:
            # if the caller provides xs and ps, just store them
            if ps is not None:
                if isinstance(ps, str):
                    logging.warning("Cdf: ps can't be a string")

                self.xs = np.asarray(obj)
                self.ps = np.asarray(ps)
                return

        # caller has provided just obj, not ps
        if isinstance(obj, Cdf):
            self.xs = copy.copy(obj.xs)
            self.ps = copy.copy(obj.ps)
            return

        if isinstance(obj, _DictWrapper):
            dw = obj
        else:
            dw = Hist(obj)

        if len(dw) == 0:
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            return

        xs, freqs = zip(*sorted(dw.Items()))
        self.xs = np.asarray(xs)
        # builtin float: the np.float alias was removed in NumPy 1.24
        self.ps = np.cumsum(freqs, dtype=float)
        self.ps /= self.ps[-1]

    def __str__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
        else:
            return self.label

    def __repr__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
        else:
            return '%s(%s, %s, %s)' % (cls, str(self.xs), str(self.ps),
                                       repr(self.label))

    def __len__(self):
        return len(self.xs)

    def __getitem__(self, x):
        return self.Prob(x)

    def __setitem__(self, key, value):
        # accept the item-protocol arguments so the intended exception is
        # raised instead of a TypeError about the signature
        raise UnimplementedMethodException()

    def __delitem__(self, key):
        raise UnimplementedMethodException()

    def __eq__(self, other):
        return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)

    def Print(self):
        """Prints the values and freqs/probs in ascending order."""
        for val, prob in zip(self.xs, self.ps):
            print(val, prob)

    def Copy(self, label=None):
        """Returns a copy of this Cdf.

        label: string label for the new Cdf
        """
        if label is None:
            label = self.label
        return Cdf(list(self.xs), list(self.ps), label=label)

    def MakePmf(self, label=None):
        """Makes a Pmf."""
        if label is None:
            label = self.label
        return Pmf(self, label=label)

    def Items(self):
        """Returns a sorted sequence of (value, probability) pairs.

        Note: in Python3, returns an iterator.
        """
        a = self.ps
        b = np.roll(a, 1)
        b[0] = 0
        return zip(self.xs, a-b)

    def Shift(self, term):
        """Adds a term to the xs.

        term: how much to add
        """
        new = self.Copy()
        # don't use +=, or else an int array + float yields int array
        new.xs = new.xs + term
        return new

    def Scale(self, factor):
        """Multiplies the xs by a factor.

        factor: what to multiply by
        """
        new = self.Copy()
        # don't use *=, or else an int array * float yields int array
        new.xs = new.xs * factor
        return new

    def Prob(self, x):
        """Returns CDF(x), the probability that corresponds to value x.

        Args:
            x: number

        Returns:
            float probability
        """
        if x < self.xs[0]:
            return 0
        index = bisect.bisect(self.xs, x)
        p = self.ps[index-1]
        return p

    def Probs(self, xs):
        """Gets probabilities for a sequence of values.

        xs: any sequence that can be converted to NumPy array

        returns: NumPy array of cumulative probabilities
        """
        xs = np.asarray(xs)
        index = np.searchsorted(self.xs, xs, side='right')
        ps = self.ps[index-1]
        ps[xs < self.xs[0]] = 0
        return ps

    ProbArray = Probs

    def Value(self, p):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        Args:
            p: number in the range [0, 1]

        Returns:
            number value
        """
        if p < 0 or p > 1:
            raise ValueError('Probability p must be in range [0, 1]')

        index = bisect.bisect_left(self.ps, p)
        return self.xs[index]

    def Values(self, ps=None):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        If ps is not provided, returns all values.

        Args:
            ps: NumPy array of numbers in the range [0, 1]

        Returns:
            NumPy array of values
        """
        if ps is None:
            return self.xs

        ps = np.asarray(ps)
        if np.any(ps < 0) or np.any(ps > 1):
            raise ValueError('Probability p must be in range [0, 1]')

        index = np.searchsorted(self.ps, ps, side='left')
        return self.xs[index]

    ValueArray = Values

    def Percentile(self, p):
        """Returns the value that corresponds to percentile p.

        Args:
            p: number in the range [0, 100]

        Returns:
            number value
        """
        return self.Value(p / 100)

    def Percentiles(self, ps):
        """Returns the value that corresponds to percentiles ps.

        Args:
            ps: numbers in the range [0, 100]

        Returns:
            array of values
        """
        ps = np.asarray(ps)
        return self.Values(ps / 100)

    def PercentileRank(self, x):
        """Returns the percentile rank of the value x.

        x: potential value in the CDF

        returns: percentile rank in the range 0 to 100
        """
        return self.Prob(x) * 100

    def PercentileRanks(self, xs):
        """Returns the percentile ranks of the values in xs.

        xs: potential value in the CDF

        returns: array of percentile ranks in the range 0 to 100
        """
        # fixed NameError: the original referenced an undefined name `x`
        return self.Probs(xs) * 100

    def Random(self):
        """Chooses a random value from this distribution."""
        return self.Value(random.random())

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int length of the sample
        returns: NumPy array
        """
        ps = np.random.random(n)
        return self.ValueArray(ps)

    def Mean(self):
        """Computes the mean of a CDF.

        Returns:
            float mean
        """
        old_p = 0
        total = 0
        for x, new_p in zip(self.xs, self.ps):
            p = new_p - old_p
            total += p * x
            old_p = new_p
        return total

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        prob = (1 - percentage / 100) / 2
        interval = self.Value(prob), self.Value(1 - prob)
        return interval

    ConfidenceInterval = CredibleInterval

    def _Round(self, multiplier=1000):
        """
        An entry is added to the cdf only if the percentile differs
        from the previous value in a significant digit, where the number
        of significant digits is determined by multiplier.  The
        default is 1000, which keeps log10(1000) = 3 significant digits.
        """
        # TODO(write this method)
        raise UnimplementedMethodException()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        An empirical CDF is a step function; linear interpolation
        can be misleading.

        Note: options are ignored

        Returns:
            tuple of (xs, ps)
        """
        def interleave(a, b):
            c = np.empty(a.shape[0] + b.shape[0])
            c[::2] = a
            c[1::2] = b
            return c

        a = np.array(self.xs)
        xs = interleave(a, a)
        shift_ps = np.roll(self.ps, 1)
        shift_ps[0] = 0
        ps = interleave(shift_ps, self.ps)
        return xs, ps

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.Copy()
        cdf.ps **= k
        return cdf
def MakeCdfFromItems(items, label=None):
    """Makes a cdf from an unsorted sequence of (value, frequency) pairs.

    Args:
        items: unsorted sequence of (value, frequency) pairs
        label: string label for this CDF

    Returns:
        cdf: list of (value, fraction) pairs
    """
    # collapse the pairs into a dict before building the Cdf
    d = dict(items)
    return Cdf(d, label=label)
def MakeCdfFromDict(d, label=None):
    """Makes a CDF from a dictionary that maps values to frequencies.

    Args:
       d: dictionary that maps values to frequencies.
       label: string label for the data.

    Returns:
        Cdf object
    """
    cdf = Cdf(d, label=label)
    return cdf
def MakeCdfFromList(seq, label=None):
    """Creates a CDF from an unsorted sequence.

    Args:
        seq: unsorted sequence of sortable values
        label: string label for the cdf

    Returns:
       Cdf object
    """
    cdf = Cdf(seq, label=label)
    return cdf
def MakeCdfFromHist(hist, label=None):
    """Makes a CDF from a Hist object.

    Args:
       hist: Pmf.Hist object
       label: string label for the data.

    Returns:
        Cdf object
    """
    label = hist.label if label is None else label
    return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
    """Makes a CDF from a Pmf object.

    Args:
       pmf: Pmf.Pmf object
       label: string label for the data.

    Returns:
        Cdf object
    """
    label = pmf.label if label is None else label
    return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
    """Raised when a method that subclasses should override is called directly."""
class Suite(Pmf):
    """Represents a suite of hypotheses and their probabilities."""

    def Update(self, data):
        """Updates each hypothesis based on the data.

        data: any representation of the data

        returns: the normalizing constant
        """
        for hypo in self.Values():
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdate(self, data):
        """Updates a suite of hypotheses based on new data.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        Note: unlike Update, LogUpdate does not normalize.

        Args:
            data: any representation of the data
        """
        for hypo in self.Values():
            like = self.LogLikelihood(data, hypo)
            self.Incr(hypo, like)

    def UpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        This is more efficient than calling Update repeatedly because
        it waits until the end to Normalize.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: the normalizing constant
        """
        for data in dataset:
            for hypo in self.Values():
                like = self.Likelihood(data, hypo)
                self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: None
        """
        for data in dataset:
            self.LogUpdate(data)

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def LogLikelihood(self, data, hypo):
        """Computes the log likelihood of the data under the hypothesis.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def Print(self):
        """Prints the hypotheses and their probabilities."""
        for hypo, prob in sorted(self.Items()):
            print(hypo, prob)

    def MakeOdds(self):
        """Transforms from probabilities to odds.

        Values with prob=0 are removed.
        """
        # iterate over a snapshot: Remove() mutates the dict, and mutating a
        # dict during iteration over its items() view raises RuntimeError
        for hypo, prob in list(self.Items()):
            if prob:
                self.Set(hypo, Odds(prob))
            else:
                self.Remove(hypo)

    def MakeProbs(self):
        """Transforms from odds to probabilities."""
        for hypo, odds in self.Items():
            self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
    """Makes a suite from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        label: string label for this suite

    Returns:
        Suite object
    """
    hist = MakeHistFromList(t, label=label)
    return MakeSuiteFromDict(hist.GetDict())
def MakeSuiteFromHist(hist, label=None):
    """Makes a normalized suite from a Hist object.

    Args:
        hist: Hist object
        label: string label

    Returns:
        Suite object
    """
    label = hist.label if label is None else label

    # copy the dictionary so the Hist is not mutated by normalization
    return MakeSuiteFromDict(dict(hist.GetDict()), label)
def MakeSuiteFromDict(d, label=None):
    """Makes a suite from a map from values to probabilities.

    Args:
        d: dictionary that maps values to probabilities
        label: string label for this suite

    Returns:
        Suite object
    """
    result = Suite(label=label)
    result.SetDict(d)
    result.Normalize()
    return result
class Pdf(object):
    """Represents a probability density function (PDF)."""

    def Density(self, x):
        """Evaluates this Pdf at x.

        Returns: float or NumPy array of probability density
        """
        raise UnimplementedMethodException()

    def GetLinspace(self):
        """Get a linspace for plotting.

        Not all subclasses of Pdf implement this.

        Returns: numpy array
        """
        raise UnimplementedMethodException()

    def MakePmf(self, **options):
        """Makes a discrete version of this Pdf.

        options can include
        label: string
        low: low end of range
        high: high end of range
        n: number of places to evaluate

        Returns: new Pmf
        """
        label = options.pop('label', '')
        xs, ds = self.Render(**options)
        return Pmf(dict(zip(xs, ds)), label=label)

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        If options includes low and high, it must also include n;
        in that case the density is evaluated an n locations between
        low and high, including both.

        If options includes xs, the density is evaluate at those location.
        Otherwise, self.GetLinspace is invoked to provide the locations.

        Returns:
            tuple of (xs, densities)
        """
        low = options.pop('low', None)
        high = options.pop('high', None)
        if low is not None and high is not None:
            # explicit range: evaluate at n evenly spaced locations
            xs = np.linspace(low, high, options.pop('n', 101))
        else:
            xs = options.pop('xs', None)
            if xs is None:
                # fall back to the subclass-provided default range
                xs = self.GetLinspace()

        return xs, self.Density(xs)

    def Items(self):
        """Generates a sequence of (value, probability) pairs.
        """
        return zip(*self.Render())
class NormalPdf(Pdf):
    """PDF of a Normal (Gaussian) distribution."""

    def __init__(self, mu=0, sigma=1, label=None):
        """Construct a Normal Pdf with given mu and sigma.

        mu: mean
        sigma: standard deviation
        label: string
        """
        self.mu = mu
        self.sigma = sigma
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)

    def GetLinspace(self):
        """Return a plotting linspace covering mu +/- 3 sigma.

        Returns: numpy array
        """
        span = 3 * self.sigma
        return np.linspace(self.mu - span, self.mu + span, 101)

    def Density(self, xs):
        """Evaluate the Normal density at xs.

        xs: scalar or sequence of floats

        returns: float or NumPy array of probability density
        """
        return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
    """PDF of an exponential distribution."""

    def __init__(self, lam=1, label=None):
        """Construct an exponential Pdf with given rate parameter.

        lam: rate parameter
        label: string
        """
        self.lam = lam
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'ExponentialPdf(%f)' % (self.lam)

    def GetLinspace(self):
        """Return a plotting linspace from 0 to 5 mean lifetimes.

        Returns: numpy array
        """
        return np.linspace(0, 5.0 / self.lam, 101)

    def Density(self, xs):
        """Evaluate the exponential density at xs.

        xs: scalar or sequence of floats

        returns: float or NumPy array of probability density
        """
        return stats.expon.pdf(xs, scale=1.0 / self.lam)
class EstimatedPdf(Pdf):
    """PDF estimated from a sample by Gaussian KDE."""

    def __init__(self, sample, label=None):
        """Estimate the density function based on a sample.

        sample: sequence of data
        label: string
        """
        self.label = '_nolegend_' if label is None else label
        self.kde = stats.gaussian_kde(sample)
        self.linspace = np.linspace(min(sample), max(sample), 101)

    def __str__(self):
        return 'EstimatedPdf(label=%s)' % str(self.label)

    def GetLinspace(self):
        """Return the precomputed plotting linspace.

        Returns: numpy array
        """
        return self.linspace

    def Density(self, xs):
        """Evaluate the KDE at xs.

        returns: float or NumPy array of probability density
        """
        return self.kde.evaluate(xs)

    def Sample(self, n):
        """Generate a random sample of size n from the estimated Pdf."""
        # resample returns a 2-D array, so flatten it to 1-D
        return self.kde.resample(n).flatten()
def CredibleInterval(pmf, percentage=90):
    """Compute a central credible interval for a distribution.

    If percentage=90, computes the 90% CI.

    pmf: Pmf object representing a posterior distribution
    percentage: float between 0 and 100

    Returns: sequence of two floats, (low, high)
    """
    cdf = pmf.MakeCdf()
    tail = (1 - percentage / 100) / 2
    return cdf.Value(tail), cdf.Value(1 - tail)
def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than one from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    return sum(p1 * p2
               for v1, p1 in pmf1.Items()
               for v2, p2 in pmf2.Items()
               if v1 < v2)
def PmfProbGreater(pmf1, pmf2):
    """Probability that a value from pmf1 is greater than one from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    return sum(p1 * p2
               for v1, p1 in pmf1.Items()
               for v2, p2 in pmf2.Items()
               if v1 > v2)
def PmfProbEqual(pmf1, pmf2):
    """Probability that a value from pmf1 equals a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    return sum(p1 * p2
               for v1, p1 in pmf1.Items()
               for v2, p2 in pmf2.Items()
               if v1 == v2)
def RandomSum(dists):
    """Choose a random value from each dist and return the sum.

    dists: sequence of Pmf or Cdf objects

    returns: numerical sum
    """
    return sum(d.Random() for d in dists)
def SampleSum(dists, n):
    """Draw a sample of sums from a list of distributions.

    dists: sequence of Pmf or Cdf objects
    n: sample size

    returns: new Pmf of sums
    """
    return Pmf(RandomSum(dists) for _ in range(n))
def EvalNormalPdf(x, mu, sigma):
    """Evaluate the Normal PDF at x.

    x: value
    mu: mean
    sigma: standard deviation

    returns: float probability density
    """
    return stats.norm.pdf(x, loc=mu, scale=sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
    """Make a discrete PMF approximating a Normal distribution.

    mu: float mean
    sigma: float standard deviation
    num_sigmas: how many sigmas to extend in each direction
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    spread = num_sigmas * sigma
    pmf = Pmf()
    for x in np.linspace(mu - spread, mu + spread, n):
        pmf.Set(x, EvalNormalPdf(x, mu, sigma))
    pmf.Normalize()
    return pmf
def EvalBinomialPmf(k, n, p):
    """Return the probability of k successes in n trials with prob p."""
    return stats.binom.pmf(k, n, p)
def MakeBinomialPmf(n, p):
    """Return the Pmf of the number of successes in n trials with prob p."""
    return Pmf({k: stats.binom.pmf(k, n, p) for k in range(n + 1)})
def EvalGammaPdf(lam, a):
    """Compute the Gamma PDF.

    lam: where to evaluate the PDF
    a: shape parameter of the gamma distribution

    returns: float probability density
    """
    return lam ** (a - 1) * math.exp(-lam) / gamma(a)
def MakeGammaPmf(lams, a):
    """Make a normalized Pmf approximating a Gamma distribution.

    lams: sequence of lam values to include in the Pmf
    a: shape parameter

    returns: normalized Pmf
    """
    pmf = Pmf({lam: EvalGammaPdf(lam, a) for lam in lams})
    pmf.Normalize()
    return pmf
def EvalGeometricPmf(k, p, loc=0):
    """Evaluate the geometric PMF.

    With loc=0: probability that the first success takes k trials.
    With loc=-1: probability of k trials before the first success.

    k: number of trials
    p: probability of success on each trial
    """
    return stats.geom.pmf(k, p, loc=loc)
def MakeGeometricPmf(p, loc=0, high=10):
    """Make a normalized Pmf approximating a geometric distribution.

    With loc=0: PMF of trials to get one success.
    With loc=-1: PMF of trials before first success.

    p: probability of success
    high: upper bound where the PMF is truncated (exclusive)
    """
    # note: k starts at 0, where geom.pmf is 0 when loc=0
    pmf = Pmf({k: stats.geom.pmf(k, p, loc=loc) for k in range(high)})
    pmf.Normalize()
    return pmf
def EvalHypergeomPmf(k, N, K, n):
    """Evaluate the hypergeometric PMF.

    Probability of k successes in n draws (without replacement) from a
    population of N that contains K successes.
    """
    return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
    """Compute the Poisson PMF.

    k: number of events
    lam: rate parameter, events per unit time

    returns: float probability
    """
    return stats.poisson.pmf(k, lam)
def MakePoissonPmf(lam, high, step=1):
    """Make a normalized Pmf approximating a Poisson distribution.

    lam: rate parameter, events per unit time
    high: upper bound (inclusive)
    step: spacing between successive values

    returns: normalized Pmf
    """
    pmf = Pmf()
    for k in range(0, high + 1, step):
        pmf.Set(k, stats.poisson.pmf(k, lam))
    pmf.Normalize()
    return pmf
def EvalExponentialPdf(x, lam):
    """Compute the exponential PDF.

    x: value
    lam: rate parameter, events per unit time

    returns: float probability density
    """
    return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
    """Evaluate the CDF of the exponential distribution at x.

    lam: rate parameter
    """
    return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
    """Make a normalized Pmf approximating an exponential distribution.

    lam: rate parameter, events per unit time
    high: upper bound
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    for x in np.linspace(0, high, n):
        pmf.Set(x, EvalExponentialPdf(x, lam))
    pmf.Normalize()
    return pmf
def EvalWeibullPdf(x, lam, k):
    """Compute the Weibull PDF.

    x: value
    lam: scale parameter
    k: shape parameter

    returns: float probability density
    """
    z = x / lam
    return (k / lam) * z ** (k - 1) * np.exp(-z ** k)
def EvalWeibullCdf(x, lam, k):
    """Evaluate the CDF of the Weibull distribution.

    lam: scale parameter
    k: shape parameter
    """
    z = x / lam
    return 1 - np.exp(-z ** k)
def MakeWeibullPmf(lam, k, high, n=200):
    """Make a Pmf discrete approximation to a Weibull distribution.

    lam: scale parameter
    k: shape parameter
    high: upper bound
    n: number of values in the Pmf

    returns: Pmf (weights are the PDF values, not normalized here)
    """
    xs = np.linspace(0, high, n)
    densities = EvalWeibullPdf(xs, lam, k)
    return Pmf(dict(zip(xs, densities)))
def EvalParetoPdf(x, xm, alpha):
    """Compute the Pareto PDF.

    x: value
    xm: minimum value (scale parameter)
    alpha: shape parameter

    returns: float probability density
    """
    return stats.pareto.pdf(x, alpha, scale=xm)
def MakeParetoPmf(xm, alpha, high, num=101):
    """Make a Pmf discrete approximation to a Pareto distribution.

    xm: minimum value (scale parameter)
    alpha: shape parameter
    high: upper bound value
    num: number of values

    returns: Pmf weighted by the PDF values
    """
    # NOTE(review): the original docstring said "normalized Pmf" but no
    # Normalize() call is made; behavior preserved as-is.
    xs = np.linspace(xm, high, num)
    densities = stats.pareto.pdf(xs, alpha, scale=xm)
    return Pmf(dict(zip(xs, densities)))
def StandardNormalCdf(x):
    """Evaluate the CDF of the standard Normal distribution at x.

    Uses the error-function identity Phi(x) = (1 + erf(x / sqrt(2))) / 2.
    See http://en.wikipedia.org/wiki/Normal_distribution
    #Cumulative_distribution_function

    x: float

    Returns: float
    """
    return 0.5 * (1 + math.erf(x / ROOT2))
def EvalNormalCdf(x, mu=0, sigma=1):
    """Evaluate the CDF of the normal distribution.

    x: float
    mu: mean parameter
    sigma: standard deviation parameter

    Returns: float
    """
    return stats.norm.cdf(x, mu, sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
    """Evaluate the inverse CDF (quantile function) of the normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function

    p: float probability
    mu: mean parameter
    sigma: standard deviation parameter

    Returns: float
    """
    return stats.norm.ppf(p, mu, sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the lognormal distribution.

    x: float or sequence
    mu: mean of log(x)
    sigma: standard deviation of log(x)

    Returns: float or sequence
    """
    # BUGFIX: scipy's lognorm requires the shape parameter s=sigma as a
    # positional argument, and encodes mu via scale=exp(mu).  The
    # original call passed only loc/scale, which raises a TypeError.
    return stats.lognorm.cdf(x, sigma, scale=np.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
    """Generate sequences of xs and ps for an exponential CDF.

    lam: rate parameter
    low: float lower bound of x
    high: float upper bound of x
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    # equivalently: stats.expon.cdf(xs, scale=1.0/lam)
    return xs, 1 - np.exp(-lam * xs)
def RenderNormalCdf(mu, sigma, low, high, n=101):
    """Generate sequences of xs and ps for a Normal CDF.

    mu: mean parameter
    sigma: standard deviation parameter
    low: float lower bound of x
    high: float upper bound of x
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    return xs, stats.norm.cdf(xs, mu, sigma)
def RenderParetoCdf(xmin, alpha, low, high, n=50):
    """Generate sequences of xs and ps for a Pareto CDF.

    xmin: scale parameter (minimum value); low is clipped up to xmin
    alpha: shape parameter
    low: float lower bound of x
    high: float upper bound of x
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(max(low, xmin), high, n)
    # equivalently: stats.pareto.cdf(xs, scale=xmin, b=alpha)
    return xs, 1 - (xs / xmin) ** -alpha
class Beta:
    """Represents a Beta distribution.

    See http://en.wikipedia.org/wiki/Beta_distribution
    """

    def __init__(self, alpha=1, beta=1, label=None):
        """Initialize with pseudo-counts alpha and beta."""
        self.alpha = alpha
        self.beta = beta
        self.label = '_nolegend_' if label is None else label

    def Update(self, data):
        """Update this distribution with observed data.

        data: pair of int (heads, tails)
        """
        heads, tails = data
        self.alpha += heads
        self.beta += tails

    def Mean(self):
        """Return the mean, alpha / (alpha + beta)."""
        return self.alpha / (self.alpha + self.beta)

    def MAP(self):
        """Return the mode (maximum a posteriori value).

        Only meaningful for alpha, beta > 1; alpha == beta == 1 divides
        by zero, as in the original implementation.
        """
        a = self.alpha - 1
        b = self.beta - 1
        return a / (a + b)

    def Random(self):
        """Generate a single random variate from this distribution."""
        return random.betavariate(self.alpha, self.beta)

    def Sample(self, n):
        """Generate a NumPy random sample of size n."""
        return np.random.beta(self.alpha, self.beta, (n,))

    def EvalPdf(self, x):
        """Evaluate the unnormalized PDF at x."""
        return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)

    def MakePmf(self, steps=101, label=None):
        """Return a discrete Pmf approximation of this distribution.

        Normally the PDF is sampled at a grid of points and the density
        treated as probability mass.  If alpha or beta is less than 1,
        the PDF diverges at x=0 or x=1, so the Pmf is built from CDF
        differences instead; the endpoint values are then slightly
        asymmetric but the discrete model behaves well as steps grows.
        """
        if label is None and self.label is not None:
            label = self.label

        if self.alpha < 1 or self.beta < 1:
            return self.MakeCdf().MakePmf()

        xs = [i / (steps - 1.0) for i in range(steps)]
        probs = [self.EvalPdf(x) for x in xs]
        return Pmf(dict(zip(xs, probs)), label=label)

    def MakeCdf(self, steps=101):
        """Return the CDF of this distribution as a Cdf object."""
        xs = [i / (steps - 1.0) for i in range(steps)]
        ps = special.betainc(self.alpha, self.beta, xs)
        return Cdf(xs, ps)

    def Percentile(self, ps):
        """Return the values at the given percentiles.

        ps: scalar, array, or list of percentiles in [0, 100]
        """
        return special.betaincinv(self.alpha, self.beta, np.asarray(ps) / 100)
class Dirichlet(object):
    """Represents a Dirichlet distribution.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution
    """

    def __init__(self, n, conc=1, label=None):
        """Initializes a Dirichlet distribution.

        n: number of dimensions
        conc: concentration parameter (smaller yields more concentration)
        label: string label

        raises: ValueError if n < 2
        """
        if n < 2:
            raise ValueError('A Dirichlet distribution with '
                             'n<2 makes no sense')

        self.n = n
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float produces the same float64 array.
        self.params = np.ones(n, dtype=float) * conc
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Dirichlet distribution.

        data: sequence of observed counts, in order corresponding to params
        """
        m = len(data)
        self.params[:m] += data

    def Random(self):
        """Generates a random variate from this distribution.

        Returns: normalized vector of fractions
        """
        p = np.random.gamma(self.params)
        return p / p.sum()

    def Likelihood(self, data):
        """Computes the likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float probability (0 if data is longer than params)
        """
        m = len(data)
        if self.n < m:
            return 0

        x = data
        p = self.Random()
        q = p[:m] ** x
        return q.prod()

    def LogLikelihood(self, data):
        """Computes the log likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float log probability (-inf if data is longer than params)
        """
        m = len(data)
        if self.n < m:
            return float('-inf')

        x = self.Random()
        y = np.log(x[:m]) * data
        return y.sum()

    def MarginalBeta(self, i):
        """Computes the marginal distribution of the ith element.

        See http://en.wikipedia.org/wiki/Dirichlet_distribution
        #Marginal_distributions

        i: int index

        Returns: Beta object
        """
        alpha0 = self.params.sum()
        alpha = self.params[i]
        return Beta(alpha, alpha0 - alpha)

    def PredictivePmf(self, xs, label=None):
        """Makes a predictive distribution.

        xs: values to go into the Pmf

        Returns: Pmf that maps from x to the mean prevalence of x
        """
        alpha0 = self.params.sum()
        ps = self.params / alpha0
        return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
    """Compute the binomial coefficient "n choose k".

    n: number of trials
    k: number of successes

    Returns: float
    """
    # BUGFIX: scipy.misc.comb was deprecated and removed in SciPy 1.0;
    # scipy.special.comb is the drop-in replacement.  Imported locally
    # so the module-level imports need not change.
    from scipy.special import comb
    return comb(n, k)
def LogBinomialCoef(n, k):
    """Approximate the log of the binomial coefficient.

    Uses n*log(n) - k*log(k) - (n-k)*log(n-k); see
    http://math.stackexchange.com/questions/64716/
    approximating-the-logarithm-of-the-binomial-coefficient

    n: number of trials
    k: number of successes

    Returns: float
    """
    m = n - k
    return n * math.log(n) - k * math.log(k) - m * math.log(m)
def NormalProbability(ys, jitter=0):
    """Generate data for a normal probability plot.

    ys: sequence of values
    jitter: float magnitude of jitter added to the ys

    returns: numpy arrays xs, ys (both sorted ascending)
    """
    xs = np.sort(np.random.normal(0, 1, len(ys)))

    ys = Jitter(ys, jitter) if jitter else np.array(ys)
    ys.sort()

    return xs, ys
def Jitter(values, jitter=0.5):
    """Jitter the values by adding a uniform variate in (-jitter, jitter).

    values: sequence
    jitter: scalar magnitude of jitter

    returns: new numpy array
    """
    # previously used normal noise; uniform noise keeps the
    # perturbation strictly bounded by +/- jitter
    return values + np.random.uniform(-jitter, jitter, len(values))
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
    """Make a normal probability plot with a fitted line.

    sample: sequence of numbers
    fit_color: color string for the fitted line
    options: passed along to Plot
    """
    xs, ys = NormalProbability(sample)
    mean, var = MeanVar(sample)
    fit = FitLine(xs, mean, math.sqrt(var))
    thinkplot.Plot(*fit, color=fit_color, label='model')

    xs, ys = NormalProbability(sample)
    thinkplot.Plot(xs, ys, **options)
def Mean(xs):
    """Compute the mean of a sequence.

    xs: sequence of values

    returns: float mean
    """
    return np.mean(xs)
def Var(xs, mu=None, ddof=0):
    """Compute the variance of a sequence.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    xs = np.asarray(xs)

    if mu is None:
        mu = xs.mean()

    deviations = xs - mu
    return np.dot(deviations, deviations) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
    """Compute the standard deviation of a sequence.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    return math.sqrt(Var(xs, mu, ddof))
def MeanVar(xs, ddof=0):
    """Compute mean and variance together, reusing the computed mean.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, (mean, var)
    """
    xs = np.asarray(xs)
    mean = xs.mean()
    return mean, Var(xs, mean, ddof)
def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    t: sequence of numbers
    p: fraction of values to trim off each end

    Returns: sorted list of the remaining values
    """
    n = int(p * len(t))
    t = sorted(t)
    # BUGFIX: when n == 0 the old slice t[0:-0] evaluated to t[0:0],
    # i.e. an empty list, silently discarding all the data whenever
    # p * len(t) < 1.  Only slice when there is something to trim.
    if n > 0:
        t = t[n:-n]
    return t
def TrimmedMean(t, p=0.01):
    """Compute the trimmed mean of a sequence of numbers.

    t: sequence of numbers
    p: fraction of values to trim off each end

    Returns: float
    """
    return Mean(Trim(t, p))
def TrimmedMeanVar(t, p=0.01):
    """Compute the trimmed mean and variance of a sequence of numbers.

    t: sequence of numbers
    p: fraction of values to trim off each end

    Returns: pair of floats (mean, var)
    """
    return MeanVar(Trim(t, p))
def CohenEffectSize(group1, group2):
    """Compute Cohen's d effect size.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    diff = group1.mean() - group2.mean()

    n1, n2 = len(group1), len(group2)
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)

    return diff / math.sqrt(pooled_var)
def Cov(xs, ys, meanx=None, meany=None):
    """Compute Cov(X, Y), normalized by n (not n-1).

    xs: sequence of values
    ys: sequence of values
    meanx: optional float mean of xs
    meany: optional float mean of ys

    Returns: float covariance
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    if meanx is None:
        meanx = xs.mean()
    if meany is None:
        meany = ys.mean()

    return np.dot(xs - meanx, ys - meany) / len(xs)
def Corr(xs, ys):
    """Compute the Pearson correlation Corr(X, Y).

    xs: sequence of values
    ys: sequence of values

    Returns: float correlation in [-1, 1]
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    meanx, varx = MeanVar(xs)
    meany, vary = MeanVar(ys)

    return Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
def SerialCorr(series, lag=1):
    """Compute the serial correlation of a series with itself at a lag.

    series: pandas Series
    lag: integer number of intervals to shift

    returns: float correlation
    """
    return Corr(series[lag:], series.shift(lag)[lag:])
def SpearmanCorr(xs, ys):
    """Compute Spearman's rank correlation.

    xs: sequence of values
    ys: sequence of values

    Returns: float Spearman's correlation
    """
    xranks = pandas.Series(xs).rank()
    yranks = pandas.Series(ys).rank()
    # Spearman's rho is the Pearson correlation of the ranks
    return Corr(xranks, yranks)
def MapToRanks(t):
    """Return a list of 1-based ranks corresponding to the elements of t.

    t: sequence of numbers

    Returns: list of integer ranks, starting at 1
    """
    # pair each value with its original index and sort by value
    sorted_pairs = sorted(enumerate(t), key=itemgetter(1))
    # attach a rank to each pair, then restore the original order
    ranked = enumerate(sorted_pairs)
    resorted = sorted(ranked, key=lambda trip: trip[1][0])
    # extract the 1-based ranks
    return [rank + 1 for rank, _ in resorted]
def LeastSquares(xs, ys):
    """Compute a linear least squares fit for ys as a function of xs.

    xs: sequence of values
    ys: sequence of values

    Returns: tuple of (intercept, slope)
    """
    meanx, varx = MeanVar(xs)
    meany = Mean(ys)

    slope = Cov(xs, ys, meanx, meany) / varx
    inter = meany - slope * meanx

    return inter, slope
def FitLine(xs, inter, slope):
    """Evaluate a fitted line at the sorted xs.

    xs: sequence of x
    inter: float intercept
    slope: float slope

    returns: tuple of numpy arrays (sorted xs, fit ys)
    """
    fit_xs = np.sort(xs)
    return fit_xs, inter + slope * fit_xs
def Residuals(xs, ys, inter, slope):
    """Compute residuals for a linear fit with parameters inter and slope.

    xs: independent variable
    ys: dependent variable
    inter: float intercept
    slope: float slope

    Returns: numpy array of residuals
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)
    return ys - (inter + slope * xs)
def CoefDetermination(ys, res):
    """Compute the coefficient of determination (R^2) for given residuals.

    ys: dependent variable
    res: residuals

    Returns: float coefficient of determination
    """
    return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
    """Generate standard normal variates with serial correlation.

    rho: target coefficient of correlation

    Returns: iterable of floats
    """
    x = random.gauss(0, 1)
    yield x

    # conditional standard deviation that keeps the marginal variance at 1
    sigma = math.sqrt(1 - rho ** 2)
    while True:
        x = random.gauss(x * rho, sigma)
        yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
    """Generate normal(mu, sigma) variates with serial correlation rho.

    mu: mean of variate
    sigma: standard deviation of variate
    rho: target coefficient of correlation

    Returns: iterable of floats
    """
    for z in CorrelatedGenerator(rho):
        yield z * sigma + mu
def RawMoment(xs, k):
    """Compute the kth raw moment of xs (mean of x**k)."""
    return sum(x ** k for x in xs) / len(xs)
def CentralMoment(xs, k):
    """Compute the kth central moment of xs (mean of (x - mean)**k)."""
    mean = sum(xs) / len(xs)
    return sum((x - mean) ** k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
    """Compute the kth standardized moment (central moment / std**k)."""
    var = CentralMoment(xs, 2)
    std = math.sqrt(var)
    return CentralMoment(xs, k) / std ** k
def Skewness(xs):
    """Compute the sample skewness (third standardized moment)."""
    return StandardizedMoment(xs, 3)
def Median(xs):
    """Compute the median (50th percentile) of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: float
    """
    return Cdf(xs).Value(0.5)
def IQR(xs):
    """Compute the interquartile range endpoints of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: pair of floats (25th percentile, 75th percentile)
    """
    cdf = Cdf(xs)
    return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
    """Compute the Pearson median skewness, 3 * (mean - median) / std."""
    median = Median(xs)
    mean = RawMoment(xs, 1)
    std = math.sqrt(CentralMoment(xs, 2))
    return 3 * (mean - median) / std
class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame with 'name', 'start' and 'end' columns
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of [start, end] index pairs
        names: list of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs to make them 0-based
        self.colspecs = variables[['start', 'end']] - index_base

        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin int is the documented replacement.
        self.colspecs = self.colspecs.astype(int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename
        options: passed through to pandas.read_fwf

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df
def ReadStataDct(dct_file, **options):
    """Read a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    with open(dct_file, **options) as f:
        for line in f:
            # only lines declaring a column position describe variables
            match = re.search(r'_column\(([^)]*)\)', line)
            if not match:
                continue
            start = int(match.group(1))
            tokens = line.split()
            vtype, name, fstring = tokens[1:4]
            name = name.lower()
            if vtype.startswith('str'):
                vtype = str
            else:
                vtype = type_map[vtype]
            long_desc = ' '.join(tokens[4:]).strip('"')
            var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column;
    # the last variable gets a placeholder end of 0
    variables['end'] = variables.start.shift(-1)
    variables.loc[len(variables) - 1, 'end'] = 0

    return FixedWidthVariables(variables, index_base=1)
def Resample(xs, n=None):
    """Draw a bootstrap sample (with replacement) from xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    size = len(xs) if n is None else n
    return np.random.choice(xs, size, replace=True)
def SampleRows(df, nrows, replace=False):
    """Choose a random sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    indices = np.random.choice(df.index, nrows, replace=replace)
    return df.loc[indices]
def ResampleRows(df):
    """Bootstrap-resample the rows of a DataFrame.

    df: DataFrame

    returns: DataFrame of the same length, sampled with replacement
    """
    return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
    """Resample rows with probability proportional to the given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    weights = df[column]
    cdf = Cdf(dict(weights))
    indices = cdf.Sample(len(weights))
    return df.loc[indices]
def PercentileRow(array, p):
    """Select the row from a sorted array that maps to percentile p.

    array: 2-D NumPy array sorted along axis 0
    p: float 0--100

    returns: NumPy array (one row)
    """
    nrows, _ = array.shape
    return array[int(nrows * p / 100)]
def PercentileRows(ys_seq, percents):
    """Select percentile rows across a collection of lines.

    For example, if ys_seq contains simulation results like ys as a
    function of time, and percents contains (5, 95), the result is a
    90% CI for each vertical slice of the simulation results.

    ys_seq: sequence of lines (y values), all of the same length
    percents: list of percentiles (0-100) to select

    returns: list of NumPy arrays, one for each percentile
    """
    nrows = len(ys_seq)
    ncols = len(ys_seq[0])
    array = np.zeros((nrows, ncols))

    for i, ys in enumerate(ys_seq):
        array[i] = ys

    # sort each column independently, then pick the percentile rows
    array = np.sort(array, axis=0)
    return [PercentileRow(array, p) for p in percents]
def Smooth(xs, sigma=2, **options):
    """Smooths a sequence with a 1-D Gaussian filter.

    xs: sequence
    sigma: standard deviation of the filter
    options: passed through to gaussian_filter1d

    returns: NumPy array
    """
    # scipy.ndimage.filters is a deprecated namespace that has been
    # removed in recent SciPy; the same function is available directly
    # as scipy.ndimage.gaussian_filter1d.
    return ndimage.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
    """Represents a hypothesis test."""

    def __init__(self, data):
        """Initialize with the observed data.

        data: data in whatever form is relevant
        """
        self.data = data
        self.MakeModel()
        self.actual = self.TestStatistic(data)
        self.test_stats = None
        self.test_cdf = None

    def PValue(self, iters=1000):
        """Compute the distribution of the test statistic and the p-value.

        iters: number of simulated samples

        returns: float p-value
        """
        self.test_stats = [self.TestStatistic(self.RunModel())
                           for _ in range(iters)]
        self.test_cdf = Cdf(self.test_stats)

        exceedances = sum(1 for stat in self.test_stats
                          if stat >= self.actual)
        return exceedances / iters

    def MaxTestStat(self):
        """Return the largest test statistic seen during simulations."""
        return max(self.test_stats)

    def PlotCdf(self, label=None):
        """Draw the test-stat Cdf with a vertical line at the observed value."""
        def VertLine(x):
            """Draws a vertical line at x."""
            thinkplot.Plot([x, x], [0, 1], color='0.8')

        VertLine(self.actual)
        thinkplot.Cdf(self.test_cdf, label=label)

    def TestStatistic(self, data):
        """Compute the test statistic; subclasses must override.

        data: data in whatever form is relevant
        """
        raise UnimplementedMethodException()

    def MakeModel(self):
        """Build a model of the null hypothesis (default: no-op)."""
        pass

    def RunModel(self):
        """Run the null-hypothesis model; subclasses must override.

        returns: simulated data
        """
        raise UnimplementedMethodException()
def main():
    """Placeholder entry point; this module is meant to be imported."""
    pass
if __name__ == '__main__':
main()
| qrsforever/workspace | python/learn/thinkbayes/thinkbayes2.py | Python | mit | 75,544 | [
"Gaussian"
] | e0fdb6b15c11c354538fc8c44319bb9f8331a18e4d7a13e90dd48952967d41b9 |
'''
Pulse characterization
Created Fri May 12 2017
@author: cpkmanchee
'''
import numpy as np
import os.path
import inspect
from beamtools.constants import h,c,pi
from beamtools.common import normalize, gaussian, sech2, alias_dict
from beamtools.import_data_file import import_data_file as _import
from beamtools.import_data_file import objdict
from scipy.optimize import curve_fit
__all__ = ['spectrumFT', 'fit_ac', 'ac_x2t', 'sigma_fwhm']
class FitResult():
    """Container for a curve fit: function, fit form, parameters, covariance."""

    def __init__(self, ffunc, ftype, popt, pcov=0, indep_var='time'):
        self.ffunc = ffunc      # fitting function, called as ffunc(x, *popt)
        self.ftype = ftype      # name of the fit form (e.g. 'gaussian')
        self.popt = popt        # optimal parameters (e.g. from curve_fit)
        self.pcov = pcov        # covariance matrix; 0 when not provided
        self.iv = indep_var     # name of the independent variable

    def subs(self, x):
        """Evaluate the fitted function at x with the fit parameters."""
        return self.ffunc(x, *self.popt)

    def get_args(self):
        """Return the argument spec of the fitting function.

        BUGFIX: inspect.getargspec was removed in Python 3.11;
        getfullargspec is the replacement and its result still exposes
        the .args list that callers use.
        """
        return inspect.getfullargspec(self.ffunc)
def spectrumFT(data, from_file=False, file_type='oo_spec', units_wl='nm', n_interp=0):
    """Compute the transform-limited pulse from a spectrum.

    data: [wavelength, intensity] pair if from_file is False, otherwise
        the filename of a spectrum file to be imported
    from_file: bool, whether data is a filename
    file_type: import format; default is OceanOptics spectrometer
        (currently the only supported x/y layout)
    units_wl: wavelength units; currently assumed to be nm
    n_interp: bit depth of the frequency interpolation, n = 2**n_interp;
        0 selects the default of 2**12

    returns: (output, imported_data) where output has attributes
        time, ac, nu, psd; returns -1 on file error
    """
    if from_file:
        if type(data) is str:
            if not os.path.exists(data):
                print('File does not exist')
                return -1
            imported_data = _import(data, file_type)
            # insert testing for wavelength/intensity location in dataobject
            wavelength = imported_data.wavelength
            intensity = imported_data.intensity
            # get units from dataobject
        else:
            print('invalid filetype')
            return -1
    else:
        wavelength = data[0]
        intensity = data[1]
        imported_data = data

    if n_interp == 0:
        # auto: default to 2**12 points
        # (later: round up to the nearest power of two of the data length)
        n = 2**12
    else:
        # BUGFIX: both branches previously used 2**12, silently ignoring
        # a user-supplied n_interp
        n = 2**n_interp

    # use units to convert wavelength to SI (currently assumes nm)
    wl = wavelength*1E-9
    psd = normalize(intensity)
    nu = c/wl   # frequency in SI units (Hz)

    # interpolate psd onto a linearly spaced frequency grid
    nui = np.linspace(min(nu), max(nu), n)
    df = (max(nu)-min(nu))/(n-1)
    psdi = normalize(np.interp(nui, np.flipud(nu), np.flipud(psd)))

    # perform inverse FT, dropping the end points to remove the centre spike
    t = np.fft.ifftshift(np.fft.fftfreq(n, df)[1:-1])
    ac = np.fft.ifftshift((np.fft.ifft(np.fft.ifftshift(psdi)))[1:-1])

    output = objdict({'time': t, 'ac': ac, 'nu': nui, 'psd': psdi})

    return output, imported_data
def ac_x2t(position, aoi=15, config='sym'):
    """Convert autocorrelator stage position to time delay.

    Symmetric: the stage moves perpendicular to the mirror normal.
    Asymmetric: the stage moves along the incoming optical axis.

    position: stage position (SI units)
    aoi: angle of incidence, degrees
    config: 'symmetric'/'asymmetric' (aliases accepted)

    returns: time delay; returns position unchanged if config is invalid
    """
    if type(config) is not str:
        print('Unrecognized configuration. Must be symmetric or asymmetric.')
        return position

    key = config.lower()
    if key in alias_dict['symmetric']:
        return (1/c)*position*2*np.cos(aoi*pi/180)
    if key in alias_dict['asymmetric']:
        return (1/c)*position*(1+np.cos(2*aoi*pi/180))

    print('Unrecognized configuration. Must be symmetric or asymmetric.')
    return position
def fit_ac(data, from_file = False, file_type='bt_ac', form='all', bgform = 'constant'):
    '''Fit an autocorrelation peak with Gaussian and/or sech^2 profiles.

    data must be either:
        1. 2 x n array - data[0] = time(delay), data[1] = intensity
        2. datafile name --> from_file must be True
    If there is no 'delay' parameter in the data file (only position), the
    position is auto converted to time delay.

    form selects the peak model(s): 'gaus', 'sech2' or 'all'/'both'.
    bgform selects the background model handed to _background
    (None, 'constant', 'linear' or 'quadratic').

    Returns (fit_results, imported_data) where fit_results is a list of
    FitResult objects, one per fitted form; returns -1 on file errors.
    '''
    if from_file:
        if type(data) is str:
            if not os.path.exists(data):
                print('File does not exist')
                return -1
            imported_data = _import(data,file_type)
            #insert testing for power location in dataobject
            position = imported_data.position
            intensity = imported_data.power
            if 'delay' in imported_data.__dict__:
                delay = imported_data.delay
            else:
                # No delay recorded: convert stage position to time delay.
                delay = ac_x2t(position,aoi=15,config='sym')
            #get units from dataobject
        else:
            print('invalid filetype')
            return -1
    else:
        imported_data = data
        delay = data[0]
        intensity = data[1]
    x = delay
    y = intensity
    # Background starting values; bgform comes back unchanged.
    bgpar, bgform = _background(x,y,form = bgform)
    # Moment-based initial guesses for the peak centre and width.
    mean = np.average(x,weights = y)
    stdv = np.sqrt(np.average((x-mean)**2 ,weights = y))
    # Build the fit functions (peak + background polynomial).
    # BUG FIX: the 'constant' test below was a plain `if`, so bgform=None
    # crashed on bgform.lower(); it is now a proper elif chain.
    if bgform is None:
        def fitfuncGaus(x,sigma,a,x0):
            return gaussian(x,sigma,a,x0)
        def fitfuncSech2(x,sigma,a,x0):
            return sech2(x,sigma,a,x0)
    elif bgform.lower() in alias_dict['constant']:
        def fitfuncGaus(x,sigma,a,x0,p0):
            return gaussian(x,sigma,a,x0) + p0
        def fitfuncSech2(x,sigma,a,x0,p0):
            return sech2(x,sigma,a,x0) + p0
    elif bgform.lower() in alias_dict['linear']:
        def fitfuncGaus(x,sigma,a,x0,p0,p1):
            return gaussian(x,sigma,a,x0) + p1*x + p0
        def fitfuncSech2(x,sigma,a,x0,p0,p1):
            return sech2(x,sigma,a,x0) + p1*x + p0
    elif bgform.lower() in alias_dict['quadratic']:
        def fitfuncGaus(x,sigma,a,x0,p0,p1,p2):
            return gaussian(x,sigma,a,x0) + p2*x**2 + p1*x + p0
        def fitfuncSech2(x,sigma,a,x0,p0,p1,p2):
            return sech2(x,sigma,a,x0) + p2*x**2 + p1*x + p0
    else:
        def fitfuncGaus(x,sigma,a,x0):
            return gaussian(x,sigma,a,x0)
        def fitfuncSech2(x,sigma,a,x0):
            return sech2(x,sigma,a,x0)
    # Number of fit parameters (excluding the x argument).
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # and fall back only on very old interpreters.
    try:
        nFitArgs = len(inspect.getfullargspec(fitfuncGaus).args) - 1
    except AttributeError:
        nFitArgs = len(inspect.getargspec(fitfuncGaus).args) - 1
    #sets which functions are to be fit... this can be streamlined i think
    if form.lower() in ['both', 'all']:
        fitGaus = True
        fitSech2 = True
    elif form.lower() in alias_dict['gaus']:
        fitGaus = True
        fitSech2 = False
    elif form.lower() in alias_dict['sech2']:
        fitGaus = False
        fitSech2 = True
    else:
        # BUG FIX: previously printed only the first character (form[0]).
        print('Unknown fit form: '+form)
        fitGaus = False
        fitSech2 = False
    #start fitting
    popt=[]
    pcov=[]
    fit_results=[]
    # Assemble the initial parameter vector to match the background length.
    # NOTE(review): an unknown bgform yields a 3-element bgpar while the fit
    # functions take no background args, which mismatches curve_fit - this
    # pre-existing path should be confirmed/cleaned up.
    if bgform is None:
        p0=[stdv,max(y)-min(y),mean]
    elif type(bgpar) is np.float64:
        p0=[stdv,max(y)-min(y),mean,bgpar]
    elif type(bgpar) is np.ndarray:
        p0=[stdv,max(y)-min(y),mean]+bgpar.tolist()
    else:
        p0=None
    if fitGaus:
        try:
            poptGaus,pcovGaus = curve_fit(fitfuncGaus,x,y,p0)
        except RuntimeError:
            # curve_fit failed to converge; report all-zero parameters.
            poptGaus = np.zeros(nFitArgs)
            pcovGaus = np.zeros((nFitArgs,nFitArgs))
        popt.append(poptGaus)
        pcov.append(pcovGaus)
        fit_results.append(FitResult(ffunc=fitfuncGaus, ftype='gaussian',
                                     popt=poptGaus, pcov=pcovGaus))
    if fitSech2:
        try:
            poptSech2,pcovSech2 = curve_fit(fitfuncSech2,x,y,p0)
        except RuntimeError:
            poptSech2 = np.zeros(nFitArgs)
            pcovSech2 = np.zeros((nFitArgs,nFitArgs))
        popt.append(poptSech2)
        pcov.append(pcovSech2)
        fit_results.append(FitResult(ffunc=fitfuncSech2, ftype='sech2',
                                     popt=poptSech2, pcov=pcovSech2))
    return fit_results, imported_data
def sigma_fwhm(sigma, shape='gaus'):
    '''Convert a width parameter sigma to the full-width half-max.

    shape selects the conversion factor ('gaus' or 'sech2' aliases);
    any unrecognized shape falls back to a factor of 1.
    '''
    key = shape.lower()
    if key in alias_dict['gaus']:
        factor = 2*np.sqrt(2*np.log(2))
    elif key in alias_dict['sech2']:
        factor = 2*np.arccosh(np.sqrt(2))
    else:
        factor = 1
    return factor*sigma
def _background(x,y,form = 'constant'):
'''Provides starting values for background parameters.
Takes x,y data and the desired background form (default to constant)
returns p, the polynomial coefficients. p is variable in length.
'''
if form is None:
p = np.zeros((3))
if form.lower() in ['const','constant']:
p = min(y)
#p = np.hstack((p,[0,0]))
elif form.lower() in ['lin','linear']:
p = np.linalg.solve([[1,x[0]],[1,x[-1]]], [y[0],y[-1]])
#p = np.hstack((p,0))
elif form.lower() in ['quad','quadratic']:
index = np.argmin(y)
if index == 0:
x3 = 2*x[0]-x[-1]
y3 = y[-1]
elif index == len(y)-1:
x3 = 2*x[-1]-x[0]
y3 = y[0]
else:
x3 = x[index]
y3 = y[index]
a = [[1,x[0],x[0]**2],[1,x[-1],x[-1]**2],[1,x3,x3**2]]
b = [y[0],y[-1],y3]
p = np.linalg.solve(a,b)
else:
print('Unknown background form')
p = np.zeros((3))
return p, form
| kikimaroca/beamtools | build/lib/beamtools/pulse.py | Python | mit | 9,048 | [
"Gaussian"
] | 16c6fdea4bceb7528f29534324fc75f14954c68d888e87b5c1294660caac01b0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Root URL configuration: static template pages, the Django admin, user
# management / allauth routes, and the project's apps. Media files are served
# by Django's static() helper (development-style serving).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('demo.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^polls/', include('demo.polls.urls', namespace='polls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development: just
    # visit these URLs in a browser to see how the error pages look.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        # Mount django-debug-toolbar only when it is actually installed.
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
| gavmain/django_demo | config/urls.py | Python | mit | 1,665 | [
"VisIt"
] | 96c881e7c1bfca5261fe121e8b491b82a5a8bacbf8d987fd7c5baf8b9e796645 |
from __future__ import division
from subprocess import call
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import FixedLocator
from matplotlib.ticker import LogLocator
from matplotlib.ticker import FormatStrFormatter
from sets import Set
import sys
import math
from collections import defaultdict
# matplotlib
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import matplotlib.patches as mpatches
from matplotlib.legend_handler import HandlerLine2D
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
# Stuff for calculating areas.
from scipy.integrate import simps
from scipy import interpolate
from scipy import optimize
from numpy import trapz
from matplotlib import gridspec
from matplotlib.cbook import get_sample_data
from matplotlib._png import read_png
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
from scipy.stats import norm
from scipy.stats import gamma
from scipy import arange, array, exp
from scipy.stats import binned_statistic
# Global matplotlib/LaTeX styling for every plot produced by this script.
mpl.rcParams['axes.linewidth'] = 5.0 #set the value globally
mpl.rcParams['text.usetex'] = True
# NOTE(review): the second preamble assignment overwrites the amsmath one on
# the previous line; presumably both packages were intended - confirm.
mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
plt.rc('font', family='serif', size=43)
def generate_GOE_matrix(matrix_size):
    """Draw one matrix from the Gaussian Orthogonal Ensemble.

    A real standard-normal matrix is symmetrized by averaging it with its
    transpose, yielding a real symmetric random matrix.
    """
    sample = np.random.randn(matrix_size, matrix_size)
    return (sample + sample.T) / 2
def generate_GUE_matrix(matrix_size):
    """Draw one matrix from the Gaussian Unitary Ensemble.

    A complex standard-normal matrix (independent real and imaginary parts,
    real part drawn first) is averaged with its conjugate transpose, giving
    a Hermitian matrix. Returned as numpy.matrix, like the original code.
    """
    re_part = np.matrix(np.random.randn(matrix_size, matrix_size))
    im_part = np.matrix(1.j * np.random.randn(matrix_size, matrix_size))
    sample = re_part + im_part
    return (sample + sample.getH()) / 2
def generate_matrix_ensemble(matrix_size, number_of_matrices, ensemble_type):
    """Generate a list of random matrices from the requested ensemble.

    ensemble_type must be "GOE" or "GUE"; any other value raises ValueError.
    (Previously an unknown type raised UnboundLocalError on the first
    iteration, which was an accident rather than validation.)
    """
    if ensemble_type == "GOE":
        draw = generate_GOE_matrix
    elif ensemble_type == "GUE":
        draw = generate_GUE_matrix
    else:
        raise ValueError("Unknown ensemble type: %r" % (ensemble_type,))
    return [draw(matrix_size) for _ in range(number_of_matrices)]
def compute_eigenvalues(matrices):
    """Return the ascending eigenvalues of every Hermitian matrix given,
    as a list of sorted lists (one per input matrix)."""
    return [sorted(np.linalg.eigvalsh(matrix)) for matrix in matrices]
def calculate_eigenvalues_differences(matrices):
    """Return the normalized spacings between the two central eigenvalues.

    For each matrix the gap between eigenvalues number n/2 and n/2 + 1 is
    taken; the gaps are then divided by their ensemble mean, so the result
    has mean 1.
    """
    # Read the dimension from the shape instead of sqrt(size): exact for
    # integers and valid for both ndarray and numpy.matrix inputs.
    matrix_size = matrices[0].shape[0]
    mid = matrix_size // 2
    all_eigenvalues = compute_eigenvalues(matrices)
    eigenvalues_differences = [eigenvalues[mid + 1] - eigenvalues[mid]
                               for eigenvalues in all_eigenvalues]
    return eigenvalues_differences / np.mean(eigenvalues_differences)
def plot_normalized_differences(matrix_size, number_of_matrices, ensemble_type="GOE"):
    """Histogram the normalized central eigenvalue spacings of one ensemble.

    Draws number_of_matrices random matrices of the requested ensemble type,
    histograms their normalized level spacings and writes the figure to
    plots/<ensemble_type>_eigenvalues_differences.pdf.
    """
    random_matrices = generate_matrix_ensemble(matrix_size, number_of_matrices, ensemble_type)
    differences = calculate_eigenvalues_differences(random_matrices)
    # NOTE(review): `normed` was removed in matplotlib >= 3; newer versions
    # need `density=True` - confirm the targeted matplotlib version.
    plt.hist(differences, color="red", lw=5, edgecolor="red", bins=500, normed=1)
    plt.autoscale()
    plt.xlim(0, 3)
    plt.gcf().set_size_inches(30, 24, forward=1)
    plt.savefig("plots/" + str(ensemble_type) + "_eigenvalues_differences.pdf")
    plt.clf()
def get_eigenvalues_differences(matrix_size, number_of_matrices):
    """Sample central level spacings for GOE and GUE side by side.

    Returns [goe_spacings, gue_spacings], each normalized to mean 1.
    """
    mid = int(matrix_size / 2)
    goe_gaps = []
    gue_gaps = []
    for _ in range(number_of_matrices):
        # Draw one matrix from each ensemble per iteration (GOE first, so
        # the random stream matches the historical ordering).
        goe_matrix = generate_GOE_matrix(matrix_size)
        gue_matrix = generate_GUE_matrix(matrix_size)
        goe_levels = sorted(np.linalg.eigvalsh(goe_matrix))
        gue_levels = sorted(np.linalg.eigvalsh(gue_matrix))
        goe_gaps.append(goe_levels[mid + 1] - goe_levels[mid])
        gue_gaps.append(gue_levels[mid + 1] - gue_levels[mid])
    return [goe_gaps / np.mean(goe_gaps), gue_gaps / np.mean(gue_gaps)]
def plot_eigenvalues_differences(matrix_size, number_of_matrices):
    """Overlay the GOE and GUE central spacing histograms.

    Samples both ensembles via get_eigenvalues_differences and writes the
    combined (normalized) histogram to plots/eigenvalues_differences.pdf.
    """
    GOE_differences, GUE_differences = get_eigenvalues_differences(matrix_size, number_of_matrices)
    # NOTE(review): `normed` was removed in matplotlib >= 3; newer versions
    # need `density=True` - confirm the targeted matplotlib version.
    plt.hist(GOE_differences, color="red", lw=5, histtype='step', edgecolor="red", bins=50, normed=1, label="Gaussian Orthogonal Ensemble")
    plt.hist(GUE_differences, color="blue", lw=5, histtype='step', edgecolor="blue", bins=50, normed=1, label="Gaussian Unitary Ensemble")
    plt.legend()
    plt.autoscale()
    plt.xlim(0, 3)
    plt.gcf().set_size_inches(30, 24, forward=1)
    plt.savefig("plots/eigenvalues_differences.pdf")
    plt.clf()
if __name__ == '__main__':
    # plot_normalized_differences(N=50, number_of_matrices=10000, ensemble_type="GOE")
    # Compare the GOE vs GUE central level-spacing distributions.
    plot_eigenvalues_differences(matrix_size=20, number_of_matrices=100000)
| tripatheea/Riemann-Zeta | python/random_matrices.py | Python | mit | 5,531 | [
"Gaussian"
] | c946f2b0f72029f52060f7921e2940580d055d62e22a72101917738db120e3df |
import sys
import pprint
class Reference(object):
    """A named reference to an object seen in a traceback frame.

    Records the frame index (tb_index) and variable name under which the
    target object was first bound, so later occurrences can be rendered as
    short back-references via marker().
    """

    def __init__(self, tb_index, varname, target):
        self.target = target
        self.varname = varname
        self.tb_index = tb_index

    def marker(self, xtb, tb_index, key):
        """Create a Marker pointing back at this reference."""
        return Marker(self, xtb, tb_index, key)
class Marker(object):
    """Stands in for a repeated object when a traceback is formatted.

    Created from a Reference for a later occurrence of the same object; its
    repr either pretty-prints the target or, when the rendering would be too
    wide or multi-line, collapses to a short "<ref offset=...>" string that
    points back at the original binding.
    """

    def __init__(self, reference, xtb, tb_index, key):
        self.reference = reference
        self.xtb = xtb
        self.tb_index = tb_index
        self.key = key
        # How many frames back the original binding appeared.
        self.tb_offset = self.reference.tb_index - self.tb_index

    def __repr__(self):
        # Walk the call stack to find the XTraceback._format_variable frame
        # so we can read its local `indent` and align our output with it.
        frame = sys._getframe(1)
        while frame:
            try:
                code = self.xtb._format_variable.func_code
            except AttributeError:
                # python 3
                code = self.xtb._format_variable.__code__
            if frame.f_code == code:
                indent = frame.f_locals["indent"] + 4
                break
            frame = frame.f_back
        else:  # pragma: no cover - defensive
            raise RuntimeError("Expecting to be called with "
                               "XTraceback._format_variable in stack")
        pretty_repr = pprint.pformat(self.reference.target)
        if indent + len(self.key) + len(pretty_repr) > self.xtb.print_width \
                or pretty_repr.find("\n") > 0:
            # Too wide or multi-line: emit the compact back-reference,
            # including the variable name when it differs from our key.
            name = "" if self.reference.varname == self.key \
                else " name=%s" % self.reference.varname
            pretty_repr = "<ref offset=%d%s>" % (self.tb_offset, name)
        return pretty_repr
| Hypernode/xtraceback | xtraceback/reference.py | Python | mit | 1,552 | [
"xTB"
] | cc2282ad552d0c43ddaa993c62fe808df5db2398507237f099b16db5b2b13817 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGenefilter(RPackage):
    """Some basic functions for filtering genes.

    Spack recipe for the Bioconductor 'genefilter' R package, fetched from
    the Bioconductor git mirror at a pinned commit.
    """

    homepage = "https://bioconductor.org/packages/genefilter/"
    url = "https://git.bioconductor.org/packages/genefilter"
    list_url = homepage

    version('1.58.1', git='https://git.bioconductor.org/packages/genefilter', commit='ace2556049677f60882adfe91f8cc96791556fc2')

    # This Bioconductor release targets the R 3.4 series only.
    depends_on('r@3.4.0:3.4.9', when='@1.58.1')
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-annotate', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
| EmreAtes/spack | var/spack/repos/builtin/packages/r-genefilter/package.py | Python | lgpl-2.1 | 1,882 | [
"Bioconductor"
] | 99bf8ecfc2dbada7598a221cfda13f66f5ae0d1a27c6eec6d2722dca8af72f67 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference command-line example for Google Analytics Management API v3.
This application demonstrates how to use the python client library to access
all the pieces of data returned by the Google Analytics Management API v3.
The application manages authorization by saving an OAuth2.0 token in a local
file and reusing the token for subsequent requests. It then traverses the
Google Analytics Management hierarchy. It first retrieves and prints all the
authorized user's accounts, next it prints all the web properties for the
first account, then all the profiles for the first web property and finally
all the goals for the first profile. The sample then prints all the
user's advanced segments.
Before You Begin:
Update the client_secrets.json file
You must update the client_secrets.json file with a client id, client
secret, and the redirect uri. You get these values by creating a new project
in the Google APIs console and registering for OAuth2.0 for installed
applications: https://code.google.com/apis/console
Learn more about registering your analytics application here:
https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtAuthorization
Sample Usage:
$ python management_v3_reference.py
Also you can also get help on all the command-line flags the program
understands by running:
$ python management_v3_reference.py --help
"""
from __future__ import print_function
__author__ = 'api.nickm@gmail.com (Nick Mihailovski)'
import argparse
import sys
from googleapiclient.errors import HttpError
from googleapiclient import sample_tools
from oauth2client.client import AccessTokenRefreshError
def main(argv):
    """Authenticate, build the Analytics service and walk the hierarchy.

    Args:
        argv: The command-line arguments (passed to sample_tools.init).
    """
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'analytics', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/analytics.readonly')

    # Traverse the Management hierarchy and print results or handle errors.
    try:
        traverse_hiearchy(service)
    except TypeError as error:
        # Handle errors in constructing a query.
        print(('There was an error in constructing your query : %s' % error))
    except HttpError as error:
        # Handle API errors.
        print(('Arg, there was an API error : %s : %s' %
               (error.resp.status, error._get_reason())))
    except AccessTokenRefreshError:
        # BUG FIX: the two string literals concatenated without a space,
        # printing "...re-runthe application...".
        print ('The credentials have been revoked or expired, please re-run '
               'the application to re-authorize')
def traverse_hiearchy(service):
    """Traverses the management API hierarchy and prints results.

    This retrieves and prints the authorized user's accounts. It then
    retrieves and prints all the web properties for the first account,
    retrieves and prints all the profiles for the first web property,
    and retrieves and prints all the goals for the first profile. Finally
    it prints all the user's segments.

    (The function name keeps its historical spelling; renaming it would
    break external callers.)

    Args:
        service: The service object built by the Google API Python client library.

    Raises:
        HttpError: If an error occurred when accessing the API.
        AccessTokenRefreshError: If the current token was invalid.
    """
    accounts = service.management().accounts().list().execute()
    print_accounts(accounts)
    if accounts.get('items'):
        # Only the first account / property / profile is drilled into.
        firstAccountId = accounts.get('items')[0].get('id')
        webproperties = service.management().webproperties().list(
            accountId=firstAccountId).execute()
        print_webproperties(webproperties)
        if webproperties.get('items'):
            firstWebpropertyId = webproperties.get('items')[0].get('id')
            profiles = service.management().profiles().list(
                accountId=firstAccountId,
                webPropertyId=firstWebpropertyId).execute()
            print_profiles(profiles)
            if profiles.get('items'):
                firstProfileId = profiles.get('items')[0].get('id')
                goals = service.management().goals().list(
                    accountId=firstAccountId,
                    webPropertyId=firstWebpropertyId,
                    profileId=firstProfileId).execute()
                print_goals(goals)
    print_segments(service.management().segments().list().execute())
def print_accounts(accounts_response):
    """Prints all the account info in the Accounts Collection.

    Output goes to stdout; a notice is printed when the collection is empty.

    Args:
        accounts_response: The response object returned from querying the
            Accounts collection.
    """
    print('------ Account Collection -------')
    print_pagination_info(accounts_response)
    print()
    for account in accounts_response.get('items', []):
        print('Account ID = %s' % account.get('id'))
        print('Kind = %s' % account.get('kind'))
        print('Self Link = %s' % account.get('selfLink'))
        print('Account Name = %s' % account.get('name'))
        print('Created = %s' % account.get('created'))
        print('Updated = %s' % account.get('updated'))
        # The child link points at the account's web properties.
        child_link = account.get('childLink')
        print('Child link href = %s' % child_link.get('href'))
        print('Child link type = %s' % child_link.get('type'))
        print()
    if not accounts_response.get('items'):
        print('No accounts found.\n')
def print_webproperties(webproperties_response):
    """Prints all the web property info in the WebProperties collection.

    Output goes to stdout; a notice is printed when the collection is empty.

    Args:
        webproperties_response: The response object returned from querying the
            Webproperties collection.
    """
    print('------ Web Properties Collection -------')
    print_pagination_info(webproperties_response)
    print()
    for webproperty in webproperties_response.get('items', []):
        print('Kind = %s' % webproperty.get('kind'))
        print('Account ID = %s' % webproperty.get('accountId'))
        print('Web Property ID = %s' % webproperty.get('id'))
        print(('Internal Web Property ID = %s' %
               webproperty.get('internalWebPropertyId')))
        print('Website URL = %s' % webproperty.get('websiteUrl'))
        print('Created = %s' % webproperty.get('created'))
        print('Updated = %s' % webproperty.get('updated'))
        print('Self Link = %s' % webproperty.get('selfLink'))
        # Parent link points back at the owning account; child link points
        # at this property's profiles.
        parent_link = webproperty.get('parentLink')
        print('Parent link href = %s' % parent_link.get('href'))
        print('Parent link type = %s' % parent_link.get('type'))
        child_link = webproperty.get('childLink')
        print('Child link href = %s' % child_link.get('href'))
        print('Child link type = %s' % child_link.get('type'))
        print()
    if not webproperties_response.get('items'):
        print('No webproperties found.\n')
def print_profiles(profiles_response):
    """Prints all the profile info in the Profiles Collection.

    Output goes to stdout; a notice is printed when the collection is empty.

    Args:
        profiles_response: The response object returned from querying the
            Profiles collection.
    """
    print('------ Profiles Collection -------')
    print_pagination_info(profiles_response)
    print()
    for profile in profiles_response.get('items', []):
        print('Kind = %s' % profile.get('kind'))
        print('Account ID = %s' % profile.get('accountId'))
        print('Web Property ID = %s' % profile.get('webPropertyId'))
        print(('Internal Web Property ID = %s' %
               profile.get('internalWebPropertyId')))
        print('Profile ID = %s' % profile.get('id'))
        print('Profile Name = %s' % profile.get('name'))
        print('Currency = %s' % profile.get('currency'))
        print('Timezone = %s' % profile.get('timezone'))
        print('Default Page = %s' % profile.get('defaultPage'))
        print(('Exclude Query Parameters = %s' %
               profile.get('excludeQueryParameters')))
        print(('Site Search Category Parameters = %s' %
               profile.get('siteSearchCategoryParameters')))
        print(('Site Search Query Parameters = %s' %
               profile.get('siteSearchQueryParameters')))
        print('Created = %s' % profile.get('created'))
        print('Updated = %s' % profile.get('updated'))
        print('Self Link = %s' % profile.get('selfLink'))
        # Parent link points at the web property; child link at the goals.
        parent_link = profile.get('parentLink')
        print('Parent link href = %s' % parent_link.get('href'))
        print('Parent link type = %s' % parent_link.get('type'))
        child_link = profile.get('childLink')
        print('Child link href = %s' % child_link.get('href'))
        print('Child link type = %s' % child_link.get('type'))
        print()
    if not profiles_response.get('items'):
        print('No profiles found.\n')
def print_goals(goals_response):
    """Prints all the goal info in the Goals collection.

    Output goes to stdout; after the common fields each goal's type-specific
    details are dispatched to the matching print_*_goal_details helper.

    Args:
        goals_response: The response object returned from querying the Goals
            collection
    """
    print('------ Goals Collection -------')
    print_pagination_info(goals_response)
    print()
    for goal in goals_response.get('items', []):
        print('Goal ID = %s' % goal.get('id'))
        print('Kind = %s' % goal.get('kind'))
        print('Self Link = %s' % goal.get('selfLink'))
        print('Account ID = %s' % goal.get('accountId'))
        print('Web Property ID = %s' % goal.get('webPropertyId'))
        print(('Internal Web Property ID = %s' %
               goal.get('internalWebPropertyId')))
        print('Profile ID = %s' % goal.get('profileId'))
        print('Goal Name = %s' % goal.get('name'))
        print('Goal Value = %s' % goal.get('value'))
        print('Goal Active = %s' % goal.get('active'))
        print('Goal Type = %s' % goal.get('type'))
        print('Created = %s' % goal.get('created'))
        print('Updated = %s' % goal.get('updated'))
        parent_link = goal.get('parentLink')
        print('Parent link href = %s' % parent_link.get('href'))
        print('Parent link type = %s' % parent_link.get('type'))
        # Print the goal details depending on the type of goal.
        if goal.get('urlDestinationDetails'):
            print_url_destination_goal_details(
                goal.get('urlDestinationDetails'))
        elif goal.get('visitTimeOnSiteDetails'):
            print_visit_time_on_site_goal_details(
                goal.get('visitTimeOnSiteDetails'))
        elif goal.get('visitNumPagesDetails'):
            print_visit_num_pages_goal_details(
                goal.get('visitNumPagesDetails'))
        elif goal.get('eventDetails'):
            print_event_goal_details(goal.get('eventDetails'))
        print()
    if not goals_response.get('items'):
        print('No goals found.\n')
def print_url_destination_goal_details(goal_details):
    """Print the details of a URL Destination goal to stdout.

    Args:
        goal_details: The details portion of the goal response.
    """
    print('------ Url Destination Goal -------')
    print('Goal URL = {0}'.format(goal_details.get('url')))
    print('Case Sensitive = {0}'.format(goal_details.get('caseSensitive')))
    print('Match Type = {0}'.format(goal_details.get('matchType')))
    print('First Step Required = {0}'.format(goal_details.get('firstStepRequired')))
    print('------ Url Destination Goal Steps -------')
    steps = goal_details.get('steps', [])
    for goal_step in steps:
        print('Step Number = {0}'.format(goal_step.get('number')))
        print('Step Name = {0}'.format(goal_step.get('name')))
        print('Step URL = {0}'.format(goal_step.get('url')))
    if not steps:
        print('No Steps Configured')
def print_visit_time_on_site_goal_details(goal_details):
    """Print the details of a Visit Time On Site goal to stdout.

    Args:
        goal_details: The details portion of the goal response.
    """
    print('------ Visit Time On Site Goal -------')
    print('Comparison Type = {0}'.format(goal_details.get('comparisonType')))
    print('comparison Value = {0}'.format(goal_details.get('comparisonValue')))
def print_visit_num_pages_goal_details(goal_details):
    """Print the details of a Visit Num Pages goal to stdout.

    Args:
        goal_details: The details portion of the goal response.
    """
    print('------ Visit Num Pages Goal -------')
    print('Comparison Type = {0}'.format(goal_details.get('comparisonType')))
    print('comparison Value = {0}'.format(goal_details.get('comparisonValue')))
def print_event_goal_details(goal_details):
    """Print the details of an Event goal to stdout.

    Args:
        goal_details: The details portion of the goal response.
    """
    print('------ Event Goal -------')
    print('Use Event Value = {0}'.format(goal_details.get('useEventValue')))
    for condition in goal_details.get('eventConditions', []):
        kind = condition.get('type')
        print('Type = {0}'.format(kind))
        if kind not in ('CATEGORY', 'ACTION', 'LABEL'):
            # VALUE-type conditions carry a comparison rather than a match.
            print('Comparison Type = {0}'.format(condition.get('comparisonType')))
            print('Comparison Value = {0}'.format(condition.get('comparisonValue')))
        else:
            print('Match Type = {0}'.format(condition.get('matchType')))
            print('Expression = {0}'.format(condition.get('expression')))
def print_segments(segments_response):
    """Prints all the segment info in the Segments collection.

    Output goes to stdout.

    Args:
        segments_response: The response object returned from querying the
            Segments collection.
    """
    print('------ Segments Collection -------')
    print_pagination_info(segments_response)
    print()
    for segment in segments_response.get('items', []):
        print('Segment ID = %s' % segment.get('id'))
        print('Kind = %s' % segment.get('kind'))
        print('Self Link = %s' % segment.get('selfLink'))
        print('Name = %s' % segment.get('name'))
        print('Definition = %s' % segment.get('definition'))
        print('Created = %s' % segment.get('created'))
        print('Updated = %s' % segment.get('updated'))
        print()
def print_pagination_info(management_response):
    """Print the common pagination fields of a Management API response.

    Args:
        management_response: The common reponse object for each collection in
            the Management API.
    """
    print('Items per page = {0}'.format(management_response.get('itemsPerPage')))
    print('Total Results = {0}'.format(management_response.get('totalResults')))
    print('Start Index = {0}'.format(management_response.get('startIndex')))
    # These links are only present when additional result pages exist.
    prev_link = management_response.get('previousLink')
    if prev_link:
        print('Previous Link = {0}'.format(prev_link))
    next_link = management_response.get('nextLink')
    if next_link:
        print('Next Link = {0}'.format(next_link))
if __name__ == '__main__':
    # Pass the full argv so sample_tools.init can parse the shared flags.
    main(sys.argv)
| googleapis/google-api-python-client | samples/analytics/management_v3_reference.py | Python | apache-2.0 | 14,762 | [
"VisIt"
] | ecd2a6f99cf5634a4f2b458b7efc7f7f41c66710b50b5e6a23f70b37ddedc34d |
#!/usr/bin/env python2
# Copyright (C) 2016-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# #
# ESPResSo++ Python script for the calculation of long range interactions #
# (Coulomb interaction) using the Ewald summation and the P3M methods. #
# #
#############################################################################
'''
# Initially, the simple cubic structure is generated in order to represent the
# NaCl crystal. Then the energy and forces are calculated and compared using both
# the Ewald summation and the P3M. At the end the Madelung constant of NaCl crystal
# is calculated.
#
# At the moment there is only metallic surrounding media is possible.
# Parameters:
# Ewald summation:
alpha = 1.112583061 (Ewald parameter)
rspacecutoff = 4.9 (the cutoff in real space)
kspacecutoff = 30 (the cutoff in reciprocal space)
# P3M:
M = (16, 16, 16) (mesh)
P = 7 (charge assignment order)
'''
import mpi4py.MPI as MPI
import espressopp
from espressopp import Real3D
# initial parameters
# initial parameters
N = 16 # number of particles per edge of the cubic lattice
num_particles = N**3 # total number of particles
rho = 0.03 # number density of particles, number of particles divided by volume

# creating a cubic NaCl crystal
#print 'Creating a simple cubic structure...'
x, y, z, Lx, Ly, Lz = espressopp.tools.lattice.createCubic(num_particles, rho, False)

# creating the system box
box = (Lx, Ly, Lz)
print 'System box size: ', box
print 'Number of particles = ', num_particles

# Ewald summation parameters
#alphaEwald = 1.112583061 # alpha - Ewald parameter
alphaEwald = 0.660557
rspacecutoff = 4.9 # rspacecutoff - the cutoff in real space
kspacecutoff = 30 # kspacecutoff - the cutoff in reciprocal space

print 'Ewald parameters:'
print 'alfa=%f, rcutoff=%f, kcutoff=%d' % (alphaEwald, rspacecutoff, kspacecutoff)

# P3M parameters: mesh M and charge assignment order P
M = espressopp.Int3D(64, 64, 64)
P = 7
#alphaP3M = 1.112583061 # alpha - Ewald parameter
alphaP3M = 0.660557

print 'P3M parameters:'
print 'Mesh=', M,', charge assignment order=%d, alphaP3M=%lf' % ( P, alphaP3M)

# a skin for Verlet list
skin = 0.2

# Coulomb prefactor parameters (prefactor = Bjerrum length * temperature)
bjerrumlength = 1.0
temperature = 1.0
coulomb_prefactor = bjerrumlength * temperature

# Domain decomposition geometry derived from the box, cutoff and skin.
nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size,box,rspacecutoff,skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rspacecutoff, skin)

print ''
print 'density = %.4f' % (rho)
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''

'''
Below two systems for Ewald summation and PPPM methods will be created.
'''

#######################################################################################
# system for Ewald: identical geometry so energies/forces are comparable
#######################################################################################
systemEwald = espressopp.System()
systemEwald.rng = espressopp.esutil.RNG()
systemEwald.bc = espressopp.bc.OrthorhombicBC(systemEwald.rng, box)
systemEwald.skin = skin
systemEwald.storage = espressopp.storage.DomainDecomposition(systemEwald, nodeGrid, cellGrid)

#######################################################################################
# system for PPPM
#######################################################################################
systemPPPM = espressopp.System()
systemPPPM.rng = espressopp.esutil.RNG()
systemPPPM.bc = espressopp.bc.OrthorhombicBC(systemPPPM.rng, box)
systemPPPM.skin = skin
systemPPPM.storage = espressopp.storage.DomainDecomposition(systemPPPM, nodeGrid, cellGrid)

#######################################################################################
# adding particles
props = ['id', 'pos', 'type', 'q']
new_particles = []
countX = countY = countZ = 0
for i in range(0, num_particles):
# charge should be accordingly to NaCl crystall
charge = pow(-1, countX + countY + countZ)
part = [ i, Real3D(x[i], y[i], z[i]), 0, charge ]
new_particles.append(part)
countX += 1
if countX >= N:
countX = 0
countY += 1
if countY >= N:
countY = 0
countZ += 1
# adding particles to Ewald system
systemEwald.storage.addParticles(new_particles, *props)
systemEwald.storage.decompose()
# adding particles to PPPM system
systemPPPM.storage.addParticles(new_particles, *props)
systemPPPM.storage.decompose()
## potentials and interactions ##
# setting a Verlet list
vlEwald = espressopp.VerletList(systemEwald, rspacecutoff)
vlPPPM = espressopp.VerletList(systemPPPM, rspacecutoff)
# real space interaction for Ewald system
# R space part of electrostatic interaction
coulombR_potEwald = espressopp.interaction.CoulombRSpace(coulomb_prefactor, alphaEwald, rspacecutoff)
# creating an interaction based on the Verlet list
coulombR_intEwald = espressopp.interaction.VerletListCoulombRSpace(vlEwald)
# setting the potential for the interaction between particles of type 0 and 0
coulombR_intEwald.setPotential(type1=0, type2=0, potential = coulombR_potEwald)
# adding the interaction to the system
systemEwald.addInteraction(coulombR_intEwald)
# real space interaction for PPPM system
# R space part of electrostatic interaction
coulombR_potP3M = espressopp.interaction.CoulombRSpace(coulomb_prefactor, alphaP3M, rspacecutoff)
# creating an interaction based on the Verlet list
coulombR_intPPPM = espressopp.interaction.VerletListCoulombRSpace(vlPPPM)
# setting the potential for the interaction between particles of type 0 and 0
coulombR_intPPPM.setPotential(type1=0, type2=0, potential = coulombR_potP3M)
# adding the interaction to the system
systemPPPM.addInteraction(coulombR_intPPPM)
# K space part of electrostatic interaction
ewaldK_pot = espressopp.interaction.CoulombKSpaceEwald(systemEwald, coulomb_prefactor, alphaEwald, kspacecutoff)
# creating an interaction based on the Cell list for all particle interaction and potential in K space
ewaldK_int = espressopp.interaction.CellListCoulombKSpaceEwald(systemEwald.storage, ewaldK_pot)
# adding the interaction to the system
systemEwald.addInteraction(ewaldK_int)
# PPPM system
p3m_pot = espressopp.interaction.CoulombKSpaceP3M( systemPPPM, coulomb_prefactor, alphaP3M, M, P, rspacecutoff)
# creating the interaction based on the Cell list for all particle interaction and potential in K space
p3m_int = espressopp.interaction.CellListCoulombKSpaceP3M(systemPPPM.storage, p3m_pot)
# adding the interaction to the system
systemPPPM.addInteraction(p3m_int)
### Integrators for Ewald and PPPM
# creating the integrator which based on the Verlet algorithm
integratorEwald = espressopp.integrator.VelocityVerlet(systemEwald)
# seting the time step (it is not important here)
integratorEwald.dt = 0.0001
# nothing will be changed in system, just forces will be calculated ones
integratorEwald.run(0)
# creating the integrator which based on the Verlet algorithm
integratorPPPM = espressopp.integrator.VelocityVerlet(systemPPPM)
# seting the time step (it is not important here)
integratorPPPM.dt = 0.0001
# nothing will be changed in system, just forces will be calculated ones
integratorPPPM.run(0)
# printing the particle id and force difference (x,y,z) for first 6 particles
print ('\n Difference between forces calculated by Ewald summation and PPPM (first 6 particles)')
print ('%3s %20s %20s %20s\n' % ('id', 'dfx', 'dfy', 'dfz'))
#sock = espressopp.tools.vmd.connect(systemPPPM)
#espressopp.tools.vmd.imd_positions(systemPPPM, sock)
print_N = min(num_particles, 20)
for j in range(0, print_N):
print ( '%3d %3.17f %3.17f %3.17f' % (j, \
abs(systemEwald.storage.getParticle(j).f.x - systemPPPM.storage.getParticle(j).f.x), \
abs(systemEwald.storage.getParticle(j).f.y - systemPPPM.storage.getParticle(j).f.y), \
abs(systemEwald.storage.getParticle(j).f.z - systemPPPM.storage.getParticle(j).f.z)) )
print 'force:', systemPPPM.storage.getParticle(j).f, ' ', systemEwald.storage.getParticle(j).f
# calculating the R space part of electrostatic energy
energyEwaldR = coulombR_intEwald.computeEnergy()
# calculating the K space part of electrostatic energy
energyEwaldK = ewaldK_int.computeEnergy()
# total energy (Ewald summation)
enTotEwald = energyEwaldR + energyEwaldK
# calculating the R space part of electrostatic energy
energyPPPMR = coulombR_intPPPM.computeEnergy()
# calculating the K space part of electrostatic energy
energyPPPMK = p3m_int.computeEnergy()
# total energy (PPPM)
enTotPPPM = energyPPPMR + energyPPPMK
# printing the total energy and the difference
print 'Energy (Ewald summation): %5.16f Energy (PPPM): %5.16f\n' % (enTotEwald, enTotPPPM)
print 'The difference in energy (Ewald - PPPM): %5.16f\n' % (enTotEwald-enTotPPPM)
a = 2 * pow( Lx*Ly*Lz / num_particles , 1./3. )
madelung_NaCl = -1.747564594633182190636212035544397403481
print ("Madelung constant is: %14.10f\n" % (enTotEwald/num_particles * a))
print (" error: %e\n\n" % ( abs( abs( enTotPPPM/num_particles * a) - abs(madelung_NaCl))))
| kkreis/espressopp | examples/electrostatics/electrostatics_example.py | Python | gpl-3.0 | 10,081 | [
"CRYSTAL",
"ESPResSo",
"VMD"
] | 58312be13b673ccccd0c3cae52ffe2bf70e31aeafabfcc2b2d8db2558ba2441c |
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import os
import numpy
import pickle
import json
#
# Load Files
#
class EasyClip(ScriptedLoadableModule):
    """Slicer module descriptor: registers Easy Clip with its category,
    contributors, help text and funding acknowledgement."""

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        parent.title = "Easy Clip"
        parent.categories = ["Surface Models"]
        parent.dependencies = []
        parent.contributors = ["Julia Lopinto, (University Of Michigan)", "Jean-Baptiste Vimort, (University Of Michigan)"]
        parent.helpText = """
This Module is used to clip one or different 3D Models according to a predetermined plane.
Plane can be saved to be reused for other models.
After clipping, the models are closed and can be saved as new 3D Models.

This is an alpha version of the module.
It can't be used for the moment.
"""
        parent.acknowledgementText = """
This work was supported by the National
Institutes of Dental and Craniofacial Research
and Biomedical Imaging and Bioengineering of
the National Institutes of Health under Award
Number R01DE024450.
"""
        self.parent = parent
class EasyClipWidget(ScriptedLoadableModuleWidget):
    """GUI widget of the Easy Clip module.

    Loads the Qt .ui description, wires the Red/Yellow/Green slice-plane
    check boxes and half-space radio buttons to the logic, and drives the
    clip / undo / save-plane / load-plane actions.
    """

    def setup(self):
        """Build the UI and connect every widget to its handler."""
        ScriptedLoadableModuleWidget.setup(self)
        print("-------Easy Clip Widget Setup---------")
        # GLOBALS:
        self.logic = EasyClipLogic(self)
        # helper slice-volume models that must never be treated as clippable models
        self.ignoredNodeNames = ('Red Volume Slice', 'Yellow Volume Slice', 'Green Volume Slice')
        self.colorSliceVolumes = dict()        # color name -> helper volume node ID
        self.dictionnaryModel = dict()         # model node ID -> polydata backup (for undo)
        self.hardenModelIDdict = dict()        # fiducial list ID -> saved "hardenModelID" attribute
        self.landmarkDescriptionDict = dict()  # fiducial list ID -> saved "landmarkDescription" attribute
        self.planeControlsDictionary = {}
        # Instantiate and connect widgets
        #
        # Interface: load the module's .ui file
        #
        loader = qt.QUiLoader()
        moduleName = 'EasyClip'
        scriptedModulesPath = eval('slicer.modules.%s.path' % moduleName.lower())
        scriptedModulesPath = os.path.dirname(scriptedModulesPath)
        path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' %moduleName)
        qfile = qt.QFile(path)
        qfile.open(qt.QFile.ReadOnly)
        widget = loader.load(qfile, self.parent)
        self.layout = self.parent.layout()
        self.widget = widget
        self.layout.addWidget(widget)
        ##--------------------------- Scene --------------------------#
        # this attribute is reused by the Longitudinal Quantification extension
        self.SceneCollapsibleButton = self.logic.get("SceneCollapsibleButton")
        treeView = self.logic.get("treeView")
        treeView.setMRMLScene(slicer.app.mrmlScene())
        treeView.sceneModel().setHorizontalHeaderLabels(["Models"])
        treeView.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode']
        treeView.header().setVisible(False)
        self.autoChangeLayout = self.logic.get("autoChangeLayout")
        self.computeBox = self.logic.get("computeBox")
        self.computeBox.connect('clicked()', self.onComputeBox)
        #--------------------------- Clipping Part --------------------------#
        # CLIPPING BUTTONS: one check box (plane on/off) and two radio buttons
        # (which half-space to keep) per slice color.
        self.red_plane_box = self.logic.get("red_plane_box")
        self.radio_red_Neg = self.logic.get("radio_red_Neg")
        self.radio_red_Neg.setIcon(qt.QIcon(":/Icons/RedSpaceNegative.png"))
        self.radio_red_Pos = self.logic.get("radio_red_Pos")
        self.radio_red_Pos.setIcon(qt.QIcon(":/Icons/RedSpacePositive.png"))
        self.red_plane_box.connect('clicked(bool)', lambda: self.logic.onCheckBoxClicked('Red', self.red_plane_box, self.radio_red_Neg))
        self.red_plane_box.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeRed", self.red_plane_box.isChecked(), self.radio_red_Neg.isChecked(), self.radio_red_Pos.isChecked()))
        self.radio_red_Neg.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeRed", self.red_plane_box.isChecked(), self.radio_red_Neg.isChecked(), self.radio_red_Pos.isChecked()))
        self.radio_red_Pos.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeRed", self.red_plane_box.isChecked(), self.radio_red_Neg.isChecked(), self.radio_red_Pos.isChecked()))
        self.yellow_plane_box = self.logic.get("yellow_plane_box")
        self.radio_yellow_Neg= self.logic.get("radio_yellow_Neg")
        self.radio_yellow_Neg.setIcon(qt.QIcon(":/Icons/YellowSpaceNegative.png"))
        self.radio_yellow_Pos = self.logic.get("radio_yellow_Pos")
        self.radio_yellow_Pos.setIcon(qt.QIcon(":/Icons/YellowSpacePositive.png"))
        self.yellow_plane_box.connect('clicked(bool)', lambda: self.logic.onCheckBoxClicked('Yellow', self.yellow_plane_box, self.radio_yellow_Neg))
        self.yellow_plane_box.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeYellow", self.yellow_plane_box.isChecked(), self.radio_yellow_Neg.isChecked(), self.radio_yellow_Pos.isChecked()))
        self.radio_yellow_Neg.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeYellow", self.yellow_plane_box.isChecked(), self.radio_yellow_Neg.isChecked(), self.radio_yellow_Pos.isChecked()))
        self.radio_yellow_Pos.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeYellow", self.yellow_plane_box.isChecked(), self.radio_yellow_Neg.isChecked(), self.radio_yellow_Pos.isChecked()))
        self.green_plane_box = self.logic.get("green_plane_box")
        self.radio_green_Neg= self.logic.get("radio_green_Neg")
        self.radio_green_Neg.setIcon(qt.QIcon(":/Icons/GreenSpaceNegative.png"))
        self.radio_green_Pos = self.logic.get("radio_green_Pos")
        self.radio_green_Pos.setIcon(qt.QIcon(":/Icons/GreenSpacePositive.png"))
        self.green_plane_box.connect('clicked(bool)', lambda: self.logic.onCheckBoxClicked('Green', self.green_plane_box, self.radio_green_Neg))
        self.green_plane_box.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeGreen", self.green_plane_box.isChecked(), self.radio_green_Neg.isChecked(), self.radio_green_Pos.isChecked()))
        self.radio_green_Neg.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeGreen", self.green_plane_box.isChecked(), self.radio_green_Neg.isChecked(), self.radio_green_Pos.isChecked()))
        self.radio_green_Pos.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeGreen", self.green_plane_box.isChecked(), self.radio_green_Neg.isChecked(), self.radio_green_Pos.isChecked()))
        self.ClippingButton = self.logic.get("ClippingButton")
        self.ClippingButton.connect('clicked()', self.ClippingButtonClicked)
        self.UndoButton = self.logic.get("UndoButton")
        self.UndoButton.connect('clicked()', self.UndoButtonClicked)
        # -------------------------------- PLANES --------------------------------#
        self.CollapsibleButton3 = self.logic.get("CollapsibleButton3")
        self.save = self.logic.get("save")
        self.read = self.logic.get("read")
        self.save.connect('clicked(bool)', self.savePlane)
        self.read.connect('clicked(bool)', self.readPlane)
        #-------------------- onCloseScene ----------------------#
        slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)

    def onCloseScene(self, obj, event):
        """Reset cached plane state when the MRML scene is closed."""
        self.colorSliceVolumes = dict()
        for key in self.logic.ColorNodeCorrespondence:
            self.logic.planeDict[self.logic.ColorNodeCorrespondence[key]] = self.logic.planeDef()
        self.UndoButton.enabled = False

    def enter(self):
        """Called when the user switches to this module: restore layout,
        manual planes, fiducial labels and plane check-box state."""
        if self.autoChangeLayout.isChecked():
            lm = slicer.app.layoutManager()
            self.currentLayout = lm.layout
            lm.setLayout(4)  # 3D-View only
        # Show manual planes
        for planeControls in self.planeControlsDictionary.values():
            if planeControls.PlaneIsDefined():
                planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
                                                   planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, planeControls.slideOpacity.value)
        # Re-sync the stored landmark labels with the current markup labels
        list = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
        end = list.GetNumberOfItems()
        for i in range(0,end):
            fidList = list.GetItemAsObject(i)
            landmarkDescription = self.logic.decodeJSON(fidList.GetAttribute("landmarkDescription"))
            if landmarkDescription:
                for n in range(fidList.GetNumberOfMarkups()):
                    markupID = fidList.GetNthMarkupID(n)
                    markupLabel = fidList.GetNthMarkupLabel(n)
                    landmarkDescription[markupID]["landmarkLabel"] = markupLabel
                fidList.SetAttribute("landmarkDescription",self.logic.encodeJSON(landmarkDescription))
        self.onComputeBox()
        self.logic.onCheckBoxClicked('Red', self.red_plane_box, self.radio_red_Neg)
        self.logic.onCheckBoxClicked('Green', self.green_plane_box, self.radio_green_Neg)
        self.logic.onCheckBoxClicked('Yellow', self.yellow_plane_box, self.radio_yellow_Neg)

    def exit(self):
        """Called when the user leaves the module: remove helper nodes,
        hide planes and restore the previous layout."""
        # Remove hidden nodes that are created just for Angle Planes
        for x in self.colorSliceVolumes.values():
            node = slicer.mrmlScene.GetNodeByID(x)
            slicer.mrmlScene.RemoveNode(node)
            # NOTE(review): SetHideFromEditors is called AFTER the node was
            # removed from the scene - presumably the unhide was meant to
            # happen before removal; confirm the intended order.
            node.SetHideFromEditors(False)
        self.colorSliceVolumes = dict()
        # Hide manual planes (opacity 0)
        for planeControls in self.planeControlsDictionary.values():
            if planeControls.PlaneIsDefined():
                planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
                                                   planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, 0)
        # Hide the three slice planes
        for x in self.logic.ColorNodeCorrespondence.keys():
            compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
            compNode.SetLinkedControl(False)
            slice = slicer.mrmlScene.GetNodeByID(self.logic.ColorNodeCorrespondence[x])
            slice.SetWidgetVisible(False)
            slice.SetSliceVisible(False)
        # Reset layout only if the user has not manually changed it meanwhile
        if self.autoChangeLayout.isChecked():
            lm = slicer.app.layoutManager()
            if lm.layout == 4:
                lm.setLayout(self.currentLayout)

    def savePlane(self):
        """Persist the current slice-plane matrices to a file chosen by the user."""
        self.logic.getCoord()
        self.logic.saveFunction()

    def readPlane(self):
        """Load previously saved plane matrices and apply them to the slice nodes."""
        self.logic.readPlaneFunction()

    def UndoButtonClicked(self):
        """Restore the pre-clipping polydata and fiducial attributes saved by the last clip."""
        print("undo:")
        print(self.dictionnaryModel)
        self.UndoButton.enabled = False
        for key,value in self.dictionnaryModel.items():
            model = slicer.mrmlScene.GetNodeByID(key)
            model.SetAndObservePolyData(value)
        for key,value in self.hardenModelIDdict.items():
            fidList = slicer.mrmlScene.GetNodeByID(key)
            fidList.SetAttribute("hardenModelID", value)
        for key,value in self.modelIDdict.items():
            fidList = slicer.mrmlScene.GetNodeByID(key)
            fidList.SetAttribute("connectedModelID", value)
        for key,value in self.landmarkDescriptionDict.items():
            fidList = slicer.mrmlScene.GetNodeByID(key)
            fidList.SetAttribute("landmarkDescription",value)

    def onComputeBox(self):
        """Fit a colored one-voxel volume behind each slice view so the planes
        are visible across the bounding box of all visible models."""
        #--------------------------- Box around the model --------------------------#
        positionOfVisibleNodes = self.getPositionOfModelNodes(True)
        if len(positionOfVisibleNodes) == 0:
            return
        # `sys` is normally not an attribute of the slicer module; the except
        # branch reads the `sys` that EasyClipLogic imports into its class namespace.
        try:
            maxValue = slicer.sys.float_info.max
        except:
            maxValue = self.logic.sys.float_info.max
        bound = [maxValue, -maxValue, maxValue, -maxValue, maxValue, -maxValue]
        # Accumulate the bounding box of every visible (transform-hardened) model.
        for i in positionOfVisibleNodes:
            node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
            model = self.logic.createIntermediateHardenModel(node)
            polydata = model.GetPolyData()
            if polydata is None or not hasattr(polydata, "GetBounds"):
                continue
            tempbound = polydata.GetBounds()
            bound[0] = min(bound[0], tempbound[0])
            bound[2] = min(bound[2], tempbound[2])
            bound[4] = min(bound[4], tempbound[4])
            bound[1] = max(bound[1], tempbound[1])
            bound[3] = max(bound[3], tempbound[3])
            bound[5] = max(bound[5], tempbound[5])
        # --------------------------- Box around the model --------------------------#
        # dimensions (padded by 10%) and center of the combined bounding box
        dim = []
        origin = []
        for x in range(0, 3):
            dim.append(bound[x * 2 + 1] - bound[x * 2])
            origin.append(bound[x * 2] + dim[x] / 2)
            dim[x] *= 1.1
        # label values picked per color from the GenericAnatomyColors table
        dictColors = {'Red': 32, 'Yellow': 15, 'Green': 1}
        for x in dictColors.keys():
            sampleVolumeNode = self.CreateNewNode(x, dictColors[x], dim, origin)
            compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
            compNode.SetLinkedControl(False)
            compNode.SetBackgroundVolumeID(sampleVolumeNode.GetID())
        lm = slicer.app.layoutManager()
        # Reset and fit 2D-views
        lm.resetSliceViews()
        for x in dictColors.keys():
            logic = lm.sliceWidget(x)
            node = logic.mrmlSliceNode()
            node.SetSliceResolutionMode(node.SliceResolutionMatch2DView)
            logic.fitSliceToBackground()
        # Reset pink box around models and recenter the 3D camera
        for i in range(0, lm.threeDViewCount):
            threeDView = lm.threeDWidget(i).threeDView()
            threeDView.resetFocalPoint()
            # position the camera so that all actors can be seen
            threeDView.renderWindow().GetRenderers().GetFirstRenderer().ResetCamera()

    def getPositionOfModelNodes(self, onlyVisible):
        """Return scene indices of model nodes, skipping the helper slice
        volumes and (optionally) invisible models."""
        numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
        positionOfNodes = list()
        for i in range(0, numNodes):
            node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
            if node.GetName() in self.ignoredNodeNames:
                continue
            if onlyVisible is True and node.GetDisplayVisibility() == 0:
                continue
            positionOfNodes.append(i)
        return positionOfNodes

    def CreateNewNode(self, colorName, color, dim, origin):
        """Create (once per color) a hidden 1-voxel labelmap volume used only
        to tint the corresponding slice plane, then place/scale it."""
        # we add a pseudo-random number to the name of our empty volume to avoid the risk of having a volume called
        # exactly the same by the user which could be confusing. We could also have used slicer.app.sessionId()
        if colorName not in self.colorSliceVolumes.keys():
            VolumeName = "EasyClip_EmptyVolume_" + str(slicer.app.applicationPid()) + "_" + colorName
            # Do NOT set the spacing and the origin of imageData (vtkImageData)
            # The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
            # We only create an image of 1 voxel (as we only use it to color the planes
            imageData = vtk.vtkImageData()
            imageData.SetDimensions(1, 1, 1)
            imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
            imageData.SetScalarComponentFromDouble(0, 0, 0, 0, color)
            # older Slicer versions have no dedicated labelmap node class
            if hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
                sampleVolumeNode = slicer.vtkMRMLLabelMapVolumeNode()
            else:
                sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
            sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
            sampleVolumeNode.SetName(VolumeName)
            labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
            slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
            colorNode = slicer.util.getNode('GenericAnatomyColors')
            labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
            sampleVolumeNode.SetAndObserveImageData(imageData)
            sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
            labelmapVolumeDisplayNode.VisibilityOff()
            self.colorSliceVolumes[colorName] = sampleVolumeNode.GetID()
        sampleVolumeNode = slicer.mrmlScene.GetNodeByID(self.colorSliceVolumes[colorName])
        sampleVolumeNode.HideFromEditorsOn()
        # stretch the single voxel over the models' bounding box
        sampleVolumeNode.SetOrigin(origin[0], origin[1], origin[2])
        sampleVolumeNode.SetSpacing(dim[0], dim[1], dim[2])
        if not hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
            sampleVolumeNode.SetLabelMap(1)
        sampleVolumeNode.SetHideFromEditors(True)
        sampleVolumeNode.SetSaveWithScene(False)
        return sampleVolumeNode

    def ClippingButtonClicked(self):
        """Run the clipping and keep the returned backups so Undo can restore them."""
        self.logic.getCoord()
        self.dictionnaryModel, self.modelIDdict, self.hardenModelIDdict, self.landmarkDescriptionDict\
            = self.logic.clipping()
        self.UndoButton.enabled = True

    def updateSliceState(self, plane, boxState, negState, posState):
        """Mirror the UI state (enabled / half-space choice) into the logic's planeDef."""
        print("Update Slice State")
        self.logic.planeDict[plane].boxState = boxState
        self.logic.planeDict[plane].negState = negState
        self.logic.planeDict[plane].posState = posState
class EasyClipLogic(ScriptedLoadableModuleLogic):
    """Computational back-end of Easy Clip.

    Keeps one ``planeDef`` per slice node (Red/Yellow/Green), converts the
    slice widgets into ``vtkPlane`` objects and clips every visible model
    with ``vtkClipClosedSurface``.
    """
    # Make `sys` reachable as a class attribute when `slicer.sys` does not
    # exist: EasyClipWidget.onComputeBox reads `self.logic.sys.float_info.max`
    # in its fallback branch, so this import must stay in the class namespace.
    try:
        slicer.sys
    except:
        import sys

    class planeDef(object):
        """State of one clipping plane: geometry plus UI flags."""
        def __init__(self):
            # SliceToRAS matrix of the slice node
            self.matrix = None
            # normal to the plane (homogeneous column vector)
            self.n = None
            # a point in the plane (homogeneous column vector)
            self.P = None
            # Slice state mirrored from the UI
            self.boxState = False   # plane enabled for clipping
            self.negState = False   # keep the negative half-space
            self.posState = False   # keep the positive half-space
            # Plane object handed to the clipper
            self.vtkPlane = vtk.vtkPlane()

    def __init__(self, interface):
        self.interface = interface
        self.ColorNodeCorrespondence = {'Red': 'vtkMRMLSliceNodeRed',
                                        'Yellow': 'vtkMRMLSliceNodeYellow',
                                        'Green': 'vtkMRMLSliceNodeGreen'}
        # homogeneous normal (z axis) and point (origin) of an untransformed slice plane
        self.get_normal = numpy.matrix([[0], [0], [1], [0]])
        self.get_point = numpy.matrix([[0], [0], [0], [1]])
        self.planeDict = dict()
        for key in self.ColorNodeCorrespondence:
            self.planeDict[self.ColorNodeCorrespondence[key]] = self.planeDef()

    def get(self, objectName):
        """Return the widget named *objectName* from the loaded .ui tree."""
        return self.findWidget(self.interface.widget, objectName)

    def findWidget(self, widget, objectName):
        """Depth-first search of the Qt widget tree by objectName."""
        if widget.objectName == objectName:
            return widget
        else:
            for w in widget.children():
                resulting_widget = self.findWidget(w, objectName)
                if resulting_widget:
                    return resulting_widget
            return None

    def createIntermediateHardenModel(self, model):
        """Return a hidden copy of *model* with its parent transform hardened.

        The copy is reused across calls (looked up by name) so repeated
        bounding-box computations do not accumulate nodes.
        """
        hardenModel = slicer.mrmlScene.GetNodesByName("SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(
            slicer.app.applicationPid())).GetItemAsObject(0)
        if hardenModel is None:
            hardenModel = slicer.vtkMRMLModelNode()
        hardenPolyData = vtk.vtkPolyData()
        hardenPolyData.DeepCopy(model.GetPolyData())
        hardenModel.SetAndObservePolyData(hardenPolyData)
        hardenModel.SetName(
            "SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(slicer.app.applicationPid()))
        if model.GetParentTransformNode():
            hardenModel.SetAndObserveTransformNodeID(model.GetParentTransformNode().GetID())
        hardenModel.HideFromEditorsOn()
        slicer.mrmlScene.AddNode(hardenModel)
        logic = slicer.vtkSlicerTransformLogic()
        logic.hardenTransform(hardenModel)
        return hardenModel

    def onCheckBoxClicked(self, colorPlane, checkBox, radioButton):
        """Show/hide a slice-plane widget; checking it also selects the negative side."""
        slice = slicer.util.getNode(self.ColorNodeCorrespondence[colorPlane])
        print("Slice test", slice)
        if checkBox.isChecked():
            slice.SetWidgetVisible(True)
            radioButton.setChecked(True)
        else:
            slice.SetWidgetVisible(False)

    def getMatrix(self, slice):
        """Return the 4x4 SliceToRAS matrix of *slice* as a numpy matrix."""
        mat = slice.GetSliceToRAS()
        m = numpy.matrix([[mat.GetElement(0, 0), mat.GetElement(0, 1), mat.GetElement(0, 2), mat.GetElement(0, 3)],
                          [mat.GetElement(1, 0), mat.GetElement(1, 1), mat.GetElement(1, 2), mat.GetElement(1, 3)],
                          [mat.GetElement(2, 0), mat.GetElement(2, 1), mat.GetElement(2, 2), mat.GetElement(2, 3)],
                          [mat.GetElement(3, 0), mat.GetElement(3, 1), mat.GetElement(3, 2), mat.GetElement(3, 3)]])
        return m

    def getCoord(self):
        """Refresh each planeDef's matrix, normal and point from its slice node."""
        for key, planeDef in self.planeDict.items():
            planeDef.matrix = self.getMatrix(slicer.util.getNode(key))
            planeDef.n = planeDef.matrix * self.get_normal
            planeDef.P = planeDef.matrix * self.get_point
            # plane equation a*x + b*y + c*z - d = 0 (kept for debugging)
            a = planeDef.n[0]
            b = planeDef.n[1]
            c = planeDef.n[2]
            d = planeDef.n[0]*planeDef.P[0] + planeDef.n[1]*planeDef.P[1] + planeDef.n[2]*planeDef.P[2]
            # print key + " plane equation: ", a, "* x + ", b, "* y + ", c, "* z - ", d, " = 0"

    def clipping(self):
        """Clip every visible model with the enabled planes.

        Returns four dicts used by Undo: the original polydata per model ID,
        and the saved connectedModelID / hardenModelID / landmarkDescription
        attributes per fiducial-list ID.
        """
        planeCollection = vtk.vtkPlaneCollection()
        harden = slicer.vtkSlicerTransformLogic()
        tempTransform = slicer.vtkMRMLLinearTransformNode()
        tempTransform.HideFromEditorsOn()
        slicer.mrmlScene.AddNode(tempTransform)
        numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
        dictionnaryModel = dict()
        hardenModelIDdict = dict()
        landmarkDescriptionDict = dict()
        modelIDdict = dict()
        # start at 3: presumably the first three model nodes are the slice
        # view models, which must not be clipped -- TODO confirm
        for i in range(3, numNodes):
            planeCollection.RemoveAllItems()
            mh = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
            if mh.GetDisplayVisibility() == 0:
                continue
            model = slicer.util.getNode(mh.GetName())
            transform = model.GetParentTransformNode()
            if transform:
                # clip in the model's local frame: apply the inverse of its transform to the planes
                tempTransform.Copy(transform)
                harden.hardenTransform(tempTransform)
                m = vtk.vtkMatrix4x4()
                tempTransform.GetMatrixTransformToParent(m)
                m.Invert(m, m)
            else:
                m = vtk.vtkMatrix4x4()  # identity
            for key, planeDef in self.planeDict.items():
                hardenP = m.MultiplyPoint(planeDef.P)
                hardenN = m.MultiplyPoint(planeDef.n)
                if planeDef.boxState:
                    planeDef.vtkPlane.SetOrigin(hardenP[0], hardenP[1], hardenP[2])
                    # the clipper keeps the half-space the normal points INTO,
                    # so flip the normal for the "negative" choice
                    if planeDef.negState:
                        planeDef.vtkPlane.SetNormal(-hardenN[0], -hardenN[1], -hardenN[2])
                    if planeDef.posState:
                        planeDef.vtkPlane.SetNormal(hardenN[0], hardenN[1], hardenN[2])
                    planeCollection.AddItem(planeDef.vtkPlane)
            # back up the original polydata for Undo
            dictionnaryModel[model.GetID()] = model.GetPolyData()
            polyData = model.GetPolyData()
            clipper = vtk.vtkClipClosedSurface()
            clipper.SetClippingPlanes(planeCollection)
            clipper.SetInputData(polyData)
            clipper.SetGenerateFaces(1)
            clipper.SetScalarModeToLabels()
            clipper.Update()
            polyDataNew = clipper.GetOutput()
            model.SetAndObservePolyData(polyDataNew)
            # Checking if one or more fiducial lists are connected to this model;
            # their landmarks must be un-projected because the surface changed.
            fidNodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
            end = fidNodes.GetNumberOfItems()
            for j in range(0, end):
                fidList = fidNodes.GetItemAsObject(j)
                if fidList.GetAttribute("connectedModelID"):
                    if fidList.GetAttribute("connectedModelID") == model.GetID():
                        modelIDdict[fidList.GetID()], hardenModelIDdict[fidList.GetID()], landmarkDescriptionDict[fidList.GetID()] = \
                            self.unprojectLandmarks(fidList)
        return dictionnaryModel, modelIDdict, hardenModelIDdict, landmarkDescriptionDict

    def unprojectLandmarks(self, fidList):
        """Detach *fidList* from its clipped model and mark all landmarks unprojected.

        Returns the previous connectedModelID, hardenModelID and
        landmarkDescription so Undo can restore them.
        """
        hardenModelID = fidList.GetAttribute("hardenModelID")
        ModelID = fidList.GetAttribute("connectedModelID")
        landmarkDescriptioncopy = fidList.GetAttribute("landmarkDescription")
        fidList.SetAttribute("connectedModelID", None)
        fidList.SetAttribute("hardenModelID", None)
        landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
        for n in range(fidList.GetNumberOfMarkups()):
            markupID = fidList.GetNthMarkupID(n)
            landmarkDescription[markupID]["projection"]["isProjected"] = False
            landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
            landmarkDescription[markupID]["ROIradius"] = 0
        fidList.SetAttribute("landmarkDescription", self.encodeJSON(landmarkDescription))
        return ModelID, hardenModelID, landmarkDescriptioncopy

    def saveFunction(self):
        """Ask the user for a file and pickle the three SliceToRAS matrices into it."""
        filename = qt.QFileDialog.getSaveFileName(self.interface.parent, "Save file")
        tempDictionary = {}
        for key in self.ColorNodeCorrespondence:
            slice = slicer.util.getNode(self.ColorNodeCorrespondence[key])
            tempDictionary[key] = self.getMatrix(slice).tolist()
        # some Qt bindings return None instead of "" when the dialog is cancelled
        if filename is None:
            filename = qt.QFileDialog.getSaveFileName(self.interface.parent, "Save file")
        if filename != "":
            fileObj = open(filename, "wb")
            pickle.dump(tempDictionary, fileObj)
            fileObj.close()

    def readPlaneFunction(self):
        """Load pickled plane matrices and write them back into the slice nodes."""
        filename = qt.QFileDialog.getOpenFileName(self.interface.parent, "Open file")
        if filename is None:
            filename = qt.QFileDialog.getOpenFileName(self.interface.parent, "Open file")
        if filename != "":
            fileObj = open(filename, "rb")
            tempDictionary = pickle.load(fileObj)
            for key in self.ColorNodeCorrespondence:
                node = slicer.mrmlScene.GetNodeByID(self.ColorNodeCorrespondence[key])
                matList = tempDictionary[key]
                matNode = node.GetSliceToRAS()
                for col in range(0, len(matList)):
                    for row in range(0, len(matList[col])):
                        matNode.SetElement(col, row, matList[col][row])
                node.UpdateMatrices()
            fileObj.close()

    def encodeJSON(self, input):
        """Serialize *input* to JSON with single quotes (MRML attribute-safe)."""
        encodedString = json.dumps(input)
        encodedString = encodedString.replace('\"', '\'')
        return encodedString

    def decodeJSON(self, input):
        """Inverse of encodeJSON; returns None for empty/None input."""
        if input:
            input = input.replace('\'', '\"')
            return self.byteify(json.loads(input))
        return None

    def byteify(self, input):
        """Recursively normalize a decoded-JSON structure.

        The original Python-2 version converted ``unicode`` values to UTF-8
        ``str``; under Python 3 (which this module otherwise targets via
        print(), urllib.request and dict comprehensions) referencing the
        removed ``unicode`` builtin raised NameError for every string value.
        json.loads already yields ``str`` on Python 3, so strings - like all
        other scalars - now simply pass through unchanged.
        """
        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value) for key, value in input.items()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        else:
            return input
class EasyClipTest(ScriptedLoadableModuleTest):
    """Self-test: download a sample model, place the three slice planes at
    fixed positions and run one clipping pass."""

    def setUp(self):
        # reset the state - clear scene
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        # run all tests needed
        self.setUp()
        self.test_EasyClip()

    def test_EasyClip(self):
        self.delayDisplay("Starting the test")
        ###################################################################################################
        #                                     Loading some data                                           #
        ###################################################################################################
        import urllib.request
        downloads = (
            ('http://slicer.kitware.com/midas3/download?items=167065', 'model.vtk', slicer.util.loadModel),
        )
        # download each file only once (skip if present and non-empty), then load it
        for url,name,loader in downloads:
            filePath = slicer.app.temporaryPath + '/' + name
            if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
                print('Requesting download %s from %s...\n' % (name, url))
                urllib.request.urlretrieve(url, filePath)
            if loader:
                print('Loading %s...\n' % (name,))
                loader(filePath)
        self.delayDisplay('Finished with download and loading\n')
        layoutManager = slicer.app.layoutManager()
        threeDWidget = layoutManager.threeDWidget(0)
        threeDView = threeDWidget.threeDView()
        threeDView.resetFocalPoint()
        self.delayDisplay('Model loaded')
        ###################################################################################################
        #                                  Initialize Plane Position                                      #
        ###################################################################################################
        redslice = slicer.util.getNode('vtkMRMLSliceNodeRed')
        yellowslice = slicer.util.getNode('vtkMRMLSliceNodeYellow')
        greenslice = slicer.util.getNode('vtkMRMLSliceNodeGreen')
        # print redslice, yellowslice, greenslice
        self.delayDisplay('Planes are displayed!')
        # Put planes at specific places by editing the translation column of SliceToRAS
        matRed = redslice.GetSliceToRAS()
        matRed.SetElement(0,3,0)
        matRed.SetElement(1,3,0)
        matRed.SetElement(2,3,8)
        redslice.SetWidgetVisible(True)
        print(matRed)
        matYellow = yellowslice.GetSliceToRAS()
        matYellow.SetElement(0,3,-3)
        matYellow.SetElement(1,3,0)
        matYellow.SetElement(2,3,0)
        print(matYellow)
        yellowslice.SetWidgetVisible(True)
        matGreen = greenslice.GetSliceToRAS()
        matGreen.SetElement(0,3,0)
        matGreen.SetElement(1,3,-9)
        matGreen.SetElement(2,3,0)
        print(matGreen)
        greenslice.SetWidgetVisible(True)
        self.delayDisplay('planes are placed!')
        # run the actual clipping through the module logic
        logic = EasyClipLogic(slicer.modules.EasyClipWidget)
        logic.getCoord()
        logic.clipping()
        self.delayDisplay('Test passed!')
| DCBIA-OrthoLab/EasyClip-Extension | EasyClip/EasyClip.py | Python | apache-2.0 | 33,447 | [
"VTK"
] | 804e3e9882578295e1fb05fb71a1fc81e62d1160b72093a56dc218ea5a2293e7 |
# run NESTRandConnMat_v2.py 101 1. -18. 1.8 5000.
# NEST network setup driven from the command line:
#   argv[1] connectivity-matrix file stem, argv[2] excitatory weight,
#   argv[3] inhibitory weight, argv[4] external weight, argv[5] Poisson rate.
import nest
import nest.raster_plot
import nest.voltage_trace
import parameters_v1 as pm
import numpy as np
import matplotlib.pylab as pl
import sys
import time

file_name = sys.argv[1]
conmat = np.load("eigens/"+file_name+".dat")   # precomputed connectivity matrix
#prefix = file_name+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]
prefix = file_name

nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads':1,'overwrite_files':True,'data_path':'./data/','data_prefix':prefix})
start_time = time.time()

""" PARAMETERS """
#conn_dict = {'rule': 'fixed_indegree', 'indegree': pm.nrns}
exc = float(sys.argv[2])      # excitatory synaptic weight
inh = float(sys.argv[3])      # inhibitory synaptic weight
ext = float(sys.argv[4])      # external-drive synaptic weight
p_rate = float(sys.argv[5])   # Poisson generator rate
nest.CopyModel("static_synapse","excitatory",{"weight": exc, "delay":1.5})
nest.CopyModel("static_synapse","inh",{"weight": inh,"delay": 1.5})
nest.CopyModel("static_synapse","ext",{"weight": ext, "delay":1.5})
# conductance-based LIF neuron parameters applied to all created neurons
neuron_params = {'V_th':-55.0, 'V_reset': -70.0, 't_ref': 2.0, 'g_L':16.6,'C_m':250.0, 'E_ex': 0.0, 'E_in': -80.0, 'tau_syn_ex':0.2,'tau_syn_in': 2.0,'E_L' : -70.}
nest.SetDefaults("iaf_cond_alpha", neuron_params)

""" NEST CREATIONS """
Nestrons = nest.Create('iaf_cond_alpha',pm.nrns)#,params={'I_e':350.})
spikerec = nest.Create('spike_detector',1)# len(pm.split_lr23)+len(pm.split_lr4)+len(pm.split_lr5))
# record spikes to file only, skipping the first 100 ms transient
nest.SetStatus(spikerec,{'to_file':True,'to_memory':False, 'start':100.})
psn = nest.Create('poisson_generator', 1, {'rate':p_rate}) #1150
psn1 = nest.Create('poisson_generator', 1, {'rate': p_rate/5.})

""" choosing random neurons [exc,inh] from each layer """
x=np.zeros(6)
hc,mc = 0,0
x[0],x[1] = pm.choose_EI(pm.layers23,hc,mc)
x[2],x[3] = pm.choose_EI(pm.layers4,hc,mc)
x[4],x[5] = pm.choose_EI(pm.layers5,hc,mc)
vm = nest.Create('voltmeter',len(x))
nest.SetStatus(vm,{'to_file':True,'to_memory':False})
for i in range(len(x)):
    # raise the threshold so the recorded neuron never spikes, then attach a voltmeter
    nest.SetStatus([Nestrons[int(x[i])]],{'V_th':1000.})
    nest.Connect([vm[i]],[Nestrons[int(x[i])]])

""" CONNECTIONS """
nid = np.zeros(pm.nrns)
nid[:] = Nestrons
for j in range(pm.nrns):
post_id = pl.find(np.abs(conmat[:,j]) != 0.) #>0.0001)
post_wt = conmat.T[j][post_id]
post_del = np.ones(np.size(post_wt))*1.
nx = nid[post_id].astype(int).tolist()
if j in pm.exc_nrns_set:
syn_type = 'excitatory'
elif j in pm.inh_nrns_set:
syn_type = 'inh'
nest.Connect([Nestrons[j]],nx,'all_to_all',syn_spec=syn_type)
nest.Connect(psn,Nestrons, syn_spec="ext")
nest.Connect(Nestrons,spikerec)
""" SIMULATION AND PLOTTING """
print "Simulating.."
nest.Simulate(2000.)
print "Done! Now plotting.."
vv = [np.loadtxt('./data/'+prefix+'voltmeter-288'+str(i)+'-0.dat') for i in range(4,10)]
pl.figure(prefix)
pl.title(prefix)
for i in range(len(vv)):
pl.subplot(len(vv),1,i+1)
pl.title("neuron "+str(int(x[i]))+","+pm.n_where(x[i],conmat))
pl.plot(vv[i][:,1], vv[i][:,2])
#pl.plot(vv[len(vv)][:,1],vv[len(vv)][:,2])
#nest.voltage_trace.from_device([vm[i]])
pl.show() | kalfasyan/DA224x | code/NESTRandConnMat_v2.py | Python | gpl-2.0 | 3,002 | [
"NEURON"
] | 85a34f80a09c9143895a2b937ee79d5193ebffc72749c7290e65b74db9b74134 |
""" JobLoggingDB class is a front-end to the Job Logging Database.
The following methods are provided
addLoggingRecord()
getJobLoggingInfo()
deleteJob()
getWMSTimeStamps()
"""
__RCSID__ = "$Id$"
import time
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Base.DB import DB
MAGIC_EPOC_NUMBER = 1270000000
#############################################################################
class JobLoggingDB(DB):
  """ Frontend to JobLoggingDB MySQL table

      Stores and retrieves the per-job history of
      (Status, MinorStatus, ApplicationStatus) transitions with timestamps.
  """

  def __init__(self):
    """ Standard Constructor
    """
    DB.__init__(self, 'JobLoggingDB', 'WorkloadManagement/JobLoggingDB')

  #############################################################################
  def addLoggingRecord(self,
                       jobID,
                       status='idem',
                       minor='idem',
                       application='idem',
                       date='',
                       source='Unknown'):
    """ Add a new entry to the JobLoggingDB table. One, two or all the three status
        components (major, minor, application) can be specified.
        Optionally the time stamp of the status can
        be provided in a form of a string in a format '%Y-%m-%d %H:%M:%S' or
        as datetime.datetime object. If the time stamp is not provided the current
        UTC time is used.

        :param jobID: job identifier (anything convertible to int)
        :param status: major status ('idem' keeps the previous value on readback)
        :param minor: minor status
        :param application: application status (truncated to 255 chars on insert)
        :param date: optional timestamp (string or datetime); defaults to now (UTC)
        :param source: component reporting the status change
        :return: S_OK/S_ERROR structure from the underlying _update call
    """
    event = 'status/minor/app=%s/%s/%s' % (status, minor, application)
    self.log.info("Adding record for job ", str(jobID) + ": '" + event + "' from " + source)
    if not date:
      # Make the UTC datetime string and float
      _date = Time.dateTime()
      # Epoch seconds are shifted by MAGIC_EPOC_NUMBER so the value fits a
      # smaller numeric column; rounded to millisecond precision for ordering.
      epoc = time.mktime(_date.timetuple()) + _date.microsecond / 1000000. - MAGIC_EPOC_NUMBER
      time_order = round(epoc, 3)
    else:
      try:
        # Python 2: basestring covers both str and unicode
        if isinstance(date, basestring):
          # The date is provided as a string in UTC
          _date = Time.fromString(date)
          epoc = time.mktime(_date.timetuple()) + _date.microsecond / 1000000. - MAGIC_EPOC_NUMBER
          time_order = round(epoc, 3)
        elif isinstance(date, Time._dateTimeType):
          # The date is already a datetime object
          _date = date
          epoc = time.mktime(_date.timetuple()) + _date.microsecond / 1000000. - \
              MAGIC_EPOC_NUMBER  # pylint: disable=no-member
          time_order = round(epoc, 3)
        else:
          # Unrecognised type: log it and fall back to the current time
          self.log.error('Incorrect date for the logging record')
          _date = Time.dateTime()
          epoc = time.mktime(_date.timetuple()) - MAGIC_EPOC_NUMBER
          time_order = round(epoc, 3)
      except BaseException:
        # Any parsing failure also falls back to the current time
        self.log.exception('Exception while date evaluation')
        _date = Time.dateTime()
        epoc = time.mktime(_date.timetuple()) - MAGIC_EPOC_NUMBER
        time_order = round(epoc, 3)
    # NOTE(review): status/minor/source are interpolated directly into the SQL
    # string; presumably these are trusted internal values -- confirm upstream.
    cmd = "INSERT INTO LoggingInfo (JobId, Status, MinorStatus, ApplicationStatus, " + \
        "StatusTime, StatusTimeOrder, StatusSource) VALUES (%d,'%s','%s','%s','%s',%f,'%s')" % \
        (int(jobID), status, minor, application[:255],
         str(_date), time_order, source)
    return self._update(cmd)

  #############################################################################
  def getJobLoggingInfo(self, jobID):
    """ Returns a Status,MinorStatus,ApplicationStatus,StatusTime,StatusSource tuple
        for each record found for job specified by its jobID in historical order

        'idem' markers stored in the table are replaced by the last concrete
        value seen, so each returned tuple is fully resolved.
    """
    cmd = 'SELECT Status,MinorStatus,ApplicationStatus,StatusTime,StatusSource FROM' \
        ' LoggingInfo WHERE JobId=%d ORDER BY StatusTimeOrder,StatusTime' % int(jobID)
    result = self._query(cmd)
    if not result['OK']:
      return result
    if result['OK'] and not result['Value']:
      return S_ERROR('No Logging information for job %d' % int(jobID))
    return_value = []
    # Seed the rolling values from the first record
    status, minor, app = result['Value'][0][:3]
    if app == "idem":
      app = "Unknown"
    for row in result['Value']:
      # Carry the previous value forward wherever 'idem' was stored
      if row[0] != "idem":
        status = row[0]
      if row[1] != "idem":
        minor = row[1]
      if row[2] != "idem":
        app = row[2]
      return_value.append((status, minor, app, str(row[3]), row[4]))
    return S_OK(return_value)

  #############################################################################
  def deleteJob(self, jobID):
    """ Delete logging records for given jobs

        :param jobID: a single id (int/long/str) or an iterable of ids
    """
    # Make sure that we have a list of jobs
    if isinstance(jobID, (int, long)):
      jobList = [str(jobID)]
    elif isinstance(jobID, basestring):
      jobList = [jobID]
    else:
      jobList = list(jobID)
    jobString = ','.join(jobList)
    req = "DELETE FROM LoggingInfo WHERE JobID IN (%s)" % jobString
    result = self._update(req)
    return result

  #############################################################################
  def getWMSTimeStamps(self, jobID):
    """ Get TimeStamps for job MajorState transitions
        return a {State:timestamp} dictionary

        Timestamps are returned as strings of epoch seconds (the stored
        shifted value plus MAGIC_EPOC_NUMBER); 'LastTime' holds the most
        recent StatusTime.
    """
    self.log.debug('getWMSTimeStamps: Retrieving Timestamps for Job %d' % int(jobID))
    result = {}
    cmd = 'SELECT Status,StatusTimeOrder FROM LoggingInfo WHERE JobID=%d' % int(jobID)
    resCmd = self._query(cmd)
    if not resCmd['OK']:
      return resCmd
    if not resCmd['Value']:
      return S_ERROR('No Logging Info for job %d' % int(jobID))
    for event, etime in resCmd['Value']:
      # Undo the storage shift to recover real epoch seconds
      result[event] = str(etime + MAGIC_EPOC_NUMBER)

    # Get last date and time
    cmd = 'SELECT MAX(StatusTime) FROM LoggingInfo WHERE JobID=%d' % int(jobID)
    resCmd = self._query(cmd)
    if not resCmd['OK']:
      return resCmd
    if resCmd['Value']:
      result['LastTime'] = str(resCmd['Value'][0][0])
    else:
      result['LastTime'] = "Unknown"

    return S_OK(result)
| chaen/DIRAC | WorkloadManagementSystem/DB/JobLoggingDB.py | Python | gpl-3.0 | 5,672 | [
"DIRAC"
] | 30bbf2e610926f417e7487c2394b29fd1f358824cfd46aef5fa938b2984fe373 |
"""Unit tests for BioCMA."""
import unittest
from cStringIO import StringIO
from Bio import SeqIO
from biocma import biocma, cma, utils
EX_CMA = "fikk.cma"
EX_FASTA = "fikk-full.fasta"
class IOTests(unittest.TestCase):
    """Round-trip checks for reading the CMA format."""

    def test_read(self):
        # A single read should yield 24 sequences, the first of which
        # matches the block's declared query length.
        parsed = cma.read(EX_CMA)
        seqs = parsed['sequences']
        self.assertEqual(len(seqs), 24)
        self.assertEqual(parsed['query_length'], seqs[0]['length'])

    def test_parse(self):
        # The example file contains exactly one block.
        self.assertEqual(len(list(cma.parse(EX_CMA))), 1)
class BioTests(unittest.TestCase):
    """Tests for the Biopython wrapper."""

    def test_read(self):
        # Smoke test: any exception raised by the reader fails the test.
        biocma.read(EX_CMA)
class UtilTests(unittest.TestCase):
    """Tests for utility functions."""

    def test_iron(self):
        # "Ironing" a gapped sequence must preserve the residue count
        # (i.e. the number of non-gap characters is unchanged).
        seq = ('-a-a-LLLGQPIFPGDSGVDQLVEIIKVLgtptre---qiremnpnyteFKFPQIK'
               '---ahpwtkvfrprtPPEAIALCSRLLEYTPTARLT-----PLEACAHSFF-')
        iseq = cma.iron(seq)
        self.assertEqual(len(seq.replace('-', '')),
                         len(iseq.replace('-', '')))

    def test_get_inserts(self):
        # Each aligned sequence should get an entry in the inserts mapping,
        # and the coordinate ranges must slice out the expected (lower-case)
        # insert substrings from the corresponding full-length FASTA record.
        block = cma.read(EX_CMA)
        inserts = utils.get_inserts(block)
        self.assertEqual(len(inserts), len(block['sequences']))
        fullseqs = SeqIO.to_dict(SeqIO.parse(EX_FASTA, 'fasta'))
        # Spot-check the second and the last sequence against known inserts.
        for sequence, targets in (
                (block['sequences'][1], ['n', 't', 'fyklyllkkydsntlfnv']),
                (block['sequences'][-1], ['altkl', 'nkl',
                    'siptvgfskdgdrlqemykasvcsyteecqg',
                    'ndndgeylldge', 'eh', 'p',
                    'epecancneedknmsennhkkdskhkgdsnhksdsnhksdsnhksdsnhksgsnhksdcnhksgsnhksdsnhqsdcnhmsdhnhksdnnhksdsshksdsshksdsshksgsnhksdnnhksdsshksgsnhksdhnhksdsnhksdsnhknesnhknesnhknesnhknesnhknesnhkndsnhksdsnhmsdhnhksdnnhksdhnhmsdhnhksdnnhksdnnhmsdhnhksdnnhksdnnhksdnnhksdhnhmsdhnhksdnnhksdhnhksdsnhmsdhnhmsdhnhksdhnhksdhnhksdnnhksdsnhksdsnhksdhnhksdsnhmsdhnhmsdhnhksdhnhksdnnhksdsnhksdsnhksdhnhksdsnhmsdhnhmsdhnhmsdhnhksdhnhksdnnhksdsnhksdsnhksdsnhksdhnhksdhkhmsdnnhksdnnhksdhnhksdnnhksdhnhksdsnhksdsnhksdsnhksdsnhksdnnhksdhnhnsdsnhmsdhnhksdhnhksdhnhksdnnhksdnnhksdhnhksdhkknnnnnkdnknddnddsdasdavhediellesysdlnkfnemlteqln',
                    'vt', 'edtrv', 'pmythnl', 'g',
                    'sfqscqpcv', 'iirehiklkidnpfehlstitdqee',
                    'yfd', 'ra', 'fqlak'])):
            full = fullseqs[sequence['id']]
            # Insert coordinates are 1-based inclusive -> slice with start-1.
            ins_ranges = [str(full.seq)[start-1:end]
                          for start, end in inserts[sequence['id']]]
            print sequence['id'], ins_ranges
            self.assertEqual(len(ins_ranges), len(targets))
            for ins, tgt in zip(ins_ranges, targets):
                self.assertEqual(ins.lower(), tgt)
# ---------------------------------------------------------
if __name__ == '__main__':
    # Verbose runner so individual test names are printed as they run.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| etal/biocma | test/test_all.py | Python | bsd-2-clause | 3,052 | [
"Biopython"
] | f54f07387718de5cdbf684b122a3084098fd30a8849ddda7682a2a97cd826710 |
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_pkiprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of PKIProfile Avi RESTful Object
description:
- This module is used to configure PKIProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
ca_certs:
description:
- List of certificate authorities (root and intermediate) trusted that is used for certificate validation.
created_by:
description:
- Creator name.
crl_check:
description:
- When enabled, avi will verify via crl checks that certificates in the trust chain have not been revoked.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
crls:
description:
- Certificate revocation lists.
ignore_peer_chain:
description:
- When enabled, avi will not trust intermediate and root certs presented by a client.
- Instead, only the chain certs configured in the certificate authority section will be used to verify trust of the client's cert.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
is_federated:
description:
- This field describes the object's replication scope.
- If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines.
- If the field is set to true, then the object is replicated across the federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
type: bool
name:
description:
- Name of the pki profile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
validate_only_leaf_crl:
description:
- When enabled, avi will only validate the revocation status of the leaf certificate using crl.
- To enable validation for the entire chain, disable this option and provide all the relevant crls.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PKIProfile object
avi_pkiprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_pkiprofile
"""
RETURN = '''
obj:
description: PKIProfile (api/pkiprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: assemble the PKIProfile argument spec and
    dispatch the request to the generic Avi API handler."""
    specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'ca_certs': dict(type='list',),
        'created_by': dict(type='str',),
        'crl_check': dict(type='bool',),
        'crls': dict(type='list',),
        'ignore_peer_chain': dict(type='bool',),
        'is_federated': dict(type='bool',),
        'name': dict(type='str', required=True),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
        'validate_only_leaf_crl': dict(type='bool',),
    }
    # Merge in the options shared by every Avi module (controller, auth, ...)
    specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
    if not HAS_AVI:
        # The SDK import at module load failed; report it through Ansible.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'pkiprofile',
                           set([]))


if __name__ == '__main__':
    main()
"VisIt"
] | 7543ee002a1bbabc0651734c9d36e28663f394602ce978ac55fbed9168f247d4 |
''' Test_RSS_Policy_SpaceTokenOccupancyPolicy
'''
import unittest
import DIRAC.ResourceStatusSystem.Policy.SpaceTokenOccupancyPolicy as moduleTested
################################################################################
class SpaceTokenOccupancyPolicy_TestCase(unittest.TestCase):
  """Shared fixture exposing the module under test and the policy class."""

  def setUp(self):
    """Bind the tested module and the policy class as instance attributes."""
    self.moduleTested = moduleTested
    self.testClass = moduleTested.SpaceTokenOccupancyPolicy

  def tearDown(self):
    """Drop the fixture attributes."""
    del self.testClass
    del self.moduleTested
################################################################################
class SpaceTokenOccupancyPolicy_Success( SpaceTokenOccupancyPolicy_TestCase ):

  def test_instantiate( self ):
    ''' tests that we can instantiate one object of the tested class
    '''
    module = self.testClass()
    self.assertEqual( 'SpaceTokenOccupancyPolicy', module.__class__.__name__ )

  def test_evaluate( self ):
    ''' tests the method _evaluate
    '''
    module = self.testClass()

    # A failed backend result propagates as Status=Error with the backend message
    res = module._evaluate( { 'OK' : False, 'Message' : 'Bo!' } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Bo!', res[ 'Value' ][ 'Reason' ] )

    # Missing or empty values yield Status=Unknown
    res = module._evaluate( { 'OK' : True, 'Value' : None } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
    res = module._evaluate( { 'OK' : True, 'Value' : [] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )

    # Required keys are reported missing one at a time: total, free, guaranteed
    res = module._evaluate( { 'OK' : True, 'Value' : [{ 'A' : 1 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Key total missing', res[ 'Value' ][ 'Reason' ] )
    res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Total' : 1 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Key free missing', res[ 'Value' ][ 'Reason' ] )
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Total' : 100, 'Free' : 0.0 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Key guaranteed missing', res[ 'Value' ][ 'Reason' ] )

    # Free-space thresholds: <100GB -> Banned, <5TB -> Degraded, else Active
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Total' : 100, 'Free' : 0.0,
                                                       'Guaranteed' : 1 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Free space < 100GB', res[ 'Value' ][ 'Reason' ] )
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Total' : 100, 'Free' : 4.0,
                                                       'Guaranteed' : 1 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Degraded', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Free space < 5TB',
                      res[ 'Value' ][ 'Reason' ] )
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Total' : 100, 'Free' : 100,
                                                       'Guaranteed' : 1 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Free space > 5TB',
                      res[ 'Value' ][ 'Reason' ] )
################################################################################
if __name__ == '__main__':
  # Load the fixture base class and the concrete tests, then run verbosely.
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase(SpaceTokenOccupancyPolicy_TestCase)
  suite.addTest(loader.loadTestsFromTestCase(SpaceTokenOccupancyPolicy_Success))
  testResult = unittest.TextTestRunner(verbosity=2).run(suite)
"DIRAC"
] | daf5b4b114a36650a47c938b88dbb548bde99968f2a771cf4be80d904998657d |
""" HTTP controllers for the HXL Proxy
All of the Flask controllers are in this module.
See unit tests in tests/test_controllers.py
Started January 2015 by David Megginson
License: Public Domain
"""
import hxl_proxy
from hxl.io import HXLIOException
from hxl_proxy import admin, app, auth, cache, caching, dao, exceptions, filters, pcodes, preview, recipes, util, validate
import datetime, flask, hxl, io, json, logging, requests, requests_cache, werkzeug, csv, urllib
logger = logging.getLogger(__name__)
""" Python logger for this module """
SHEET_MAX_NO = 20
########################################################################
# Error handlers
#
# These functions handle exceptions that make it to the top level.
#
# The HXL Proxy uses exceptions for special purposes like redirections
# or authorisation as well as errors.
########################################################################
def handle_default_exception(e):
    """ Error handler: render an error response for any otherwise-unhandled exception.

    Applies to every exception without a more-specific handler below.
    The HTTP status depends on the exception type; the body format follows
    the request's negotiated output format (HTML page vs JSON payload).

    @param e: the exception being handled
    """
    if isinstance(e, (IOError, OSError)):
        # probably tried to open an inappropriate URL
        status = 403
    elif isinstance(e, werkzeug.exceptions.HTTPException):
        status = e.code
    elif isinstance(e, requests.exceptions.HTTPError):
        status = 404
    else:
        status = 500

    if flask.g.output_format == 'html':
        # Generic HTML error page
        return flask.render_template('error.html', e=e, category=type(e)), status

    # JSON/CSV output: JSON error payload with a CORS header attached
    response = flask.Response(util.make_json_error(e, status), mimetype='application/json', status=status)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response

# Register the general error handler UNLESS we're in debug mode
if not app.config.get('DEBUG'):
    app.register_error_handler(Exception, handle_default_exception)
def handle_redirect_exception(e):
    """ Error handler: perform the redirection requested via RedirectException.

    Workflow code raises exceptions.RedirectException whenever the proxy
    should jump to a different page (e.g. to /data/source when no URL is
    set yet), optionally flashing a message first.

    @param e: the exception being handled
    """
    message = e.message
    if message:
        flask.flash(message)
    return flask.redirect(e.target_url, e.http_code)

# Register the redirect exception handler
app.register_error_handler(exceptions.RedirectException, handle_redirect_exception)
def handle_source_authorization_exception(e):
    """ Error handler: the data source demands an authorisation token.

    Triggered e.g. when opening a private HDX dataset before the user has
    supplied their token; sends the user to the /data/save form to collect
    one.

    @param e: the exception being handled
    """
    if e.message:
        flask.flash(e.message)

    # flask.g.recipe_id covers a saved recipe whose formerly-public dataset
    # has become private; normally it is None (no saved recipe yet).
    recipe = recipes.Recipe(recipe_id=flask.g.recipe_id)

    # Ask the /data/save form to prompt for an authorisation token, noting
    # whether the resource looked like it came from CKAN/HDX.
    extra_args = {'need_token': 'on'}
    if e.is_ckan:
        extra_args['is_ckan'] = 'on'

    return flask.redirect(util.data_url_for('data_save', recipe=recipe, extras=extra_args), 302)

# register the source authorisation handler
app.register_error_handler(hxl.io.HXLAuthorizationException, handle_source_authorization_exception)
def handle_password_required_exception(e):
    """ Error handler: a saved recipe requires a password login.

    Passwords are per-recipe, not per-user: each saved recipe may carry its
    own password. Redirects to the login page, preserving the page (and
    query string) the user was trying to reach.

    @param e: the exception being handled
    """
    flask.flash("Login required")
    if not flask.g.recipe_id:
        raise Exception("Internal error: login but no saved recipe")
    destination = flask.request.path
    query = dict(flask.request.args)
    if query:
        destination = destination + "?" + urllib.parse.urlencode(query)
    return flask.redirect(util.data_url_for('data_login', recipe_id=flask.g.recipe_id, extras={"from": destination}), 303)

# register the password required handler
app.register_error_handler(werkzeug.exceptions.Unauthorized, handle_password_required_exception)
def handle_ssl_certificate_error(e):
    """ Error handler: SSL certificate verification failed.

    For HTML output, offer the user the option of disabling certificate
    verification via the /data/source page; otherwise return a JSON error.

    @param e: the exception being handled
    """
    if flask.g.output_format != "html":
        response = flask.Response(util.make_json_error(e, 400), mimetype='application/json', status=400)
        # add CORS header
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
    flask.flash("SSL error. If you understand the risks, you can check \"Don't verify SSL certificates\" to continue.")
    return flask.redirect(util.data_url_for('data_source', recipe=recipes.Recipe()), 302)

# register the SSL certificate verification handler
app.register_error_handler(requests.exceptions.SSLError, handle_ssl_certificate_error)
########################################################################
# Global pre-/post-controller functions
########################################################################
@app.before_request
def before_request():
    """Code to run immediately before every request is dispatched."""
    # grab the secret key (used by Flask to sign the session cookie)
    app.secret_key = app.config['SECRET_KEY']
    # choose the parameter storage class before parsing the GET parameters
    # (immutable + ordered, so filter parameters keep their submission order)
    flask.request.parameter_storage_class = werkzeug.datastructures.ImmutableOrderedMultiDict
    # grab the member info for Humanitarian.ID (not currently used)
    flask.g.member = flask.session.get('member_info')
    # select the default output format (controllers may change it)
    flask.g.output_format='html'
########################################################################
# Top-level page controllers
########################################################################
# has tests
@app.route("/about.html")
def about():
    """ Flask controller: render the about page.

    Shows version numbers for the major packages, so it's easy to tell
    what's deployed.
    """
    versions = {
        'hxl-proxy': hxl_proxy.__version__,
        'libhxl': hxl.__version__,
        'flask': flask.__version__,
        'requests': requests.__version__,
    }
    return flask.render_template('about.html', releases=versions)
########################################################################
# /data GET controllers
########################################################################
# has tests
@app.route("/data/<recipe_id>/login")
def data_login(recipe_id):
    """ Flask controller: prompt for a saved recipe's password.

    The user lands here after trying to alter a saved recipe; on a
    successful login they continue to the ?from= destination (defaulting
    to the recipe's edit page).

    @param recipe_id: the hash for a saved recipe
    """
    flask.g.recipe_id = recipe_id # for error handling
    recipe = recipes.Recipe(recipe_id)
    destination = flask.request.args.get('from') or util.data_url_for('data_edit', recipe)
    return flask.render_template('data-login.html', recipe=recipe, destination=destination)
# has tests
@app.route("/data/source")
@app.route("/data/<recipe_id>/source")
def data_source(recipe_id=None):
    """ Flask controller: let the user choose a new source URL.

    @param recipe_id: the hash for a saved recipe (or None if working from the command line)
    """
    flask.g.recipe_id = recipe_id # for error handling
    return flask.render_template('data-source.html', recipe=recipes.Recipe(recipe_id, auth=True))
# has tests
@app.route("/data/tagger")
@app.route("/data/<recipe_id>/tagger")
def data_tagger(recipe_id=None):
    """ Flask controller: add HXL tags to an untagged dataset

    The template renders differently depending on whether the user has
    already selected the last row of text headers (&header-row), so this
    is really two workflow steps in one controller.

    @param recipe_id: the hash for a saved recipe (or None if working from the command line)
    """
    flask.g.recipe_id = recipe_id # for error handling

    # Build the recipe from the GET params and/or the database
    recipe = recipes.Recipe(recipe_id, auth=True)

    # Workflow: if there's no source URL, redirect the user to /data/source
    if not recipe.url:
        logger.info("No URL supplied for /data/tagger; redirecting to /data/source")
        flask.flash('Please choose a data source first.')
        return flask.redirect(util.data_url_for('data_source', recipe), 303)

    # We have to collect the following properties manually, because we don't have a complete
    # HXLated recipe to open yet
    header_row = recipe.args.get('header-row')
    if header_row is not None:
        header_row = int(header_row)

    # Fixed: the former bare "except:" also swallowed KeyboardInterrupt and
    # SystemExit; only a missing/malformed &sheet should fall back to sheet 0.
    try:
        sheet_index = int(recipe.args.get('sheet', 0))
    except (TypeError, ValueError):
        logger.info("Assuming sheet 0, since none specified")
        sheet_index = 0

    selector = recipe.args.get('selector', None)

    # Set up a 25-row raw-data preview, using make_input from libhxl-python.
    # Named preview_rows (not "preview") to avoid shadowing the imported
    # hxl_proxy.preview module.
    preview_rows = []
    http_headers = {
        'User-Agent': 'hxl-proxy/tagger'
    }
    if 'authorization_token' in recipe.args: # private dataset
        http_headers['Authorization'] = recipe.args['authorization_token']
    row_count = 0
    for row in hxl.io.make_input(
            recipe.url,
            sheet_index=sheet_index,
            selector=selector,
            verify_ssl=util.check_verify_ssl(recipe.args),
            http_headers=http_headers
    ):
        # Stop after examining 25 raw rows (empty rows count toward the cap)
        if row_count >= 25:
            break
        row_count += 1
        if row:
            preview_rows.append(row)

    if header_row is not None:
        # The user has picked the header row: seed the tag-mapping form from it
        mappings = util.clean_tagger_mappings(preview_rows[header_row-1], recipe)
        mappings += [["", ""]] # room for an extra header in the form
    else:
        mappings = []

    # Draw the web page
    return flask.render_template('data-tagger.html', recipe=recipe, preview=preview_rows, header_row=header_row, mappings=mappings)
# has tests
@app.route("/data/edit")
@app.route("/data/<recipe_id>/edit", methods=['GET', 'POST'])
def data_edit(recipe_id=None):
    """Flask controller: create or edit a filter pipeline.
    Output for this page is never cached, but input may be.
    @param recipe_id: the hash for a saved recipe (or None if working from the command line)
    """
    flask.g.recipe_id = recipe_id # for error handling

    # Build the recipe from the GET params and/or the database
    recipe = recipes.Recipe(recipe_id, auth=True)

    # Workflow: if there's no source URL, redirect the user to /data/source
    if not recipe.url:
        flask.flash('Please choose a data source first.')
        logger.info("No URL supplied for /data/edit; redirecting to /data/source")
        return flask.redirect(util.data_url_for('data_source', recipe), 303)

    # show only a short preview (capped at 25 rows)
    max_rows = recipe.args.get('max-rows')
    max_rows = min(int(max_rows), 25) if max_rows is not None else 25

    # check whether we're stripping text headers
    show_headers = (recipe.args.get('strip-headers') != 'on')

    # Special handling: if the user has introduced an error in the filters,
    # catch it so that they have an opportunity to change the filters and try to
    # fix it.
    error = None
    try:
        source = preview.PreviewFilter(filters.setup_filters(recipe), max_rows=max_rows)
        source.columns  # touch columns to force the source open so errors surface here
    except exceptions.RedirectException as e1:
        # always pass through a redirect exception
        raise e1
    except hxl.io.HXLAuthorizationException as e2:
        # always pass through an authorization exception
        raise e2
    except Exception as e3:
        # anything else: render the recipe page with the error shown inline
        logger.exception(e3)
        error = e3
        source = None

    # Figure out how many filter forms to show: one blank form after the
    # last filter present in the args, up to the configured maximum
    filter_count = 0
    for n in range(1, filters.MAX_FILTER_COUNT):
        if recipe.args.get('filter%02d' % n):
            filter_count = n
    if filter_count < filters.MAX_FILTER_COUNT:
        filter_count += 1

    # Draw the web page
    return flask.render_template(
        'data-recipe.html',
        recipe=recipe,
        source=source,
        error=error,
        show_headers=show_headers,
        filter_count=filter_count
    )
# has tests
@app.route("/data/save")
@app.route("/data/<recipe_id>/save")
def data_save(recipe_id=None):
    """ Flask controller: create or update a saved recipe (with a short URL).

    Users get redirected here automatically when a source requires an
    "Authorization:" HTTP header (e.g. private HDX datasets). Renders a
    form that submits the recipe to be stored in the database under a
    short hash.

    @param recipe_id: the hash for a saved recipe (or None if working from the command line)
    """
    flask.g.recipe_id = recipe_id # for error handling
    recipe = recipes.Recipe(recipe_id, auth=True)

    # Workflow: without a source URL, send the user to /data/source first
    if not recipe.url:
        flask.flash('Please choose a data source first.')
        logger.info("No URL supplied for /data/save; redirecting to /data/source")
        return flask.redirect(util.data_url_for('data_source', recipe), 303)

    # Controller-specific properties for the template
    request_args = flask.request.args
    need_token = request_args.get('need_token') # we need an authentication token
    is_ckan = request_args.get('is_ckan') # the source looks like CKAN

    return flask.render_template('data-save.html', recipe=recipe, need_token=need_token, is_ckan=is_ckan)
# has tests
@app.route("/data/validate")
@app.route("/data/validate.<format>")
@app.route("/data/<recipe_id>/validate")
@app.route("/data/<recipe_id>/validate.<format>")
def data_validate(recipe_id=None, format='html'):
    """ Flask controller: validate a HXL dataset and show the results.

    Output is either a web-based HTML dashboard or JSON. The report itself
    is never cached, though the input may be.

    @param recipe_id: the hash for a saved recipe (or None if working from the command line)
    @param format: the selected output format (json or html)
    """
    # Globals used elsewhere (e.g. error handling)
    flask.g.recipe_id = recipe_id # for error handling
    flask.g.output_format = format # requested output format
    # Load the recipe
    recipe = recipes.Recipe(recipe_id)
    # Workflow guard: no source URL means nothing to validate
    if not recipe.url:
        flask.flash('Please choose a data source first.')
        logger.info("No URL supplied for /data/validate; redirecting to /data/source")
        return flask.redirect(util.data_url_for('data_source', recipe), 303)
    # Pick the HXL validation schema (explicit URL or library default)
    if recipe.schema_url:
        schema_source = hxl.data(
            recipe.schema_url,
            verify_ssl=util.check_verify_ssl(recipe.args),
            http_headers={'User-Agent': 'hxl-proxy/validation'}
        )
        logger.info("Using HXL validation schema at %s", recipe.schema_url)
    else:
        schema_source = None
        logger.info("No HXL validation schema specified; using default schema")
    # Run validation; libhxl-python returns a JSON-style report
    error_report = hxl.validate(
        filters.setup_filters(recipe),
        schema_source
    )
    # JSON rendering: dump the report as-is
    if format == 'json':
        return flask.Response(
            json.dumps(error_report, indent=4),
            mimetype="application/json"
        )
    # HTML rendering below this point
    severity_level = flask.request.args.get('severity', 'info')
    detail_hash = flask.request.args.get('details', None)
    template_name = 'validate-summary.html'
    selected_issue = None
    # A detail hash narrows the page to a single issue
    if detail_hash:
        logger.info("Showing validation-report detail")
        template_name = 'validate-issue.html'
        selected_issue = next(
            (candidate for candidate in error_report['issues']
             if candidate['rule_id'] == detail_hash),
            None
        )
    # Draw the web page
    return flask.render_template(
        template_name,
        recipe=recipe,
        schema_url=recipe.schema_url,
        error_report=error_report,
        issue=selected_issue,
        severity=severity_level
    )
# has tests
@app.route("/data/advanced")
@app.route("/data/<recipe_id>/advanced")
def show_advanced(recipe_id=None):
    """ Flask controller: developer page for entering a JSON recipe directly.

    Not linked from the main HXL Proxy navigation; a convenient sandbox for
    experimenting with the JSON-encoded recipes described at
    https://github.com/HXLStandard/hxl-proxy/wiki/JSON-recipes
    """
    return flask.render_template("data-advanced.html", recipe=recipes.Recipe(recipe_id))
# no tests
@app.route("/data/logs")
@app.route("/data/<recipe_id>/logs")
def data_logs(recipe_id=None):
    """ Flask controller: display the log view for a recipe.

    GET parameters:
      level - minimum log level to display (defaults to WARNING; upcased)
    """
    recipe = recipes.Recipe(recipe_id)
    log_level = flask.request.args.get('level', 'WARNING').upper()
    return flask.render_template("data-logs.html", recipe=recipe, level=log_level, in_logger=True)
# has tests
@app.route("/data")
@app.route("/data.<flavour>.<format>")
@app.route("/data.<format>")
@app.route("/data/download/<stub>.<flavour>.<format>")
@app.route("/data/download/<stub>.<format>")
@app.route("/data/<recipe_id>.<flavour>.<format>")
@app.route("/data/<recipe_id>.<format>")
@app.route("/data/<recipe_id>/download/<stub>.<flavour>.<format>")
@app.route("/data/<recipe_id>/download/<stub>.<format>")
@app.route("/data/<recipe_id>") # must come last, or it will steal earlier patterns
@cache.cached(key_prefix=util.make_cache_key, unless=util.skip_cache_p)
def data_view(recipe_id=None, format="html", stub=None, flavour=None):
    """ Flask controller: render a transformed dataset
    This is the controller that requests will hit most of the time.
    It renders a transformed dataset as an HTML web page, a JSON
    list of lists, or a JSON list of objects, based on the URL. Note that
    the URL patterns above allow for custom-named download files
    as well as generic downloads, hence the wide variety of patterns.
    This controller MUST come after all the other /data controllers, or
    else Flask will get confused.
    This is a tricky controller to understand, for a few reasons:
    1. It can render output in several different formats
    2. It optionally caches the output
    3. It includes a CORS HTTP header
    4. Most of the work happens inside a nested function, to simplify caching
    5. It may specify a download file name, based on the stub property
    Grab a cup of tea, and work your way through the code slowly. :)
    @param recipe_id: the hash for a saved recipe (or None if working from the command line)
    @param format: the selected output format (json or html)
    @param stub: the root filename for download, if supplied
    @param flavour: the JSON flavour, if supplied (will be "objects")
    """
    flask.g.recipe_id = recipe_id # for error handling
    # Use an internal function to generate the output.
    # That simplifies the control flow, so that we can easily
    # capture the output regardless of chosen format, and
    # cache it as needed. Most of the controller's code
    # is inside this function.
    def get_result ():
        """ Internal output-generation function.
        Closes over recipe_id, format, flavour from the enclosing scope.
        @returns: a flask.Response (or rendered template / redirect) for the request
        """
        flask.g.output_format = format
        # Set up the data source from the recipe
        recipe = recipes.Recipe(recipe_id, auth=False)
        # Workflow: if there's no source URL, redirect the user to /data/source
        if not recipe.url:
            flask.flash('Please choose a data source first.')
            return flask.redirect(util.data_url_for('data_source', recipe), 303)
        # Use input caching if requested
        if util.skip_cache_p():
            source = filters.setup_filters(recipe)
        else:
            with caching.input():
                source = filters.setup_filters(recipe)
        # Parameters controlling the output
        show_headers = (recipe.args.get('strip-headers') != 'on')
        # max_rows is a string from the recipe args (or None); converted below
        max_rows = recipe.args.get('max-rows', None)
        # Return a generator based on the format requested
        # Render a web page
        if format == 'html':
            # cap output at 5,000 rows for HTML
            max_rows = min(int(max_rows), 5000) if max_rows is not None else 5000
            return flask.render_template(
                'data-view.html',
                source=preview.PreviewFilter(source, max_rows=max_rows),
                recipe=recipe,
                show_headers=show_headers
            )
        # Data formats from here on ...
        # Limit the number of output rows *only* if requested
        if max_rows is not None:
            source = preview.PreviewFilter(source, max_rows=int(max_rows))
        # Render JSON output (list of lists or list of objects)
        if format == 'json':
            # list() materialises the generator so the response body is cacheable
            response = flask.Response(
                list(
                    source.gen_json(show_headers=show_headers, use_objects=(flavour=='objects'))
                ),
                mimetype='application/json'
            )
        # Render CSV output
        else:
            response = flask.Response(list(source.gen_csv(show_headers=show_headers)), mimetype='text/csv')
        # Include a CORS header for cross-origin data access
        response.headers['Access-Control-Allow-Origin'] = '*'
        # Set the file download name if &stub is present
        if recipe.stub:
            response.headers['Content-Disposition'] = 'attachment; filename={}.{}'.format(recipe.stub, format)
        # Return the response object
        return response
    # end of internal function
    # Get the result and update the cache manually if we're skipping caching.
    result = get_result()
    # Update the cache even if caching is turned off
    # (this might not be working yet)
    # NOTE(review): this writes to the cache precisely when skip_cache_p() is
    # true — presumably intentional "refresh the cache on forced reload", but
    # worth confirming against the cache decorator above.
    if util.skip_cache_p():
        cache.set(util.make_cache_key(), result)
    # return the response object that will render the output
    return result
#########################################################################
# Primary action POST controllers
# These are URLs that are not bookmarkable.
########################################################################
# needs tests
@app.route("/actions/login", methods=['POST'])
def do_data_login():
    """ Flask controller: record a dataset password and bounce back.

    This is a DATASET login, not a user login: the password hash is simply
    stored in the session cookie, so switching to a dataset with a different
    password means logging in again.

    POST parameters:
      from - the origin URL (return there after login)
      password - the clear-text password
    """
    # Where do we go afterwards? Default to the main data view.
    destination = flask.request.form.get('from') or util.data_url_for('data_view')
    # Stash the MD5 of the password in the session; nothing is checked here
    flask.session['passhash'] = util.make_md5(flask.request.form.get('password'))
    # Retry the original page, now carrying the password hash in the cookie
    return flask.redirect(destination, 303)
# needs tests
@app.route("/actions/save-recipe", methods=['POST'])
def do_data_save():
    """ Flask controller: create or update a saved recipe
    The saved recipe has all of its parameters in the database, and is
    identified by a short hash. The user needs to supply a password
    to edit it.
    Post parameters:
      recipe_id - the short hash identifying the recipe (blank to create a new one)
      name - the recipe's title (optional)
      description - the recipe's long description (optional)
      cloneable - a flag indicating whether a user may clone the saved recipe (optional; defaults to "on")
      stub - a root filename for downloading (optional)
      password - a clear-text password (optional for a new saved recipe)
      password_repeat - repeated clear-text password (optional for a new saved recipe)
      (Will also include all of the other HXL Proxy recipe arguments as hidden parameters)
    """
    # FIXME - move somewhere else
    # Properties that should never appear in a recipe's args dictionary.
    # BUG FIX: 'details' previously lacked a trailing comma, so implicit
    # string concatenation fused it with 'name' into 'detailsname' and
    # neither 'details' nor 'name' was actually excluded from the args.
    RECIPE_ARG_EXCLUDES = [
        'cloneable',
        'description',
        'dest',
        'details',
        'name',
        'passhash',
        'password',
        'password-repeat',
        'recipe_id',
        'severity',
        'stub',
    ]
    def _set_passhash(recipe, password, password_repeat):
        """ Record the passhash for a matching password pair, or raise 400. """
        if password != password_repeat:
            raise werkzeug.exceptions.BadRequest("Passwords don't match")
        recipe.passhash = util.make_md5(password)
        flask.session['passhash'] = recipe.passhash
    # We will have a recipe_id if we're updating an existing pipeline
    recipe_id = flask.request.form.get('recipe_id')
    flask.g.recipe_id = recipe_id # for error handling
    recipe = recipes.Recipe(recipe_id, auth=True, request_args=flask.request.form)
    destination_facet = flask.request.form.get('dest', 'data_view')
    # Update recipe metadata
    # Note that an empty/unchecked value will be omitted from the form
    if 'name' in flask.request.form:
        recipe.name = flask.request.form['name']
    if 'description' in flask.request.form:
        recipe.description = flask.request.form['description']
    else:
        recipe.description = ''
    if 'cloneable' in flask.request.form and not flask.request.form.get('authorization_token') and flask.request.form['cloneable'] == "on":
        recipe.cloneable = True
    else:
        recipe.cloneable = False
    if 'stub' in flask.request.form:
        recipe.stub = flask.request.form['stub']
    else:
        recipe.stub = ''
    # Merge changed values (everything except the excluded metadata keys)
    recipe.args = {}
    for name in flask.request.form:
        if name not in RECIPE_ARG_EXCLUDES:
            recipe.args[name] = flask.request.form.get(name)
    # Check for a password change
    password = flask.request.form.get('password')
    password_repeat = flask.request.form.get('password-repeat')
    if recipe_id:
        # Updating an existing recipe: change the password only if one was supplied
        if password:
            _set_passhash(recipe, password, password_repeat)
        dao.recipes.update(recipe.toDict())
    else:
        # Creating a new recipe: always record the (possibly empty) password
        _set_passhash(recipe, password, password_repeat)
        recipe_id = dao.make_recipe_id()
        recipe.recipe_id = recipe_id
        dao.recipes.create(recipe.toDict()) # FIXME move save functionality to Recipe class
        # FIXME other auth information is in __init__.py
        flask.session['passhash'] = recipe.passhash
    # Clear the entire HXL Proxy cache to avoid stale data (!!!)
    # TODO: be more targeted here
    cache.clear()
    # Redirect to the /data view page
    return flask.redirect(util.data_url_for(destination_facet, recipe), 303)
# has tests
@app.route("/actions/validate", methods=['POST'])
def do_data_validate():
    """ Flask controller: validate an uploaded file against an uploaded HXL schema
    This controller was created for HDX Data Check, which is the only known user.
    The controller returns a JSON validation report from libhxl-python.
    Post parameters:
      url - the URL of the data to validate (required unless "content" is specified)
      content - a file attachment with the HXL content (required unless "url" is specified)
      sheet_index - the 0-based index of the tab in a dataset Excel sheet
      selector - the top-level key for a JSON dataset
      schema_url - the URL of the HXL schema to use (optional; exclusive with "schema_content")
      schema_content - a file attachment with the HXL schema to use (optional; exclusive with "schema_url")
      schema_sheet_index - the 0-based index of the tab in a schema Excel sheet
      include_dataset - if specified, include the original dataset in the JSON validation result
    """
    flask.g.output_format = 'json' # for error reporting
    # dataset-related POST parameters
    url = flask.request.form.get('url')
    content = flask.request.files.get('content')
    content_hash = None
    if content is not None:
        # need a hash of the content for caching
        content_hash = util.make_file_hash(content)
    sheet_index = flask.request.form.get('sheet', None)
    if sheet_index is not None:
        try:
            sheet_index = int(sheet_index)
        # BUG FIX: was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit; only conversion errors are expected
        except (TypeError, ValueError):
            logger.warning("Bad sheet index: %s", flask.request.form.get('sheet'))
            sheet_index = None
    selector = flask.request.form.get('selector', None)
    # schema-related POST parameters
    schema_url = flask.request.form.get('schema_url')
    schema_content = flask.request.files.get('schema_content')
    schema_content_hash = None
    if schema_content is not None:
        # need a hash of the schema content for caching
        schema_content_hash = util.make_file_hash(schema_content)
    schema_sheet_index = flask.request.form.get('schema_sheet', None)
    if schema_sheet_index is not None:
        try:
            schema_sheet_index = int(schema_sheet_index)
        except (TypeError, ValueError):
            logger.warning("Bad schema_sheet index: %s", flask.request.form.get('schema_sheet'))
            schema_sheet_index = None
    # general POST parameters
    include_dataset = flask.request.form.get('include_dataset', False)
    # run the validation and save a report
    # caching happens in the util.run_validation() function
    report = validate.run_validation(
        url, content, content_hash, sheet_index, selector,
        schema_url, schema_content, schema_content_hash, schema_sheet_index,
        include_dataset
    )
    # return a JSON version of the validation report as an HTTP response
    # (make sure we have a CORS header)
    response = flask.Response(
        json.dumps(
            report,
            indent=4
        ),
        mimetype='application/json'
    )
    # add the CORS header for cross-origin compatibility
    response.headers['Access-Control-Allow-Origin'] = '*'
    # render the JSON response
    return response
# needs tests
# NOTE: This is an experiment that's probably not used anywhere right now
# We may choose to remove it
@app.route('/actions/json-spec', methods=['POST'])
def do_json_recipe():
    """ POST handler to execute a JSON recipe
    This POST endpoint allows the user to upload a JSON HXL recipe
    and execute it. The endpoint does NOT currently allow uploading
    a file (but we should add that, to support private datasets).
    POST parameters:
      recipe - a file upload containing a JSON recipe
      format - "csv" or "json"
      show_headers - if specified, include text headers in the output
      use_objects - if specified, use JSON list of objects format
      stub - root filename for downloads
    Information on JSON recipes is available at
    https://github.com/HXLStandard/hxl-proxy/wiki/JSON-recipes
    """
    # Get the JSON recipe as a file attachment
    json_recipe_file = flask.request.files.get('recipe', None)
    if json_recipe_file is None:
        raise werkzeug.exceptions.BadRequest("Parameter 'recipe' is required")
    json_recipe = json.load(json_recipe_file.stream)
    # Other parameters
    format = flask.request.form.get('format', 'csv')
    show_headers = False if flask.request.form.get('show_headers', None) is None else True
    use_objects = False if flask.request.form.get('use_objects', None) is None else True
    stub = flask.request.form.get('stub', 'data')
    # Set global output format for error reporting
    # BUG FIX: previously assigned the literal string 'format' instead of
    # the format variable, so error reporting always saw "format"
    flask.g.output_format = format
    # Create a HXL filter chain by parsing the JSON recipe
    source = hxl.io.from_spec(json_recipe)
    # Create a JSON or CSV response object, as requested
    if format == 'json':
        response = flask.Response(
            source.gen_json(show_headers=show_headers, use_objects=use_objects),
            mimetype='application/json'
        )
    else:
        response = flask.Response(
            source.gen_csv(show_headers=show_headers),
            mimetype='text/csv'
        )
    # Add the CORS header for cross-origin compatibility
    response.headers['Access-Control-Allow-Origin'] = '*'
    # If a stub is specified, use it to define a download filename
    if stub:
        response.headers['Content-Disposition'] = 'attachment; filename={}.{}'.format(stub, format)
    # Render the output
    return response
########################################################################
# Humanitarian.ID controllers
# (not in active use as of 2019-04)
########################################################################
@app.route('/login')
def hid_login():
    """ Flask controller: start a Humanitarian.ID login.

    Unlike /data/login (which password-protects a DATASET), this logs in a
    USER. In the future, saved datasets could be associated with users so
    they can be managed without per-dataset passwords.

    GET parameters:
      from - the URL of the page from which we were redirected
    """
    # Remember where to send the user after a successful login
    origin = flask.request.args.get('from', '/')
    flask.session['login_redirect'] = origin
    # The actual login form lives on Humanitarian.ID
    return flask.redirect(auth.get_hid_login_url(), 303)
@app.route('/logout')
def hid_logout():
    """ Flask controller: end the user's Humanitarian.ID session.

    GET parameters:
      from - the URL of the page from which we were redirected
    """
    # where the user was when they chose to log out
    return_path = flask.request.args.get('from', '/')
    # dropping the whole session also drops the login cookie
    flask.session.clear()
    flask.flash("Disconnected from your Humanitarian.ID account (browsing anonymously).")
    return flask.redirect(return_path, 303)
# not currently in use (until we reactivate H.ID support)
@app.route('/settings/user')
def user_settings():
    """ Flask controller: show the user's settings from Humanitarian.ID.

    If no user is logged in, redirect to the login flow and come back here.
    """
    if flask.g.member:
        return flask.render_template('settings-user.html', member=flask.g.member)
    else:
        # redirect back to the settings page after login
        # ('from' is reserved, so we need a bit of a workaround)
        args = { 'from': util.data_url_for('user_settings') }
        # BUG FIX: url_for was referenced bare; this module accesses Flask
        # through the flask namespace (cf. home()), so the bare name would
        # raise NameError at request time
        return flask.redirect(flask.url_for('login', **args), 303)
@app.route('/oauth/authorized2/1')
def do_hid_authorisation():
    """Flask controller: accept an OAuth2 token after successful login via Humanitarian.ID
    GET parameters:
      code - the OAuth2 token
      state - the state that we originally passed to Humanitarian.ID (for verification)
    """
    # grab the token
    code = flask.request.args.get('code')
    # grab the state and check if it's the same as the one we saved in a session cookie
    state = flask.request.args.get('state')
    if state != flask.session.get('state'):
        raise Exception("Security violation: inconsistent state returned from humanitarian.id login request")
    # if OK, clear the session cookie
    flask.session['state'] = None
    # Look up extra info from Humanitarian.ID
    user_info = auth.get_hid_user(code) # look up user info from Humanitarian.ID
    flask.session['member_info'] = user_info
    flask.flash("Connected to your Humanitarian.ID account as {}".format(user_info.get('name')))
    # Try to bring the user back where s/he started.
    # BUG FIX: the old code read the value with a default and then deleted
    # the key unconditionally, raising KeyError when 'login_redirect' was
    # absent; pop() reads and removes in one safe step.
    redirect_path = flask.session.pop('login_redirect', '/')
    return flask.redirect(redirect_path, 303)
########################################################################
# /admin controllers
########################################################################
# needs tests
@app.route("/admin/login")
def admin_login():
    """ Show the login form for the admin functions. """
    return flask.render_template('admin-login.html')
# needs tests
@app.route("/admin/recipes/<recipe_id>/")
def admin_recipe_view(recipe_id):
    """ Admin view of a single saved recipe. """
    admin.admin_auth()
    recipe = recipes.Recipe(recipe_id, auth=False)
    # recipes carrying an authorization token get no clone link
    has_token = 'authorization_token' in recipe.args
    clone_url = None if has_token else util.data_url_for('data_view', recipe, cloned=True)
    return flask.render_template('admin-recipe-view.html', recipe=recipe, clone_url=clone_url)
# needs tests
@app.route("/admin/recipes/<recipe_id>/edit.html")
def admin_recipe_edit(recipe_id):
    """ Admin form for editing a saved recipe. """
    admin.admin_auth()
    recipe = recipes.Recipe(recipe_id, auth=False)
    # show the recipe args as pretty-printed JSON in the form
    pretty_args = json.dumps(recipe.args, indent=4)
    return flask.render_template('admin-recipe-edit.html', recipe=recipe, args=pretty_args)
# needs tests
@app.route("/admin/recipes/<recipe_id>/delete.html")
def admin_recipe_delete(recipe_id):
    """ Admin confirmation page for deleting a saved recipe. """
    admin.admin_auth()
    return flask.render_template('admin-recipe-delete.html',
                                 recipe=recipes.Recipe(recipe_id, auth=False))
# needs tests
@app.route("/admin/recipes/")
def admin_recipe_list():
    """ List all saved recipes (admin only). """
    admin.admin_auth()
    # renamed from "recipes": the old local shadowed the module-level
    # recipes package used throughout this file
    saved_recipes = admin.admin_get_recipes()
    return flask.render_template('admin-recipe-list.html', recipes=saved_recipes)
# needs tests
@app.route("/admin/")
def admin_root():
    """ Landing page for the admin section. """
    admin.admin_auth()
    return flask.render_template('admin-root.html')
# needs tests
@app.route("/admin/actions/login", methods=['POST'])
def do_admin_login():
    """ POST controller: authenticate for admin functions. """
    admin.do_admin_login(flask.request.form.get('password'))
    flask.flash("Logged in as admin")
    return flask.redirect('/admin/', 303)
# needs tests
@app.route("/admin/actions/logout", methods=['POST'])
def do_admin_logout():
    """ POST controller: end the admin session and return to the data source page. """
    admin.admin_auth()
    admin.do_admin_logout()
    flask.flash("Logged out of admin functions")
    return flask.redirect('/data/source', 303)
# needs tests
@app.route("/admin/actions/update-recipe", methods=['POST'])
def do_admin_update_recipe():
    """ POST controller: apply form edits to a saved recipe (admin only). """
    admin.admin_auth()
    form = flask.request.form
    rid = form.get('recipe_id')
    admin.do_admin_update_recipe(dict(form))
    flask.flash("Updated recipe {}".format(rid))
    return flask.redirect('/admin/recipes/{}/'.format(rid), 303)
# needs tests
@app.route("/admin/actions/delete-recipe", methods=['POST'])
def do_admin_delete_recipe():
    """ POST controller: delete a saved recipe (admin only). """
    admin.admin_auth()
    recipe_id = flask.request.form.get('recipe_id')
    admin.do_admin_delete_recipe(recipe_id)
    flask.flash("Deleted recipe {}".format(recipe_id))
    # fixed a misleading no-op: the old code called
    # '/admin/recipes/'.format(recipe_id) on a string with no placeholder
    return flask.redirect('/admin/recipes/', 303)
########################################################################
# Controllers for extra API calls
#
# Migrating to /api (gradually)
#
# None of this is core to the Proxy's function, but this is a convenient
# place to keep it.
########################################################################
@app.route("/api/from-spec.<format>")
def from_spec(format="json"):
    """ Use a JSON HXL spec
    Executes a JSON-encoded HXL processing spec supplied inline
    (&spec-json) or by reference (&spec-url), and returns the result
    as json, objects.json, or csv. With format "html", renders an
    input form instead of executing anything.
    Not cached
    GET parameters:
        format - output-format override (ignored when the URL format is html)
        spec-url - URL of a JSON spec (exclusive with spec-json)
        spec-json - inline JSON spec text (exclusive with spec-url)
        filename - if present, sets the download filename
        force - NOTE(review): only passed through to the HTML form here;
                presumably a cache-bypass flag handled elsewhere -- confirm
    @param format: html, json, objects.json, or csv
    @raises ValueError: when both or neither of spec-url/spec-json are given,
        or the output format is unsupported
    """
    # allow format override
    if format != "html":
        format = flask.request.args.get("format", format)
    flask.g.output_format = format
    # other args
    verify_ssl = util.check_verify_ssl(flask.request.args)
    http_headers = {
        'User-Agent': 'hxl-proxy/download'
    }
    filename = flask.request.args.get('filename')
    force = flask.request.args.get("force")
    # check arg logic
    spec_url = flask.request.args.get("spec-url")
    spec_json = flask.request.args.get("spec-json")
    spec = None
    # html short-circuits before any spec validation: it only shows the form
    if format == "html":
        return flask.render_template(
            'api-from-spec.html',
            spec_json=spec_json,
            spec_url=spec_url,
            verify_ssl=verify_ssl,
            filename=filename,
            force=force
        )
    elif spec_url and spec_json:
        raise ValueError("Must specify only one of &spec-url or &spec-json")
    elif spec_url:
        # fetch the spec by reference
        spec_response = requests.get(spec_url, verify=verify_ssl, headers=http_headers)
        spec_response.raise_for_status()
        spec = spec_response.json()
    elif spec_json:
        spec = json.loads(spec_json)
    else:
        raise ValueError("Either &spec-url or &spec-json required")
    # process the JSON spec
    source = hxl.io.from_spec(spec)
    # produce appropriate output
    # (header/tag visibility defaults come from the spec itself)
    if format == "json":
        response = flask.Response(
            source.gen_json(
                show_headers=spec.get("show_headers", True),
                show_tags=spec.get("show_tags", True),
                use_objects=False
            ),
            mimetype="application/json"
        )
    elif format == "objects.json":
        response = flask.Response(
            source.gen_json(
                show_headers=spec.get("show_headers", True),
                show_tags=spec.get("show_tags", True),
                use_objects=True
            ),
            mimetype="application/json"
        )
    elif format == "csv":
        response = flask.Response(
            source.gen_csv(
                show_headers=spec.get("show_headers", True),
                show_tags=spec.get("show_tags", True)
            ),
            mimetype="text/csv"
        )
    else:
        raise ValueError("Unsupported output format {}".format(format))
    # Add CORS header and return
    response.headers['Access-Control-Allow-Origin'] = '*'
    # Set the file download name if &filename is present
    if filename:
        response.headers['Content-Disposition'] = 'attachment; filename={}'.format(filename)
    return response
# needs tests
@app.route("/api/hxl-test.<format>")
@app.route("/api/hxl-test")
@app.route("/hxl-test.<format>") # legacy path
@app.route("/hxl-test") # legacy path
def hxl_test(format='html'):
    """ Flask controller: check whether a resource carries HXL hashtags.

    GET parameters:
      url - the URL of the resource to check
    @param format: the format for rendering the result.
    """
    flask.g.output_format = format # save the data format for error reporting
    url = flask.request.args.get('url')
    # non-HTML formats require a URL; HTML without one just shows the form
    if not url and (format != 'html'):
        raise ValueError("&url parameter required")
    if url:
        # status report, filled in below
        report = {
            'status': False,
            'url': url
        }
        def note_exception(exc):
            """ Fold an exception's details into the report """
            report['exception'] = exc.__class__.__name__
            report['args'] = [str(a) for a in exc.args]
        try:
            # reading .columns forces the lazy parser to run
            hxl.data(
                url,
                verify_ssl=util.check_verify_ssl(flask.request.args),
                http_headers={'User-Agent': 'hxl-proxy/test'}
            ).columns
            # reaching this point means the dataset parsed as HXL
            report['status'] = True
            report['message'] = 'Dataset has HXL hashtags'
        except IOError as err:
            # couldn't open the resource at all
            report['message'] = 'Cannot load dataset'
            note_exception(err)
        except hxl.io.HXLTagsNotFoundException as err:
            # opened fine, but no HXL hashtag row
            report['message'] = 'Dataset does not have HXL hashtags'
            note_exception(err)
        except BaseException as err:
            # anything else
            report['message'] = 'Undefined error'
            note_exception(err)
    else:
        # no URL, so no result to report
        report = None
    if format == 'json':
        return flask.Response(json.dumps(report), mimetype='application/json')
    return flask.render_template('hxl-test.html', result=report)
# has tests
@app.route('/api/data-preview.<format>')
#@cache.cached(key_prefix=util.make_cache_key, unless=util.skip_cache_p) # can't cache generator output
def data_preview (format="json"):
    """ Return a raw-data preview of any data source supported by the HXL Proxy
    Does not attempt HXL processing.
    GET parameters:
        url - the source URL (without it, redirects to the HTML form)
        sheet - 0-based Excel sheet index (optional)
        rows - maximum number of rows to emit (optional; -1 means unlimited)
        filename - if present, sets the download filename
        force - NOTE(review): only passed to the HTML template here; presumably
            a cache-bypass flag handled by the form -- confirm
    @param format: html, json, objects.json, or csv
    """
    # NOTE: the three generators below close over ``input`` and ``rows`` from
    # the enclosing scope; they run lazily, after those names are assigned.
    def json_generator ():
        """ Generate JSON output (list of rows), row by row """
        counter = 0
        yield '['
        for row in input:
            # rows <= 0 means no limit
            if rows > 0 and counter >= rows:
                break
            if counter == 0:
                line = "\n  "
            else:
                line = ",\n  "
            counter += 1
            line += json.dumps(row)
            yield line
        yield "\n]"
    def json_object_generator ():
        """ Generate JSON object-style output, row by row
        Uses the first row as the object keys for all later rows.
        """
        counter = 0
        headers = None
        yield '['
        for row in input:
            if headers is None:
                # first row supplies the keys; it is not emitted itself
                headers = row
                continue
            if rows > 0 and counter >= rows:
                break
            if counter == 0:
                line = "\n  "
            else:
                line = ",\n  "
            counter += 1
            # note: shadows the builtin name "object" (pre-existing)
            object = {}
            for i, header in enumerate(headers):
                # skip blank headers and short rows
                if header and i < len(row):
                    object[header] = row[i]
            line += json.dumps(object)
            yield line
        yield "\n]"
    def csv_generator ():
        """ Generate CSV output, row by row """
        counter = 0
        for row in input:
            if rows > 0 and counter >= rows:
                break
            counter += 1
            output = io.StringIO()
            csv.writer(output).writerow(row)
            s = output.getvalue()
            output.close()
            yield s
    # allow overriding the format in a parameter (useful for forms)
    if "format" in flask.request.args and format != "html":
        format = flask.request.args.get("format")
    flask.g.output_format = format # for error reporting
    # params
    url = flask.request.args.get('url')
    sheet = flask.request.args.get('sheet')
    if sheet is not None:
        sheet = int(sheet)
    rows = flask.request.args.get('rows')
    if rows is not None:
        rows = int(rows)
    force = flask.request.args.get('force')
    filename = flask.request.args.get('filename')
    # html renders the interactive form without touching the data
    if format == "html":
        return flask.render_template('api-data-preview.html', url=url, sheet=sheet, rows=rows, filename=filename, force=force)
    # if there's no URL, then show an interactive form
    if not url:
        return flask.redirect('/api/data-preview.html', 302)
    # fix up params
    # if not sheet:
    #     sheet = -1
    #
    if not rows:
        rows = -1
    # make input (optionally through the input cache)
    if util.skip_cache_p():
        input = hxl.io.make_input(url, sheet_index=sheet)
    else:
        with caching.input():
            input = hxl.io.make_input(url, sheet_index=sheet)
    # Generate result
    if format == 'json':
        response = flask.Response(json_generator(), mimetype='application/json')
    elif format == 'objects.json':
        response = flask.Response(json_object_generator(), mimetype='application/json')
    elif format == 'csv':
        response = flask.Response(csv_generator(), mimetype='text/csv')
    else:
        raise ValueError("Unsupported &format {}".format(format))
    # Add CORS header and return
    response.headers['Access-Control-Allow-Origin'] = '*'
    # Set the file download name if &filename is present
    if filename:
        response.headers['Content-Disposition'] = 'attachment; filename={}'.format(filename)
    return response
# has no tests
@app.route('/api/data-preview-sheets.<format>')
# @cache.cached(key_prefix=util.make_cache_key, unless=util.skip_cache_p) # can't cache generator output
def data_preview_sheets(format="json"):
    """ Return names only for the sheets in an Excel workbook.
    In case of csv it returns one sheet name 'Default'
    You must use data_preview to get the actual sheet contents.
    GET parameters:
        url - the source URL (required)
    @param format: json or csv
    @raises ValueError: when &url is missing or the format is unsupported
    """
    # NOTE: both generators close over ``input`` and ``rows``; by the time
    # they run, ``input`` has been rebound to the list of sheet names and
    # ``rows`` is -1 (no limit), so each "row" below is a sheet-name string.
    def json_generator():
        """ Generate JSON output, one sheet name per row """
        counter = 0
        yield '['
        for row in input:
            if rows > 0 and counter >= rows:
                break
            if counter == 0:
                line = "\n  "
            else:
                line = ",\n  "
            counter += 1
            line += json.dumps(row)
            yield line
        yield "\n]"
    def csv_generator():
        """ Generate CSV output, one sheet name per row """
        counter = 0
        for row in input:
            if rows > 0 and counter >= rows:
                break
            counter += 1
            output = io.StringIO()
            # wrap in a list: each row is a single sheet-name cell
            csv.writer(output).writerow([row])
            s = output.getvalue()
            output.close()
            yield s
    flask.g.output_format = format # for error reporting
    # params
    url = flask.request.args.get('url')
    if not url:
        raise ValueError("&url parameter required")
    rows = -1
    # Probe sheet indexes in turn until the library reports there are no more.
    # SHEET_MAX_NO and HXLIOException are defined elsewhere in this module --
    # presumably an upper bound on sheets and the libhxl I/O error type.
    _output = []
    try:
        for sheet in range(0, SHEET_MAX_NO):
            if util.skip_cache_p():
                input = hxl.io.make_input(url, sheet_index=sheet)
            else:
                with caching.input():
                    input = hxl.io.make_input(url, sheet_index=sheet)
            if isinstance(input, hxl.io.CSVInput):
                # CSV has no sheets: report a single placeholder name
                _output.append("Default")
                break
            else:
                # NOTE(review): reaches into the private _sheet attribute
                if input._sheet and input._sheet.name:
                    _output.append(input._sheet.name)
                else:
                    _output.append(str(sheet))
    except HXLIOException as ex:
        # probing past the last sheet raises; that ends the scan
        logger.debug("Found the last sheet of the Excel file")
    # Generate result: rebind input to the collected sheet names
    input = _output
    if format == 'json':
        response = flask.Response(json_generator(), mimetype='application/json')
    elif format == 'csv':
        response = flask.Response(csv_generator(), mimetype='text/csv')
    else:
        raise ValueError("Unsupported &format {}".format(format))
    # Add CORS header and return
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
# has tests
@app.route('/api/pcodes/<country>-<level>.csv')
@app.route('/pcodes/<country>-<level>.csv') # legacy path
@cache.cached(timeout=604800) # 1 week cache
def pcodes_get(country, level):
    """ Flask controller: fetch a CSV list of P-codes from iTOS.

    @param country: the ISO3 country code
    @param level: the admin level (e.g. "adm2")
    """
    flask.g.output_format = 'csv' # for error reporting
    # collect the CSV output in an in-memory buffer
    buffer = io.StringIO()
    try:
        pcodes.extract_pcodes(country, level, buffer)
        response = flask.Response(buffer.getvalue(), mimetype='text/csv')
    finally:
        buffer.close()
    # CORS header for cross-origin support
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
# has tests
@app.route('/api/hash')
@app.route('/hash') # legacy path
def make_hash():
    """ Flask controller: hash a HXL dataset
    GET parameters:
    url - the URL of the dataset to check
    headers_only - if specified, hash only the headers, not the content
    """
    # errors from here on are reported as JSON
    flask.g.output_format = 'json'

    # Without a URL, show the interactive HTML form instead
    url = flask.request.args.get('url')
    if not url:
        return flask.render_template('hash.html')

    headers_only = flask.request.args.get('headers_only')

    # Open the dataset and pick the requested hash
    source = hxl.data(url)
    if headers_only:
        digest = source.columns_hash
    else:
        digest = source.data_hash

    report = {
        'hash': digest,
        'url': url,
        'date': datetime.datetime.utcnow().isoformat(),
        'headers_only': bool(headers_only),
        'headers': source.headers,
        'hashtags': source.display_tags,
    }

    return flask.Response(
        json.dumps(report, indent=4),
        mimetype="application/json"
    )
########################################################################
# Controllers for removed features (display error messages)
########################################################################
# needs tests
@app.route("/")
def home():
    """ Flask controller: nothing currently at root
    Redirect to the /data/source page
    """
    # 302 (temporary) rather than 301: the home target may change again
    target = flask.url_for("data_source", **flask.request.args)
    return flask.redirect(target, 302)
# has tests
@app.route('/data/<recipe_id>/chart')
@app.route('/data/chart')
def data_chart(recipe_id=None):
    """ Flask controller: discontinued charting endpoint """
    # 410 Gone: the feature was removed on purpose
    message = "The HXL Proxy no longer supports basic charts. Please visit <a href='https://tools.humdata.org/'>tools.humdata.org</a>"
    return message, 410
# has tests
@app.route('/data/<recipe_id>/map')
@app.route('/data/map')
def data_map(recipe_id=None):
    """ Flask controller: discontinued mapping endpoint """
    # 410 Gone: the feature was removed on purpose
    message = "The HXL Proxy no longer supports basic maps. Please visit <a href='https://tools.humdata.org/'>tools.humdata.org</a>"
    return message, 410
# end
| HXLStandard/hxl-proxy | hxl_proxy/controllers.py | Python | unlicense | 54,810 | [
"VisIt"
] | e342a1dc62b94449d8e5d72fac48f78ef0e18a79b5a72342a69fe081d15c6fc1 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._hybrid_network_management_client_enums import *
class CustomProfile(msrest.serialization.Model):
    """Specifies the custom settings for the virtual machine.

    :param metadata_configuration_path: Path for metadata configuration.
    :type metadata_configuration_path: str
    """

    _attribute_map = {
        'metadata_configuration_path': {'key': 'metadataConfigurationPath', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        metadata_configuration_path: Optional[str] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(CustomProfile, self)
        super().__init__(**kwargs)
        self.metadata_configuration_path = metadata_configuration_path
class DataDisk(msrest.serialization.Model):
    """Specifies information about the operating system disk used by the virtual machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs for Azure virtual machines <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.

    :param create_option: Specifies how the virtual machine should be created. Possible values
     include: "Unknown", "Empty".
    :type create_option: str or ~hybrid_network_management_client.models.DiskCreateOptionTypes
    :param name: The name of data disk.
    :type name: str
    :param disk_size_gb: Specifies the size of an empty disk in gigabytes. This element can be used
     to overwrite the size of the disk in a virtual machine image.
    :type disk_size_gb: int
    """

    _attribute_map = {
        'create_option': {'key': 'createOption', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        create_option: Optional[Union[str, "DiskCreateOptionTypes"]] = None,
        name: Optional[str] = None,
        disk_size_gb: Optional[int] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(DataDisk, self)
        super().__init__(**kwargs)
        self.create_option = create_option
        self.name = name
        self.disk_size_gb = disk_size_gb
class Resource(msrest.serialization.Model):
    """Common fields that are returned in the response for all Azure Resource Manager resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(Resource, self)
        super().__init__(**kwargs)
        # Read-only fields: populated by the server on deserialization.
        self.id = None
        self.name = None
        self.type = None
class TrackedResource(Resource):
    """The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(TrackedResource, self)
        super().__init__(**kwargs)
        self.tags = tags
        self.location = location
class Device(TrackedResource):
    """Device resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar status: The current device status. Possible values include: "Unknown", "NotRegistered",
     "Registered", "Deleted".
    :vartype status: str or ~hybrid_network_management_client.models.Status
    :ivar provisioning_state: The provisioning state of the device resource. Possible values
     include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled", "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :param device_type: The type of the device.Constant filled by server. Possible values include:
     "Unknown", "AzureStackEdge".
    :type device_type: str or ~hybrid_network_management_client.models.DeviceType
    :param azure_stack_edge: The reference to the Azure stack edge device. Once set, it cannot be
     updated.
    :type azure_stack_edge: ~hybrid_network_management_client.models.SubResource
    :ivar network_functions: The list of network functions deployed on the device.
    :vartype network_functions: list[~hybrid_network_management_client.models.SubResource]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'system_data': {'readonly': True},
        'status': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'network_functions': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'device_type': {'key': 'properties.deviceType', 'type': 'str'},
        'azure_stack_edge': {'key': 'properties.azureStackEdge', 'type': 'SubResource'},
        'network_functions': {'key': 'properties.networkFunctions', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        azure_stack_edge: Optional["SubResource"] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(Device, self)
        super().__init__(tags=tags, location=location, **kwargs)
        # Read-only fields: populated by the server on deserialization.
        self.system_data = None
        self.status = None
        self.provisioning_state = None
        # Polymorphic discriminator: filled in by the server.
        self.device_type = None  # type: Optional[str]
        self.azure_stack_edge = azure_stack_edge
        self.network_functions = None
class DeviceListResult(msrest.serialization.Model):
    """Response for devices API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of devices.
    :type value: list[~hybrid_network_management_client.models.Device]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Device]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["Device"]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(DeviceListResult, self)
        super().__init__(**kwargs)
        self.value = value
        # Read-only paging link: populated by the server.
        self.next_link = None
class DevicePropertiesFormat(msrest.serialization.Model):
    """Device properties.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: .

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar status: The current device status. Possible values include: "Unknown", "NotRegistered",
     "Registered", "Deleted".
    :vartype status: str or ~hybrid_network_management_client.models.Status
    :ivar provisioning_state: The provisioning state of the device resource. Possible values
     include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled", "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :param device_type: Required. The type of the device.Constant filled by server. Possible
     values include: "Unknown", "AzureStackEdge".
    :type device_type: str or ~hybrid_network_management_client.models.DeviceType
    :param azure_stack_edge: The reference to the Azure stack edge device. Once set, it cannot be
     updated.
    :type azure_stack_edge: ~hybrid_network_management_client.models.SubResource
    :ivar network_functions: The list of network functions deployed on the device.
    :vartype network_functions: list[~hybrid_network_management_client.models.SubResource]
    """

    _validation = {
        'status': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'device_type': {'required': True},
        'network_functions': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'device_type': {'key': 'deviceType', 'type': 'str'},
        'azure_stack_edge': {'key': 'azureStackEdge', 'type': 'SubResource'},
        'network_functions': {'key': 'networkFunctions', 'type': '[SubResource]'},
    }

    # Polymorphic dispatch on 'device_type'; no concrete sub-classes registered yet.
    _subtype_map = {
        'device_type': {}
    }

    def __init__(
        self,
        *,
        azure_stack_edge: Optional["SubResource"] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(DevicePropertiesFormat, self)
        super().__init__(**kwargs)
        # Read-only fields: populated by the server on deserialization.
        self.status = None
        self.provisioning_state = None
        # Polymorphic discriminator: filled in by the server.
        self.device_type = None  # type: Optional[str]
        self.azure_stack_edge = azure_stack_edge
        self.network_functions = None
class DeviceRegistrationKey(msrest.serialization.Model):
    """The device registration key.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar registration_key: The registration key for the device.
    :vartype registration_key: str
    """

    _validation = {
        'registration_key': {'readonly': True},
    }

    _attribute_map = {
        'registration_key': {'key': 'registrationKey', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(DeviceRegistrationKey, self)
        super().__init__(**kwargs)
        # Read-only field: populated by the server on deserialization.
        self.registration_key = None
class ErrorAdditionalInfo(msrest.serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    _validation = {
        'type': {'readonly': True},
        'info': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(ErrorAdditionalInfo, self)
        super().__init__(**kwargs)
        # Read-only fields: populated by the server on deserialization.
        self.type = None
        self.info = None
class ErrorDetail(msrest.serialization.Model):
    """The error detail.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~hybrid_network_management_client.models.ErrorDetail]
    :ivar additional_info: The error additional info.
    :vartype additional_info: list[~hybrid_network_management_client.models.ErrorAdditionalInfo]
    """

    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
        'additional_info': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetail]'},
        'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(ErrorDetail, self)
        super().__init__(**kwargs)
        # All fields are read-only: populated by the server on deserialization.
        self.code = None
        self.message = None
        self.target = None
        self.details = None
        self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
    """Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).

    :param error: The error object.
    :type error: ~hybrid_network_management_client.models.ErrorDetail
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(
        self,
        *,
        error: Optional["ErrorDetail"] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(ErrorResponse, self)
        super().__init__(**kwargs)
        self.error = error
class ImageReference(msrest.serialization.Model):
    """The image reference properties.

    :param publisher: The image publisher.
    :type publisher: str
    :param offer: Specifies the offer of the image used to create the virtual machine.
    :type offer: str
    :param sku: The image SKU.
    :type sku: str
    :param version: Specifies the version of the image used to create the virtual machine. The
     allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers.
     Specify 'latest' to use the latest version of an image available at deploy time. Even if you
     use 'latest', the VM image will not automatically update after deploy time even if a new
     version becomes available.
    :type version: str
    :param exact_version: Specifies in decimal numbers, the exact version of image used to create
     the virtual machine.
    :type exact_version: str
    """

    _attribute_map = {
        'publisher': {'key': 'publisher', 'type': 'str'},
        'offer': {'key': 'offer', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'exact_version': {'key': 'exactVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        publisher: Optional[str] = None,
        offer: Optional[str] = None,
        sku: Optional[str] = None,
        version: Optional[str] = None,
        exact_version: Optional[str] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(ImageReference, self)
        super().__init__(**kwargs)
        self.publisher = publisher
        self.offer = offer
        self.sku = sku
        self.version = version
        self.exact_version = exact_version
class LinuxConfiguration(msrest.serialization.Model):
    """Specifies the Linux operating system settings on the virtual machine.

    :param ssh: Specifies the ssh key configuration for a Linux OS.
    :type ssh: ~hybrid_network_management_client.models.SshConfiguration
    """

    _attribute_map = {
        'ssh': {'key': 'ssh', 'type': 'SshConfiguration'},
    }

    def __init__(
        self,
        *,
        ssh: Optional["SshConfiguration"] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(LinuxConfiguration, self)
        super().__init__(**kwargs)
        self.ssh = ssh
class NetworkFunction(TrackedResource):
    """Network function resource response.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar provisioning_state: The provisioning state of the network function resource. Possible
     values include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled",
     "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :param device: The reference to the device resource. Once set, it cannot be updated.
    :type device: ~hybrid_network_management_client.models.SubResource
    :param sku_name: The sku name for the network function. Once set, it cannot be updated.
    :type sku_name: str
    :ivar sku_type: The sku type for the network function. Possible values include: "Unknown",
     "EvolvedPacketCore", "SDWAN", "Firewall".
    :vartype sku_type: str or ~hybrid_network_management_client.models.SkuType
    :param vendor_name: The vendor name for the network function. Once set, it cannot be updated.
    :type vendor_name: str
    :ivar service_key: The service key for the network function resource.
    :vartype service_key: str
    :ivar vendor_provisioning_state: The vendor provisioning state for the network function
     resource. Possible values include: "Unknown", "NotProvisioned", "Provisioning", "Provisioned",
     "Deprovisioned", "UserDataValidationFailed".
    :vartype vendor_provisioning_state: str or
     ~hybrid_network_management_client.models.VendorProvisioningState
    :ivar managed_application: The resource URI of the managed application.
    :vartype managed_application: ~hybrid_network_management_client.models.SubResource
    :param managed_application_parameters: The parameters for the managed application.
    :type managed_application_parameters: any
    :param network_function_container_configurations: The network function container configurations
     from the user.
    :type network_function_container_configurations: any
    :param network_function_user_configurations: The network function configurations from the user.
    :type network_function_user_configurations:
     list[~hybrid_network_management_client.models.NetworkFunctionUserConfiguration]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'sku_type': {'readonly': True},
        'service_key': {'readonly': True},
        'vendor_provisioning_state': {'readonly': True},
        'managed_application': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'device': {'key': 'properties.device', 'type': 'SubResource'},
        'sku_name': {'key': 'properties.skuName', 'type': 'str'},
        'sku_type': {'key': 'properties.skuType', 'type': 'str'},
        'vendor_name': {'key': 'properties.vendorName', 'type': 'str'},
        'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
        'vendor_provisioning_state': {'key': 'properties.vendorProvisioningState', 'type': 'str'},
        'managed_application': {'key': 'properties.managedApplication', 'type': 'SubResource'},
        'managed_application_parameters': {'key': 'properties.managedApplicationParameters', 'type': 'object'},
        'network_function_container_configurations': {'key': 'properties.networkFunctionContainerConfigurations', 'type': 'object'},
        'network_function_user_configurations': {'key': 'properties.networkFunctionUserConfigurations', 'type': '[NetworkFunctionUserConfiguration]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        etag: Optional[str] = None,
        device: Optional["SubResource"] = None,
        sku_name: Optional[str] = None,
        vendor_name: Optional[str] = None,
        managed_application_parameters: Optional[Any] = None,
        network_function_container_configurations: Optional[Any] = None,
        network_function_user_configurations: Optional[List["NetworkFunctionUserConfiguration"]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunction, self)
        super().__init__(tags=tags, location=location, **kwargs)
        self.etag = etag
        # Read-only fields: populated by the server on deserialization.
        self.system_data = None
        self.provisioning_state = None
        self.device = device
        self.sku_name = sku_name
        self.sku_type = None
        self.vendor_name = vendor_name
        self.service_key = None
        self.vendor_provisioning_state = None
        self.managed_application = None
        self.managed_application_parameters = managed_application_parameters
        self.network_function_container_configurations = network_function_container_configurations
        self.network_function_user_configurations = network_function_user_configurations
class NetworkFunctionListResult(msrest.serialization.Model):
    """Response for network function API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of network function resources in a subscription or resource group.
    :type value: list[~hybrid_network_management_client.models.NetworkFunction]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkFunction]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["NetworkFunction"]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunctionListResult, self)
        super().__init__(**kwargs)
        self.value = value
        # Read-only paging link: populated by the server.
        self.next_link = None
class NetworkFunctionRoleConfiguration(msrest.serialization.Model):
    """Network function role configuration.

    :param role_name: The name of the network function role.
    :type role_name: str
    :param role_type: Role type. Possible values include: "Unknown", "VirtualMachine".
    :type role_type: str or
     ~hybrid_network_management_client.models.NetworkFunctionRoleConfigurationType
    :param virtual_machine_size: The size of the virtual machine. Possible values include:
     "Unknown", "Standard_D1_v2", "Standard_D2_v2", "Standard_D3_v2", "Standard_D4_v2",
     "Standard_D5_v2", "Standard_D11_v2", "Standard_D12_v2", "Standard_D13_v2", "Standard_DS1_v2",
     "Standard_DS2_v2", "Standard_DS3_v2", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2",
     "Standard_DS12_v2", "Standard_DS13_v2", "Standard_F1", "Standard_F2", "Standard_F4",
     "Standard_F8", "Standard_F16", "Standard_F1s", "Standard_F2s", "Standard_F4s", "Standard_F8s",
     "Standard_F16s".
    :type virtual_machine_size: str or
     ~hybrid_network_management_client.models.VirtualMachineSizeTypes
    :param os_profile: Specifies the operating system settings for the role instance. This value
     can be updated during the deployment of network function.
    :type os_profile: ~hybrid_network_management_client.models.OsProfile
    :param user_data_template: The user data template for customers. This is a json schema template
     describing the format and data type of user data parameters.
    :type user_data_template: any
    :param user_data_parameters: The user parameters for customers. The format of user data
     parameters has to be matched with the provided user data template.
    :type user_data_parameters: any
    :param network_interfaces: The network interface configurations.
    :type network_interfaces: list[~hybrid_network_management_client.models.NetworkInterface]
    :param storage_profile: Specifies the storage settings for the virtual machine disks.
    :type storage_profile: ~hybrid_network_management_client.models.StorageProfile
    :param custom_profile: Specifies the custom settings for the virtual machine.
    :type custom_profile: ~hybrid_network_management_client.models.CustomProfile
    """

    _attribute_map = {
        'role_name': {'key': 'roleName', 'type': 'str'},
        'role_type': {'key': 'roleType', 'type': 'str'},
        'virtual_machine_size': {'key': 'virtualMachineSize', 'type': 'str'},
        'os_profile': {'key': 'osProfile', 'type': 'OsProfile'},
        'user_data_template': {'key': 'userDataTemplate', 'type': 'object'},
        'user_data_parameters': {'key': 'userDataParameters', 'type': 'object'},
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterface]'},
        'storage_profile': {'key': 'storageProfile', 'type': 'StorageProfile'},
        'custom_profile': {'key': 'customProfile', 'type': 'CustomProfile'},
    }

    def __init__(
        self,
        *,
        role_name: Optional[str] = None,
        role_type: Optional[Union[str, "NetworkFunctionRoleConfigurationType"]] = None,
        virtual_machine_size: Optional[Union[str, "VirtualMachineSizeTypes"]] = None,
        os_profile: Optional["OsProfile"] = None,
        user_data_template: Optional[Any] = None,
        user_data_parameters: Optional[Any] = None,
        network_interfaces: Optional[List["NetworkInterface"]] = None,
        storage_profile: Optional["StorageProfile"] = None,
        custom_profile: Optional["CustomProfile"] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunctionRoleConfiguration, self)
        super().__init__(**kwargs)
        self.role_name = role_name
        self.role_type = role_type
        self.virtual_machine_size = virtual_machine_size
        self.os_profile = os_profile
        self.user_data_template = user_data_template
        self.user_data_parameters = user_data_parameters
        self.network_interfaces = network_interfaces
        self.storage_profile = storage_profile
        self.custom_profile = custom_profile
class NetworkFunctionRoleInstanceListResult(msrest.serialization.Model):
    """List of role instances of vendor network function.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: A list of role instances.
    :vartype value: list[~hybrid_network_management_client.models.RoleInstance]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[RoleInstance]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunctionRoleInstanceListResult, self)
        super().__init__(**kwargs)
        # All fields are read-only: populated by the server on deserialization.
        self.value = None
        self.next_link = None
class NetworkFunctionSkuDetails(msrest.serialization.Model):
    """The network function sku details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param sku_type: The network function sku type. Possible values include: "Unknown",
     "EvolvedPacketCore", "SDWAN", "Firewall".
    :type sku_type: str or ~hybrid_network_management_client.models.SkuType
    :param value: The network function sku role details.
    :type value: list[~hybrid_network_management_client.models.NetworkFunctionSkuRoleDetails]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'sku_type': {'key': 'skuType', 'type': 'str'},
        'value': {'key': 'value', 'type': '[NetworkFunctionSkuRoleDetails]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        sku_type: Optional[Union[str, "SkuType"]] = None,
        value: Optional[List["NetworkFunctionSkuRoleDetails"]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunctionSkuDetails, self)
        super().__init__(**kwargs)
        self.sku_type = sku_type
        self.value = value
        # Read-only paging link: populated by the server.
        self.next_link = None
class NetworkFunctionSkuListResult(msrest.serialization.Model):
    """A list of available network function skus.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The network function vendor sku overview properties.
    :type value: list[~hybrid_network_management_client.models.SkuOverview]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SkuOverview]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["SkuOverview"]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunctionSkuListResult, self)
        super().__init__(**kwargs)
        self.value = value
        # Read-only paging link: populated by the server.
        self.next_link = None
class NetworkFunctionSkuRoleDetails(msrest.serialization.Model):
    """The network function user configuration.

    :param role_name: The name of the network function role.
    :type role_name: str
    :param user_data_template: The user data template for customers.
    :type user_data_template: any
    :param user_data_parameters: The user parameters for customers.
    :type user_data_parameters: any
    :param network_interfaces: The network interface configuration.
    :type network_interfaces: list[~hybrid_network_management_client.models.NetworkInterface]
    """

    _attribute_map = {
        'role_name': {'key': 'roleName', 'type': 'str'},
        'user_data_template': {'key': 'userDataTemplate', 'type': 'object'},
        'user_data_parameters': {'key': 'userDataParameters', 'type': 'object'},
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterface]'},
    }

    def __init__(
        self,
        *,
        role_name: Optional[str] = None,
        user_data_template: Optional[Any] = None,
        user_data_parameters: Optional[Any] = None,
        network_interfaces: Optional[List["NetworkInterface"]] = None,
        **kwargs
    ):
        # Python 3 zero-argument super(); equivalent to super(NetworkFunctionSkuRoleDetails, self)
        super().__init__(**kwargs)
        self.role_name = role_name
        self.user_data_template = user_data_template
        self.user_data_parameters = user_data_parameters
        self.network_interfaces = network_interfaces
class NetworkFunctionTemplate(msrest.serialization.Model):
    """The network function template.

    :param network_function_role_configurations: An array of network function role definitions.
    :type network_function_role_configurations:
     list[~hybrid_network_management_client.models.NetworkFunctionRoleConfiguration]
    """

    _attribute_map = {
        'network_function_role_configurations': {
            'key': 'networkFunctionRoleConfigurations',
            'type': '[NetworkFunctionRoleConfiguration]',
        },
    }

    def __init__(
        self, *,
        network_function_role_configurations: Optional[List["NetworkFunctionRoleConfiguration"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.network_function_role_configurations = network_function_role_configurations
class NetworkFunctionUserConfiguration(msrest.serialization.Model):
    """The network function user configuration.

    :param role_name: The name of the network function role.
    :type role_name: str
    :param user_data_parameters: The user data parameters from the customer.
    :type user_data_parameters: any
    :param network_interfaces: The network interface configuration.
    :type network_interfaces: list[~hybrid_network_management_client.models.NetworkInterface]
    :param os_profile: Specifies the operating system settings for the role instance.
    :type os_profile:
     ~hybrid_network_management_client.models.NetworkFunctionUserConfigurationOsProfile
    """

    _attribute_map = {
        'role_name': {'key': 'roleName', 'type': 'str'},
        'user_data_parameters': {'key': 'userDataParameters', 'type': 'object'},
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterface]'},
        'os_profile': {'key': 'osProfile', 'type': 'NetworkFunctionUserConfigurationOsProfile'},
    }

    def __init__(
        self, *,
        role_name: Optional[str] = None,
        user_data_parameters: Optional[Any] = None,
        network_interfaces: Optional[List["NetworkInterface"]] = None,
        os_profile: Optional["NetworkFunctionUserConfigurationOsProfile"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.role_name = role_name
        self.user_data_parameters = user_data_parameters
        self.network_interfaces = network_interfaces
        self.os_profile = os_profile
class NetworkFunctionUserConfigurationOsProfile(msrest.serialization.Model):
    """Operating system settings for the role instance.

    :param custom_data: A base-64 encoded string of custom data. It is decoded to a
     binary array that is saved as a file on the virtual machine; the maximum length
     of the binary array is 65535 bytes. Do not pass any secrets or passwords in the
     customData property. This property cannot be updated after the VM is created.
     For details see `Custom Data on Azure VMs
     <https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/>`_
     and, for cloud-init on Linux, `Using cloud-init to customize a Linux VM during creation
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
    :type custom_data: str
    """

    _attribute_map = {'custom_data': {'key': 'customData', 'type': 'str'}}

    def __init__(self, *, custom_data: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.custom_data = custom_data
class NetworkFunctionVendor(msrest.serialization.Model):
    """The network function vendor.

    :param vendor_name: The network function vendor name.
    :type vendor_name: str
    :param sku_list: The network function sku list.
    :type sku_list: list[~hybrid_network_management_client.models.SkuOverview]
    """

    # Both fields live under the resource's "properties" envelope on the wire.
    _attribute_map = {
        'vendor_name': {'key': 'properties.vendorName', 'type': 'str'},
        'sku_list': {'key': 'properties.skuList', 'type': '[SkuOverview]'},
    }

    def __init__(
        self, *,
        vendor_name: Optional[str] = None,
        sku_list: Optional[List["SkuOverview"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.vendor_name = vendor_name
        self.sku_list = sku_list
class NetworkFunctionVendorConfiguration(msrest.serialization.Model):
    """Network function vendor configuration.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param role_name: The name of the vendor network function role.
    :type role_name: str
    :param os_profile: Specifies the operating system settings for the role instance.
    :type os_profile: ~hybrid_network_management_client.models.OsProfile
    :ivar user_data_parameters: The user parameters from the customer.
    :vartype user_data_parameters: any
    :param network_interfaces: The network interface configurations.
    :type network_interfaces: list[~hybrid_network_management_client.models.NetworkInterface]
    """

    _validation = {'user_data_parameters': {'readonly': True}}

    _attribute_map = {
        'role_name': {'key': 'roleName', 'type': 'str'},
        'os_profile': {'key': 'osProfile', 'type': 'OsProfile'},
        'user_data_parameters': {'key': 'userDataParameters', 'type': 'object'},
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterface]'},
    }

    def __init__(
        self, *,
        role_name: Optional[str] = None,
        os_profile: Optional["OsProfile"] = None,
        network_interfaces: Optional[List["NetworkInterface"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.role_name = role_name
        self.os_profile = os_profile
        # Read-only on the wire: the server supplies this value.
        self.user_data_parameters = None
        self.network_interfaces = network_interfaces
class NetworkFunctionVendorListResult(msrest.serialization.Model):
    """Paged result for the network function vendor list operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of available network function vendors and skus.
    :type value: list[~hybrid_network_management_client.models.NetworkFunctionVendor]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {'next_link': {'readonly': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkFunctionVendor]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["NetworkFunctionVendor"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        # Server-populated paging link.
        self.next_link = None
class NetworkInterface(msrest.serialization.Model):
    """Network interface properties.

    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param mac_address: The MAC address of the network interface.
    :type mac_address: str
    :param ip_configurations: A list of IP configurations of the network interface.
    :type ip_configurations:
     list[~hybrid_network_management_client.models.NetworkInterfaceIPConfiguration]
    :param vm_switch_type: The type of the VM switch. Possible values include: "Unknown",
     "Management", "Wan", "Lan".
    :type vm_switch_type: str or ~hybrid_network_management_client.models.VMSwitchType
    """

    _attribute_map = {
        'network_interface_name': {'key': 'networkInterfaceName', 'type': 'str'},
        'mac_address': {'key': 'macAddress', 'type': 'str'},
        'ip_configurations': {'key': 'ipConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'vm_switch_type': {'key': 'vmSwitchType', 'type': 'str'},
    }

    def __init__(
        self, *,
        network_interface_name: Optional[str] = None,
        mac_address: Optional[str] = None,
        ip_configurations: Optional[List["NetworkInterfaceIPConfiguration"]] = None,
        vm_switch_type: Optional[Union[str, "VMSwitchType"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.network_interface_name = network_interface_name
        self.mac_address = mac_address
        self.ip_configurations = ip_configurations
        self.vm_switch_type = vm_switch_type
class NetworkInterfaceIPConfiguration(msrest.serialization.Model):
    """Network interface IP configuration properties.

    :param ip_allocation_method: IP address allocation method. Possible values include: "Unknown",
     "Static", "Dynamic".
    :type ip_allocation_method: str or ~hybrid_network_management_client.models.IPAllocationMethod
    :param ip_address: The value of the IP address.
    :type ip_address: str
    :param subnet: The value of the subnet.
    :type subnet: str
    :param gateway: The value of the gateway.
    :type gateway: str
    :param ip_version: IP address version. Possible values include: "Unknown", "IPv4".
    :type ip_version: str or ~hybrid_network_management_client.models.IPVersion
    :param dns_servers: The list of DNS servers IP addresses.
    :type dns_servers: list[str]
    """

    _attribute_map = {
        'ip_allocation_method': {'key': 'ipAllocationMethod', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
        'subnet': {'key': 'subnet', 'type': 'str'},
        'gateway': {'key': 'gateway', 'type': 'str'},
        'ip_version': {'key': 'ipVersion', 'type': 'str'},
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(
        self, *,
        ip_allocation_method: Optional[Union[str, "IPAllocationMethod"]] = None,
        ip_address: Optional[str] = None,
        subnet: Optional[str] = None,
        gateway: Optional[str] = None,
        ip_version: Optional[Union[str, "IPVersion"]] = None,
        dns_servers: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.ip_allocation_method = ip_allocation_method
        self.ip_address = ip_address
        self.subnet = subnet
        self.gateway = gateway
        self.ip_version = ip_version
        self.dns_servers = dns_servers
class Operation(msrest.serialization.Model):
    """A single Microsoft.HybridNetwork operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Operation name: {provider}/{resource}/{operation}.
    :vartype name: str
    :ivar display: The object that represents the operation.
    :vartype display: ~hybrid_network_management_client.models.OperationDisplay
    """

    _validation = {
        'name': {'readonly': True},
        'display': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated and start out unset.
        self.name = None
        self.display = None
class OperationDisplay(msrest.serialization.Model):
    """Display metadata for an operation.

    :param provider: Service provider: Microsoft.HybridNetwork.
    :type provider: str
    :param resource: Resource on which the operation is performed: Registration definition,
     registration assignment, etc.
    :type resource: str
    :param operation: Operation type: Read, write, delete, etc.
    :type operation: str
    :param description: Description of the operation.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self, *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationList(msrest.serialization.Model):
    """Paged list of operations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: A list of Microsoft.HybridNetwork operations.
    :vartype value: list[~hybrid_network_management_client.models.Operation]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fully server-populated model; nothing is settable by the caller.
        self.value = None
        self.next_link = None
class OsDisk(msrest.serialization.Model):
    """Information about the operating system disk used by the virtual machine.

    For more information about disks, see `About disks and VHDs for Azure virtual machines
    <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.

    :param os_type: The OS type. Possible values include: "Unknown", "Windows", "Linux".
    :type os_type: str or ~hybrid_network_management_client.models.OperatingSystemTypes
    :param name: The VHD name.
    :type name: str
    :param vhd: The virtual hard disk.
    :type vhd: ~hybrid_network_management_client.models.VirtualHardDisk
    :param disk_size_gb: The size of the os disk in gigabytes. This is the fully expanded
     disk size needed of the VHD image on the ASE and should be greater than the size of
     the VHD provided in vhdUri.
    :type disk_size_gb: int
    """

    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
    }

    def __init__(
        self, *,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        disk_size_gb: Optional[int] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.os_type = os_type
        self.name = name
        self.vhd = vhd
        self.disk_size_gb = disk_size_gb
class OsProfile(msrest.serialization.Model):
    """Operating system settings for the role instance.

    :param admin_username: The name of the administrator account. Windows-only
     restriction: cannot end in ".". Disallowed values: "administrator", "admin", "user",
     "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser",
     "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner",
     "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3",
     "user4", "user5". Minimum length (Linux): 1 character; maximum length (Linux): 64
     characters; maximum length (Windows): 20 characters. For root access to the Linux
     VM, see `Using root privileges on Linux virtual machines in Azure
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_;
     for a list of built-in system users on Linux that should not be used in this field,
     see `Selecting User Names for Linux on Azure
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
    :type admin_username: str
    :param linux_configuration: The Linux operating system settings on the virtual
     machine. For supported distributions, see `Linux on Azure-Endorsed Distributions
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_;
     for non-endorsed distributions, see `Information for Non-Endorsed Distributions
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
    :type linux_configuration: ~hybrid_network_management_client.models.LinuxConfiguration
    :param custom_data: A base-64 encoded string of custom data, decoded to a binary
     array saved as a file on the virtual machine (maximum 65535 bytes). Do not pass any
     secrets or passwords in the customData property; it cannot be updated after the VM
     is created. See `Custom Data on Azure VMs
     <https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/>`_
     and `Using cloud-init to customize a Linux VM during creation
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
    :type custom_data: str
    :param custom_data_required: Indicates if custom data is required to deploy this role.
    :type custom_data_required: bool
    """

    _attribute_map = {
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'custom_data_required': {'key': 'customDataRequired', 'type': 'bool'},
    }

    def __init__(
        self, *,
        admin_username: Optional[str] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        custom_data: Optional[str] = None,
        custom_data_required: Optional[bool] = True,  # defaults to required
        **kwargs
    ):
        super().__init__(**kwargs)
        self.admin_username = admin_username
        self.linux_configuration = linux_configuration
        self.custom_data = custom_data
        self.custom_data_required = custom_data_required
class PreviewSubscription(msrest.serialization.Model):
    """Customer subscription which can use a sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The preview subscription ID.
    :vartype name: str
    :ivar id: The ARM ID of the resource.
    :vartype id: str
    :ivar type: The type of the resource.
    :vartype type: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar provisioning_state: The provisioning state of the PreviewSubscription resource.
     Possible values include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed",
     "Canceled", "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field here is read-only and filled in by the service.
        self.name = None
        self.id = None
        self.type = None
        self.system_data = None
        self.provisioning_state = None
class PreviewSubscriptionsList(msrest.serialization.Model):
    """Paged list of customer subscriptions which can use a sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of preview subscriptions.
    :type value: list[~hybrid_network_management_client.models.PreviewSubscription]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {'next_link': {'readonly': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PreviewSubscription]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["PreviewSubscription"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        # Server-populated paging link.
        self.next_link = None
class ProxyResource(Resource):
    """Azure Resource Manager proxy resource: no tags and no location.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Adds nothing beyond Resource; exists to mark the proxy-resource shape.
        super().__init__(**kwargs)
class RoleInstance(msrest.serialization.Model):
    """The role instance sub resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: The role instance name.
    :type name: str
    :param id: The ARM ID of the resource.
    :type id: str
    :param type: The type of the resource.
    :type type: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar provisioning_state: The provisioning state of the RoleInstance resource. Possible
     values include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled",
     "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :param operational_state: The operational state of the role instance. Possible values
     include: "Unknown", "Stopped", "Running", "Stopping", "Starting".
    :type operational_state: str or ~hybrid_network_management_client.models.OperationalState
    """

    _validation = {
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'operational_state': {'key': 'properties.operationalState', 'type': 'str'},
    }

    def __init__(
        self, *,
        name: Optional[str] = None,
        id: Optional[str] = None,
        type: Optional[str] = None,
        operational_state: Optional[Union[str, "OperationalState"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.id = id
        self.type = type
        # Read-only fields the service fills in.
        self.system_data = None
        self.provisioning_state = None
        self.operational_state = operational_state
class SkuOverview(msrest.serialization.Model):
    """The network function sku overview.

    :param sku_name: The vendor sku name.
    :type sku_name: str
    :param sku_type: The vendor sku type. Possible values include: "Unknown",
     "EvolvedPacketCore", "SDWAN", "Firewall".
    :type sku_type: str or ~hybrid_network_management_client.models.SkuType
    """

    _attribute_map = {
        'sku_name': {'key': 'skuName', 'type': 'str'},
        'sku_type': {'key': 'skuType', 'type': 'str'},
    }

    def __init__(
        self, *,
        sku_name: Optional[str] = None,
        sku_type: Optional[Union[str, "SkuType"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.sku_name = sku_name
        self.sku_type = sku_type
class SshConfiguration(msrest.serialization.Model):
    """SSH configuration for Linux based VMs running on Azure.

    :param public_keys: The list of SSH public keys used to authenticate with linux based VMs.
    :type public_keys: list[~hybrid_network_management_client.models.SshPublicKey]
    """

    _attribute_map = {'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'}}

    def __init__(self, *, public_keys: Optional[List["SshPublicKey"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.public_keys = public_keys
class SshPublicKey(msrest.serialization.Model):
    """An SSH certificate public key and the path on the Linux VM where it is placed.

    :param path: The full path on the created VM where the ssh public key is stored. If the
     file already exists, the specified key is appended to the file. Example:
     /home/user/.ssh/authorized_keys.
    :type path: str
    :param key_data: SSH public key certificate used to authenticate with the VM through ssh.
     The key needs to be at least 2048-bit and in ssh-rsa format. For creating ssh keys, see
     `Create SSH keys on Linux and Mac for Linux VMs in Azure
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
    :type key_data: str
    """

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(self, *, path: Optional[str] = None, key_data: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.path = path
        self.key_data = key_data
class StorageProfile(msrest.serialization.Model):
    """Storage settings for the virtual machine disks.

    :param image_reference: The image reference properties.
    :type image_reference: ~hybrid_network_management_client.models.ImageReference
    :param os_disk: Information about the operating system disk used by the virtual machine.
    :type os_disk: ~hybrid_network_management_client.models.OsDisk
    :param data_disks: The parameters that are used to add a data disk to a virtual machine.
    :type data_disks: list[~hybrid_network_management_client.models.DataDisk]
    """

    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'OsDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
    }

    def __init__(
        self, *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["OsDisk"] = None,
        data_disks: Optional[List["DataDisk"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class SubResource(msrest.serialization.Model):
    """Reference to another sub resource.

    :param id: Resource ID.
    :type id: str
    """

    _attribute_map = {'id': {'key': 'id', 'type': 'str'}}

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        # ``id`` shadows the builtin; name is fixed by the generated API surface.
        super().__init__(**kwargs)
        self.id = id
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :param created_by: The identity that created the resource.
    :type created_by: str
    :param created_by_type: The type of identity that created the resource. Possible values
     include: "User", "Application", "ManagedIdentity", "Key".
    :type created_by_type: str or ~hybrid_network_management_client.models.CreatedByType
    :param created_at: The timestamp of resource creation (UTC).
    :type created_at: ~datetime.datetime
    :param last_modified_by: The identity that last modified the resource.
    :type last_modified_by: str
    :param last_modified_by_type: The type of identity that last modified the resource.
     Possible values include: "User", "Application", "ManagedIdentity", "Key".
    :type last_modified_by_type: str or ~hybrid_network_management_client.models.CreatedByType
    :param last_modified_at: The timestamp of resource last modification (UTC).
    :type last_modified_at: ~datetime.datetime
    """

    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(
        self, *,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
class TagsObject(msrest.serialization.Model):
    """Tags object for patch operations.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {'tags': {'key': 'tags', 'type': '{str}'}}

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
class Vendor(ProxyResource):
    """Vendor resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar provisioning_state: The provisioning state of the vendor resource. Possible values
     include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled", "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :ivar skus: A list of IDs of the vendor skus offered by the vendor.
    :vartype skus: list[~hybrid_network_management_client.models.SubResource]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'skus': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'skus': {'key': 'properties.skus', 'type': '[SubResource]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only, server-populated fields beyond the ProxyResource base.
        self.system_data = None
        self.provisioning_state = None
        self.skus = None
class VendorListResult(msrest.serialization.Model):
    """Response for vendors API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of vendors.
    :type value: list[~hybrid_network_management_client.models.Vendor]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {'next_link': {'readonly': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Vendor]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["Vendor"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        # Server-populated paging link.
        self.next_link = None
class VendorNetworkFunction(ProxyResource):
    """Vendor network function sub resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar provisioning_state: The provisioning state of the vendor network function sub resource.
     Possible values include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled",
     "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :param vendor_provisioning_state: The vendor controlled provisioning state of the vendor
     network function. Possible values include: "Unknown", "NotProvisioned", "Provisioning",
     "Provisioned", "Deprovisioned", "UserDataValidationFailed".
    :type vendor_provisioning_state: str or
     ~hybrid_network_management_client.models.VendorProvisioningState
    :ivar sku_name: The name of the sku. Once set, it cannot be updated.
    :vartype sku_name: str
    :ivar sku_type: The sku type. Possible values include: "Unknown", "EvolvedPacketCore", "SDWAN",
     "Firewall".
    :vartype sku_type: str or ~hybrid_network_management_client.models.SkuType
    :param network_function_vendor_configurations: An array of network function vendor
     configurations.
    :type network_function_vendor_configurations:
     list[~hybrid_network_management_client.models.NetworkFunctionVendorConfiguration]
    """
    # Read-only fields: rejected by the serializer if the client tries to set them.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'sku_name': {'readonly': True},
        'sku_type': {'readonly': True},
    }
    # Maps Python attribute names to JSON paths ("properties.*" lives inside
    # the resource's nested "properties" object on the wire).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'vendor_provisioning_state': {'key': 'properties.vendorProvisioningState', 'type': 'str'},
        'sku_name': {'key': 'properties.skuName', 'type': 'str'},
        'sku_type': {'key': 'properties.skuType', 'type': 'str'},
        'network_function_vendor_configurations': {'key': 'properties.networkFunctionVendorConfigurations', 'type': '[NetworkFunctionVendorConfiguration]'},
    }
    def __init__(
        self,
        *,
        vendor_provisioning_state: Optional[Union[str, "VendorProvisioningState"]] = None,
        network_function_vendor_configurations: Optional[List["NetworkFunctionVendorConfiguration"]] = None,
        **kwargs
    ):
        """
        :keyword vendor_provisioning_state: The vendor controlled provisioning state of the vendor
         network function.
        :paramtype vendor_provisioning_state: str or
         ~hybrid_network_management_client.models.VendorProvisioningState
        :keyword network_function_vendor_configurations: An array of network function vendor
         configurations.
        :paramtype network_function_vendor_configurations:
         list[~hybrid_network_management_client.models.NetworkFunctionVendorConfiguration]
        """
        super(VendorNetworkFunction, self).__init__(**kwargs)
        # Read-only fields start as None and are populated by the service.
        self.system_data = None
        self.provisioning_state = None
        self.vendor_provisioning_state = vendor_provisioning_state
        self.sku_name = None
        self.sku_type = None
        self.network_function_vendor_configurations = network_function_vendor_configurations
class VendorNetworkFunctionListResult(msrest.serialization.Model):
    """Response for vendors API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of vendor network functions.
    :type value: list[~hybrid_network_management_client.models.VendorNetworkFunction]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VendorNetworkFunction]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["VendorNetworkFunction"]] = None, **kwargs):
        """Accept the client-settable ``value`` page; ``next_link`` is
        read-only and filled in by the service."""
        super(VendorNetworkFunctionListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None
class VendorSku(ProxyResource):
    """Sku sub resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: The system meta data relating to this resource.
    :vartype system_data: ~hybrid_network_management_client.models.SystemData
    :ivar provisioning_state: The provisioning state of the vendor sku sub resource. Possible
     values include: "Unknown", "Succeeded", "Accepted", "Deleting", "Failed", "Canceled",
     "Deleted".
    :vartype provisioning_state: str or ~hybrid_network_management_client.models.ProvisioningState
    :param sku_type: The sku type. Possible values include: "Unknown", "EvolvedPacketCore",
     "SDWAN", "Firewall".
    :type sku_type: str or ~hybrid_network_management_client.models.SkuType
    :param deployment_mode: The sku deployment mode. Possible values include: "Unknown", "Azure",
     "PrivateEdgeZone".
    :type deployment_mode: str or ~hybrid_network_management_client.models.SkuDeploymentMode
    :param network_function_type: The network function type. Possible values include: "Unknown",
     "VirtualNetworkFunction", "ContainerizedNetworkFunction".
    :type network_function_type: str or
     ~hybrid_network_management_client.models.NetworkFunctionType
    :param preview: Indicates if the vendor sku is in preview mode.
    :type preview: bool
    :param managed_application_parameters: The parameters for the managed application to be
     supplied by the vendor.
    :type managed_application_parameters: any
    :param managed_application_template: The template for the managed application deployment.
    :type managed_application_template: any
    :param network_function_template: The template definition of the network function.
    :type network_function_template:
     ~hybrid_network_management_client.models.NetworkFunctionTemplate
    """
    # Read-only fields: rejected by the serializer if the client tries to set them.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to JSON paths ("properties.*" lives inside
    # the resource's nested "properties" object on the wire).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'sku_type': {'key': 'properties.skuType', 'type': 'str'},
        'deployment_mode': {'key': 'properties.deploymentMode', 'type': 'str'},
        'network_function_type': {'key': 'properties.networkFunctionType', 'type': 'str'},
        'preview': {'key': 'properties.preview', 'type': 'bool'},
        'managed_application_parameters': {'key': 'properties.managedApplicationParameters', 'type': 'object'},
        'managed_application_template': {'key': 'properties.managedApplicationTemplate', 'type': 'object'},
        'network_function_template': {'key': 'properties.networkFunctionTemplate', 'type': 'NetworkFunctionTemplate'},
    }
    def __init__(
        self,
        *,
        sku_type: Optional[Union[str, "SkuType"]] = None,
        deployment_mode: Optional[Union[str, "SkuDeploymentMode"]] = None,
        network_function_type: Optional[Union[str, "NetworkFunctionType"]] = None,
        preview: Optional[bool] = None,
        managed_application_parameters: Optional[Any] = None,
        managed_application_template: Optional[Any] = None,
        network_function_template: Optional["NetworkFunctionTemplate"] = None,
        **kwargs
    ):
        """
        :keyword sku_type: The sku type.
        :paramtype sku_type: str or ~hybrid_network_management_client.models.SkuType
        :keyword deployment_mode: The sku deployment mode.
        :paramtype deployment_mode: str or
         ~hybrid_network_management_client.models.SkuDeploymentMode
        :keyword network_function_type: The network function type.
        :paramtype network_function_type: str or
         ~hybrid_network_management_client.models.NetworkFunctionType
        :keyword preview: Indicates if the vendor sku is in preview mode.
        :paramtype preview: bool
        :keyword managed_application_parameters: The parameters for the managed application to be
         supplied by the vendor.
        :paramtype managed_application_parameters: any
        :keyword managed_application_template: The template for the managed application deployment.
        :paramtype managed_application_template: any
        :keyword network_function_template: The template definition of the network function.
        :paramtype network_function_template:
         ~hybrid_network_management_client.models.NetworkFunctionTemplate
        """
        super(VendorSku, self).__init__(**kwargs)
        # Read-only fields start as None and are populated by the service.
        self.system_data = None
        self.provisioning_state = None
        self.sku_type = sku_type
        self.deployment_mode = deployment_mode
        self.network_function_type = network_function_type
        self.preview = preview
        self.managed_application_parameters = managed_application_parameters
        self.managed_application_template = managed_application_template
        self.network_function_template = network_function_template
class VendorSkuListResult(msrest.serialization.Model):
    """Response for list vendor sku API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of vendor skus offered by the vendor.
    :type value: list[~hybrid_network_management_client.models.VendorSku]
    :ivar next_link: The URI to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VendorSku]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["VendorSku"]] = None, **kwargs):
        """Accept the client-settable ``value`` page; ``next_link`` is
        read-only and filled in by the service."""
        super(VendorSkuListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None
class VirtualHardDisk(msrest.serialization.Model):
    """Describes the uri of a disk.

    :param uri: Specifies the virtual hard disk's uri.
    :type uri: str
    """

    _attribute_map = {
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(self, *, uri: Optional[str] = None, **kwargs):
        """Store the optional disk ``uri``."""
        super(VirtualHardDisk, self).__init__(**kwargs)
        self.uri = uri
| Azure/azure-sdk-for-python | sdk/hybridnetwork/azure-mgmt-hybridnetwork/azure/mgmt/hybridnetwork/models/_models_py3.py | Python | mit | 79,941 | [
"ASE"
] | e604d4fce74aa8194f2f7e51ac5a99923c45814e2732f952d404a559eaa3ebf7 |
from __future__ import print_function
import os
import shutil
import itertools
import tempfile
import subprocess
from distutils.spawn import find_executable
import numpy as np
import mdtraj as md
from mdtraj.testing import get_fn, eq, skipif
HAVE_DSSP = find_executable('mkdssp')
DSSP_MSG = "This tests required mkdssp to be installed, from http://swift.cmbi.ru.nl/gv/dssp/"
tmpdir = None
def setup():
    """Module-level nose fixture: create a scratch directory shared by all tests."""
    global tmpdir
    tmpdir = tempfile.mkdtemp()
def teardown():
    """Module-level nose fixture: delete the scratch directory created by setup()."""
    shutil.rmtree(tmpdir)
def call_dssp(traj, frame=0):
    """Run the external ``mkdssp`` program on one frame of ``traj`` and return
    the per-residue secondary structure codes (column 17 of the DSSP output)
    as a numpy array of single characters.

    Chain-break pseudo-residues ('!' in column 14) are skipped.
    """
    inp = os.path.join(tmpdir, 'temp.pdb')
    out = os.path.join(tmpdir, 'temp.pdb.dssp')
    traj[frame].save(inp)
    # Pass the argv list directly instead of a shell-joined string: with
    # shell=False a tmpdir containing spaces or metacharacters cannot break
    # (or inject into) the command line.
    subprocess.check_output(['mkdssp', '-i', inp, '-o', out])
    KEY_LINE = ' # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O O-->H-N N-H-->O O-->H-N TCO KAPPA ALPHA PHI PSI X-CA Y-CA Z-CA'
    with open(out) as f:
        # Advance the file iterator through (and including) the column-key
        # line. The previous implementation abused max(itertools.takewhile(...)),
        # which raises ValueError when the key line is the very first line.
        for line in f:
            if line.startswith(KEY_LINE):
                break
        return np.array([line[16] for line in f if line[13] != '!'])
def assert_(a, b):
    """Assert element-wise equality of *a* and *b*.

    On failure, print a side-by-side listing (mismatches flagged with '<-',
    or a length message when the lengths differ) and re-raise the
    AssertionError.
    """
    try:
        assert np.all(a == b)
    except AssertionError:
        if len(a) != len(b):
            print('Not the same length: %d vs %s' % (len(a), len(b)))
            raise
        for idx, (left, right) in enumerate(zip(a, b)):
            if left == right:
                print("%3d: '%s' '%s'" % (idx, left, right))
            else:
                print("%3d: '%s' '%s' <-" % (idx, left, right))
        raise
@skipif(not HAVE_DSSP, DSSP_MSG)
def test_1():
    """Compare md.compute_dssp against the reference mkdssp binary on several PDBs."""
    for fn in ['1bpi.pdb', '1vii.pdb', '4K6Q.pdb', '1am7_protein.pdb']:
        t = md.load_pdb(get_fn(fn))
        t = t.atom_slice(t.top.select_atom_indices('minimal'))
        # Bind the current trajectory as a default argument: a plain closure
        # late-binds `t`, so every yielded test could otherwise run against
        # the trajectory from the last loop iteration.
        f = lambda t=t: assert_(call_dssp(t), md.compute_dssp(t, simplified=False)[0])
        f.description = 'test_1: %s' % fn
        yield f
@skipif(not HAVE_DSSP, DSSP_MSG)
def test_2():
    """Compare md.compute_dssp against mkdssp on every frame of a multi-frame PDB."""
    t = md.load(get_fn('2EQQ.pdb'))
    for i in range(len(t)):
        # Default-argument binding avoids the late-binding closure bug:
        # otherwise every yielded callable could test the final frame only.
        yield lambda i=i: assert_(call_dssp(t[i]), md.compute_dssp(t[i], simplified=False)[0])
@skipif(not HAVE_DSSP, DSSP_MSG)
def test_3():
    """Compare md.compute_dssp against mkdssp on structures fetched from the PDB."""
    # 1COY gives a small error, due to a broken chain.
    pdbids = ['1GAI', '6gsv', '2AAC']
    for pdbid in pdbids:
        t = md.load_pdb('http://www.rcsb.org/pdb/files/%s.pdb' % pdbid)
        t = t.atom_slice(t.top.select_atom_indices('minimal'))
        # Bind `t` as a default argument so each yielded test keeps its own
        # trajectory instead of late-binding the loop variable.
        f = lambda t=t: assert_(call_dssp(t), md.compute_dssp(t, simplified=False)[0])
        f.description = 'test_1: %s' % pdbid
        yield f
def test_4():
    """Simplified and full DSSP assignments agree in shape; the simplified
    alphabet for this protein is exactly {'C', 'E', 'H'}."""
    t = md.load_pdb(get_fn('1am7_protein.pdb'))
    simplified = md.compute_dssp(t, simplified=True)
    full = md.compute_dssp(t, simplified=False)
    assert len(simplified) == len(full)
    assert len(simplified[0]) == len(full[0])
    assert list(np.unique(simplified[0])) == ['C', 'E', 'H']
def test_5():
    """Non-protein residues (a box of 4 waters) are all labelled 'NA'."""
    t = md.load(get_fn('4waters.pdb'))
    expected = np.array([['NA'] * 4])
    np.testing.assert_array_equal(md.compute_dssp(t, simplified=True), expected)
    np.testing.assert_array_equal(md.compute_dssp(t, simplified=False), expected)
def test_6():
    """In an explicit-solvent system, protein residues get a real code ('C'
    for alanine dipeptide) while solvent residues get 'NA'."""
    t = md.load(get_fn('alanine-dipeptide-explicit.pdb'))
    codes = md.compute_dssp(t, simplified=True)
    # A residue counts as protein if it carries the full backbone atom set.
    is_protein = np.array([
        {atom.name for atom in res.atoms} >= {'C', 'N', 'O', 'CA'}
        for res in t.topology.residues
    ])
    assert np.unique(codes[:, is_protein]) == "C"
    assert np.unique(codes[:, np.logical_not(is_protein)]) == 'NA'
| casawa/mdtraj | mdtraj/geometry/tests/test_dssp.py | Python | lgpl-2.1 | 3,448 | [
"MDTraj"
] | dda275caf3290f2d4f173508b81ff4249b92b954f796f21a6f86637026565991 |
# coding: utf-8
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects..
"""
from __future__ import unicode_literals, division, print_function
import sys
import os
import abc
import collections
import json
import six
import numpy as np
from monty.string import list_strings, is_string
from monty.itertools import iterator_from_slice
from monty.io import FileLock
from monty.collections import AttrDict, Namespace
from pymatgen.core.periodic_table import PeriodicTable #, Element
__all__ = [
"Pseudo",
"PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
_PTABLE = PeriodicTable()
# Tools and helper functions.
def straceback():
    """Return a string with the most recent traceback followed by the current
    exception class (``sys.exc_info()[0]``), for logging inside except blocks."""
    # Lazy import: traceback is only needed on this (rare) error path and is
    # not imported at module level.  The previous local "import sys" merely
    # shadowed the module-level import and has been dropped.
    import traceback
    return "\n".join((traceback.format_exc(), str(sys.exc_info()[0])))
def _read_nlines(filename, nlines):
"""
Read at most nlines lines from file filename.
If nlines is < 0, the entire file is read.
"""
if nlines < 0:
with open(filename, 'r') as fh:
return fh.readlines()
lines = []
with open(filename, 'r') as fh:
for (lineno, line) in enumerate(fh):
if lineno == nlines: break
lines.append(line)
return lines
# Spectroscopic letter for each integer angular momentum quantum number.
_l2str = {0: "s", 1: "p", 2: "d", 3: "f", 4: "g", 5: "h", 6: "i"}

# Inverse mapping: spectroscopic letter -> integer angular momentum.
_str2l = {v: k for k, v in _l2str.items()}


def l2str(l):
    """Convert the angular momentum l (int) to string."""
    if l in _l2str:
        return _l2str[l]
    return "Unknown angular momentum, received l = %s" % l


def str2l(s):
    """Convert a string to the angular momentum l (int)"""
    return _str2l[s]
def read_dojo_report(filename):
    """Helper function to read the DOJO_REPORT section from *filename*.

    Returns the parsed JSON payload as a dict, or {} when the file contains
    no ``<DOJO_REPORT>`` tag line.
    """
    with open(filename, "r") as fh:
        lines = fh.readlines()
    if "<DOJO_REPORT>\n" not in lines:
        return {}
    start = lines.index("<DOJO_REPORT>\n")
    stop = lines.index("</DOJO_REPORT>\n")
    return json.loads("".join(lines[start+1:stop]))
class Pseudo(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract base class defining the methods that must be
    implemented by the concrete pseudopotential classes.
    """
    @classmethod
    def as_pseudo(cls, obj):
        """
        Convert obj into a pseudo. Accepts:
        * Pseudo object.
        * string defining a valid path.
        """
        return obj if isinstance(obj, cls) else cls.from_file(obj)
    @staticmethod
    def from_file(filename):
        """
        Return a pseudopotential object from filename.
        Note: the parser knows the concrete class that should be instanciated
        """
        return PseudoParser().parse(filename)
    def __repr__(self):
        """Terse representation: class name, id and file basename."""
        return "<%s at %s, name = %s>" % (self.__class__.__name__, id(self), self.name)
    def __str__(self):
        """String representation."""
        lines = []
        app = lines.append
        app("<%s: %s>" % (self.__class__.__name__, self.name))
        app(" summary: " + self.summary.strip())
        app(" number of valence electrons: %s" % self.Z_val)
        #FIXME: rewrite the treatment of xc, use XML specs as starting point
        #app(" XC correlation (ixc): %s" % self._pspxc) #FIXME
        app(" maximum angular momentum: %s" % l2str(self.l_max))
        app(" angular momentum for local part: %s" % l2str(self.l_local))
        if self.isnc:
            app(" radius for non-linear core correction: %s" % self.nlcc_radius)
        app("")
        # Only "normal" accuracy is shown; hint_for_accuracy returns None
        # when the pseudo carries no DOJO_REPORT.
        hint_normal = self.hint_for_accuracy()
        if hint_normal is not None:
            app(" hint for normal accuracy: %s" % str(hint_normal))
        return "\n".join(lines)
    @abc.abstractproperty
    def summary(self):
        """String summarizing the most important properties."""
    @property
    def filepath(self):
        """Absolute path of the pseudopotential file."""
        return os.path.abspath(self.path)
    @property
    def name(self):
        """File basename."""
        return os.path.basename(self.filepath)
    @abc.abstractproperty
    def Z(self):
        """The atomic number of the atom."""
    @abc.abstractproperty
    def Z_val(self):
        """Valence charge"""
    @property
    def type(self):
        """Name of the concrete class (e.g. NcAbinitPseudo)."""
        return self.__class__.__name__
    @property
    def element(self):
        """Pymatgen `Element`."""
        #return Element.from_Z(self.Z)
        # Z may be stored as a float (headers carry e.g. "21.00000"), so
        # retry with an int conversion when the direct lookup fails.
        try:
            return _PTABLE[self.Z]
        except (KeyError, IndexError):
            return _PTABLE[int(self.Z)]
    @property
    def symbol(self):
        """Element symbol."""
        return self.element.symbol
    @abc.abstractproperty
    def l_max(self):
        """Maximum angular momentum."""
    @abc.abstractproperty
    def l_local(self):
        """Angular momentum used for the local part."""
    @property
    def isnc(self):
        """True if norm-conserving pseudopotential."""
        return isinstance(self, NcPseudo)
    @property
    def ispaw(self):
        """True if PAW pseudopotential."""
        return isinstance(self, PawPseudo)
    #@abc.abstractproperty
    #def xc_type(self):
    #    """XC family e.g LDA, GGA, MGGA."""
    #@abc.abstractproperty
    #def xc_flavor(self):
    #    """XC flavor e.g PW, PW91, PBE."""
    #@property
    #def xc_functional(self):
    #    """XC identifier e.g LDA-PW91, GGA-PBE, GGA-revPBE."""
    #    return "-".join([self.xc_type, self.xc_flavor])
    #@abc.abstractproperty
    #def has_soc(self):
    #    """True if pseudo contains spin-orbit coupling."""
    #@abc.abstractmethod
    #def num_of_projectors(self, l='s'):
    #    """Number of projectors for the angular channel l"""
    #@abc.abstractmethod
    #def generation_mode
    #    """scalar scalar-relativistic, relativistic."""
    def as_dict(self, **kwargs):
        """JSON-friendly dict with the basic pseudo parameters."""
        return dict(
            name=self.name,
            type=self.type,
            symbol=self.symbol,
            Z=self.Z,
            Z_val=self.Z_val,
            l_max=self.l_max,
            #nlcc_radius=self.nlcc_radius,
        )
    @property
    def has_dojo_report(self):
        """True if self contains the DOJO_REPORT section."""
        return bool(self.dojo_report)
    def delta_factor(self, accuracy="normal"):
        """
        Returns the deltafactor [meV/natom] computed with the given accuracy.
        None if self does not have info on the deltafactor.
        """
        if not self.has_dojo_report:
            return None
        try:
            return self.dojo_report["delta_factor"][accuracy]["dfact"]
        except KeyError:
            return None
    def read_dojo_report(self):
        """
        Read the DOJO_REPORT section and set dojo_report attribute.
        returns {} if section is not present.
        """
        self.dojo_report = read_dojo_report(self.path)
        return self.dojo_report
    def write_dojo_report(self, report=None):
        """Write a new DOJO_REPORT section to the pseudopotential file."""
        if report is None:
            report = self.dojo_report
        # Create JSON string from report.
        jstring = json.dumps(report, indent=4, sort_keys=True) + "\n"
        #jstring = json.dumps(report, sort_keys=True) + "\n"
        # Read lines from file and insert jstring between the tags.
        with open(self.path, "r") as fh:
            lines = fh.readlines()
        try:
            start = lines.index("<DOJO_REPORT>\n")
        except ValueError:
            start = -1
        if start == -1:
            # DOJO_REPORT was not present.
            lines += ["\n", "<DOJO_REPORT>\n", jstring , "</DOJO_REPORT>\n",]
        else:
            # Replace the old payload: insert the new one just before the
            # closing tag, then drop everything between the tags.
            stop = lines.index("</DOJO_REPORT>\n")
            lines.insert(stop, jstring)
            del lines[start+1:stop]
        # Write new file.
        # FileLock guards against concurrent writers to the same pseudo file.
        with FileLock(self.path):
            with open(self.path, "w") as fh:
                fh.writelines(lines)
    def remove_dojo_report(self):
        """Remove the DOJO_REPORT section from the pseudopotential file."""
        # Read lines from file and insert jstring between the tags.
        with open(self.path, "r") as fh:
            lines = fh.readlines()
        try:
            start = lines.index("<DOJO_REPORT>\n")
        except ValueError:
            start = -1
        if start == -1:
            return
        stop = lines.index("</DOJO_REPORT>\n")
        if stop == -1:
            return
        # Keep the tag lines themselves, drop only the payload between them.
        del lines[start+1:stop]
        # Write new file.
        with FileLock(self.path):
            with open(self.path, "w") as fh:
                fh.writelines(lines)
    def hint_for_accuracy(self, accuracy="normal"):
        """
        Returns an hint object with parameters such as ecut [Ha] and
        aug_ratio for given accuracy. Returns None if no hint is available.
        Args:
            accuracy: ["low", "normal", "high"]
        """
        if self.has_dojo_report:
            return Hint.from_dict(self.dojo_report["hints"][accuracy])
        else:
            return None
    @property
    def has_hints(self):
        """True if self provides hints on the cutoff energy."""
        for acc in ["low", "normal", "high"]:
            if self.hint_for_accuracy(acc) is None:
                return False
        return True
class NcPseudo(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract class defining the methods that must be implemented
    by the concrete classes representing norm-conserving pseudopotentials.
    """
    @abc.abstractproperty
    def nlcc_radius(self):
        """
        Radius at which the core charge vanish (i.e. cut-off in a.u.).
        Returns 0.0 if nlcc is not used.
        """
    @property
    def has_nlcc(self):
        """True if the pseudo is generated with non-linear core correction."""
        return self.nlcc_radius > 0.0
    @property
    def rcore(self):
        """Radius of the pseudization sphere in a.u."""
        # NOTE(review): nothing visible in this module ever assigns
        # ``self._core`` (AbinitPseudo stores header variables as
        # ``self._<key>``), so this presumably returns None in practice —
        # confirm the intended attribute name.
        try:
            return self._core
        except AttributeError:
            return None
class PawPseudo(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract class that defines the methods that must be implemented
    by the concrete classes representing PAW pseudopotentials.
    """
    #def nlcc_radius(self):
    #    """
    #    Radius at which the core charge vanish (i.e. cut-off in a.u.).
    #    Returns 0.0 if nlcc is not used.
    #    """
    #    return 0.0
    #
    #@property
    #def has_nlcc(self):
    #    """True if the pseudo is generated with non-linear core correction."""
    #    return True
    @abc.abstractproperty
    def paw_radius(self):
        """Radius of the PAW sphere in a.u."""
    @property
    def rcore(self):
        """Alias of paw_radius."""
        # Mirrors NcPseudo.rcore so client code can treat NC and PAW
        # pseudopotentials uniformly.
        return self.paw_radius
class AbinitPseudo(Pseudo):
    """
    An AbinitPseudo is a pseudopotential whose file contains an abinit header.
    """
    def __init__(self, path, header):
        """
        Args:
            path:
                Filename.
            header:
                `AbinitHeader` instance.
        """
        self.path = path
        self._summary = header.summary
        # Keep the parsed DOJO_REPORT (if any) so the Pseudo.has_dojo_report
        # machinery works without re-reading the file.
        if hasattr(header, "dojo_report"):
            self.dojo_report = header.dojo_report
        else:
            self.dojo_report = {}
        #self.pspcod = header.pspcod
        # Every header variable is stored as a private attribute, e.g.
        # "zatom" becomes self._zatom; the properties below expose them.
        for attr_name, desc in header.items():
            value = header.get(attr_name, None)
            # Hide these attributes since one should always use the public interface.
            setattr(self, "_" + attr_name, value)
    @property
    def summary(self):
        """Summary line reported in the ABINIT header."""
        return self._summary.strip()
    @property
    def Z(self):
        """The atomic number of the atom."""
        return self._zatom
    @property
    def Z_val(self):
        """Valence charge."""
        return self._zion
    @property
    def l_max(self):
        """Maximum angular momentum."""
        return self._lmax
    @property
    def l_local(self):
        """Angular momentum used for the local part."""
        return self._lloc
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
    """
    Norm-conserving pseudopotential in the Abinit format.
    """

    @property
    def summary(self):
        """Summary line taken from the ABINIT header."""
        return self._summary.strip()

    @property
    def Z(self):
        """Atomic number."""
        return self._zatom

    @property
    def Z_val(self):
        """Number of valence electrons."""
        return self._zion

    @property
    def l_max(self):
        """Maximum angular momentum."""
        return self._lmax

    @property
    def l_local(self):
        """Angular momentum used for the local part."""
        return self._lloc

    @property
    def nlcc_radius(self):
        """Cut-off radius (a.u.) of the non-linear core correction charge."""
        return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
    """Paw pseudopotential in the Abinit format."""

    @property
    def paw_radius(self):
        """PAW sphere radius in a.u. (``r_cut`` from the header)."""
        return self._r_cut

    #def orbitals(self):
class Hint(collections.namedtuple("Hint", "ecut aug_ratio")):
    """
    Suggested value for the cutoff energy [Hartree units] and the augmentation ratio (PAW pseudo)
    """

    def as_dict(self):
        """Return a {field_name: value} dictionary."""
        return dict(zip(self._fields, self))

    @classmethod
    def from_dict(cls, d):
        """Build a Hint from *d*, ignoring serialization keys starting with '@'."""
        kwargs = {key: val for key, val in d.items() if not key.startswith("@")}
        return cls(**kwargs)
def _dict_from_lines(lines, key_nums, sep=None):
    """
    Helper function to parse formatted text structured like:

        value1 value2 ... sep key1, key2 ...

    key_nums is a list giving the number of keys for each line. 0 if line should be skipped.
    sep is a string denoting the character that separates the keys from the value (None if
    no separator is present).

    Returns:
        dict{key1 : value1, key2 : value2, ...}

    Raises:
        ValueError if parsing fails.
    """
    if is_string(lines):
        lines = [lines]

    # Materialize generic iterables (e.g. generators): len() and indexing are
    # needed below.  The previous guard tested ``not isinstance(key_nums,
    # collections.Iterable)`` which could never help — list() of a
    # non-iterable raises anyway, while unsized iterables were left as-is.
    if not isinstance(key_nums, (list, tuple)):
        key_nums = list(key_nums)

    if len(lines) != len(key_nums):
        err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
        raise ValueError(err_msg)

    kwargs = Namespace()

    for (i, nk) in enumerate(key_nums):
        if nk == 0:
            continue
        line = lines[i]

        tokens = [t.strip() for t in line.split()]
        values, keys = tokens[:nk], "".join(tokens[nk:])

        # Strip the optional-group brackets, e.g. "rad_step[,log_step]".
        # The original called str.replace without using the result, which is
        # a no-op on an (immutable) string; assign the sanitized value.
        keys = keys.replace("[", "").replace("]", "")
        keys = keys.split(",")

        if sep is not None:
            check = keys[0][0]
            if check != sep:
                raise ValueError("Expecting separator %s, got %s" % (sep, check))
            keys[0] = keys[0][1:]

        if len(values) != len(keys):
            msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (line, keys, values)
            raise ValueError(msg)

        kwargs.update(zip(keys, values))

    return kwargs
class AbinitHeader(dict):
    """Dictionary whose keys can be also accessed as attributes."""
    def __getattr__(self, name):
        """Fall back to dict-key lookup when normal attribute lookup fails.

        Raises:
            AttributeError: if *name* is neither a real attribute nor a key,
                so hasattr() keeps working as expected.
        """
        try:
            # Default behaviour
            # (re-runs the standard machinery; __getattr__ is only invoked
            # after it failed once, so this normally raises again and we
            # fall through to the dict lookup below).
            return super(AbinitHeader, self).__getattribute__(name)
        except AttributeError:
            try:
                # Try in the dictionary.
                return self[name]
            except KeyError as exc:
                raise AttributeError(str(exc))
def _int_from_str(string):
"""
Convert string into integer
Raise:
TypeError if string is not a valid integer
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
else:
raise TypeError("Cannot convert string %s to int" % string)
class NcAbinitHeader(AbinitHeader):
    """
    The abinit header found in the NC pseudopotential files.
    """
    # (default, converter) pair for every recognized header variable.
    _attr_desc = collections.namedtuple("att", "default astype")
    _VARS = {
        # Mandatory
        "zatom" : _attr_desc(None, _int_from_str),
        "zion" : _attr_desc(None, float),
        "pspdat" : _attr_desc(None, float),
        "pspcod" : _attr_desc(None, int),
        "pspxc" : _attr_desc(None, int),
        "lmax" : _attr_desc(None, int),
        "lloc" : _attr_desc(None, int),
        "r2well" : _attr_desc(None, float),
        "mmax" : _attr_desc(None, float),
        # Optional variables for non linear-core correction. HGH does not have it.
        "rchrg" : _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.)
        "fchrg" : _attr_desc(0.0, float),
        "qchrg" : _attr_desc(0.0, float),
    }
    del _attr_desc
    def __init__(self, summary, **kwargs):
        """
        Args:
            summary: First line of the pseudopotential file.
            kwargs: Raw header variables (strings); each key listed in
                ``_VARS`` is converted with its ``astype`` callable and a
                RuntimeError is raised when a mandatory key is missing.
        """
        super(NcAbinitHeader, self).__init__()
        # APE uses llocal instead of lloc.
        if "llocal" in kwargs:
            kwargs["lloc"] = kwargs.pop("llocal")
        self.summary = summary.strip()
        for (key, desc) in NcAbinitHeader._VARS.items():
            default, astype = desc.default, desc.astype
            value = kwargs.pop(key, None)
            if value is None:
                value = default
                if default is None:
                    raise RuntimeError("Attribute %s must be specified" % key)
            else:
                try:
                    value = astype(value)
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # consider narrowing to (TypeError, ValueError).
                except:
                    raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
            self[key] = value
        # Add dojo_report
        self["dojo_report"] = kwargs.pop("dojo_report", {})
        #if kwargs:
        #    raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
    @staticmethod
    def fhi_header(filename, ppdesc):
        """Parse the FHI abinit header."""
        # Example:
        # Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
        # 21.00000 3.00000 940714 zatom, zion, pspdat
        # 1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
        # 1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
        lines = _read_nlines(filename, -1)
        # key_nums = [0, 3, 6, 3]: skip the summary line, then read 3, 6 and
        # 3 values from the following lines respectively.
        try:
            header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
        except ValueError:
            # The last record with rchrg ... seems to be optional.
            header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        header["dojo_report"] = read_dojo_report(filename)
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def hgh_header(filename, ppdesc):
        """Parse the HGH abinit header."""
        # Example:
        #Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
        # 10 8 010605 zatom,zion,pspdat
        # 3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
        lines = _read_nlines(filename, -1)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        header["dojo_report"] = read_dojo_report(filename)
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def oncvpsp_header(filename, ppdesc):
        """Parse the ONCVPSP abinit header."""
        # Example
        #Li ONCVPSP r_core= 2.01 3.02
        # 3.0000 3.0000 140504 zatom,zion,pspd
        # 8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
        # 5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
        # 2 2 0 0 0 nproj
        # 0 extension_switch
        # 0 -2.5000025868368D+00 -1.2006906995331D+00
        # 1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
        # 2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
        lines = _read_nlines(filename, -1)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        # ONCVPSP labels the date field "pspd"; rename it to the canonical
        # "pspdat" expected by _VARS.
        header.update({'pspdat': header['pspd']})
        header.pop('pspd')
        header["dojo_report"] = read_dojo_report(filename)
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def tm_header(filename, ppdesc):
        """Parse the TM abinit header."""
        # Example:
        #Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
        #100.00000 14.00000 940714 zatom, zion, pspdat
        # 1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
        # 0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
        # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        # 1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
        # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        # 2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
        # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        # 3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
        # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        # 3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
        lines = _read_nlines(filename, -1)
        header = []
        # First pass: collect the three fixed header lines and extract lmax,
        # which determines how many projector lines follow.
        for (lineno, line) in enumerate(lines):
            header.append(line)
            if lineno == 2:
                # Read lmax.
                tokens = line.split()
                pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
                mmax, r2well = map(float, tokens[4:6])
                #if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
                #    raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
                lines = lines[3:]
                break
        # TODO
        # Parse the section with the projectors.
        #0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
        #.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        # Each angular channel occupies two consecutive lines (5 + 4 values).
        projectors = collections.OrderedDict()
        for idx in range(2*(lmax+1)):
            line = lines[idx]
            if idx % 2 == 0: proj_info = [line,]
            if idx % 2 == 1:
                proj_info.append(line)
                d = _dict_from_lines(proj_info, [5,4])
                projectors[int(d["l"])] = d
        # Add the last line with info on nlcc.
        header.append(lines[idx+1])
        summary = header[0]
        header = _dict_from_lines(header, [0,3,6,3])
        header["dojo_report"] = read_dojo_report(filename)
        return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
"""
The abinit header found in the PAW pseudopotential files.
"""
_attr_desc = collections.namedtuple("att", "default astype")
_VARS = {
"zatom" : _attr_desc(None, _int_from_str),
"zion" : _attr_desc(None, float),
"pspdat" : _attr_desc(None, float),
"pspcod" : _attr_desc(None, int),
"pspxc" : _attr_desc(None, int),
"lmax" : _attr_desc(None, int),
"lloc" : _attr_desc(None, int),
"mmax" : _attr_desc(None, int),
"r2well" : _attr_desc(None, float),
"pspfmt" : _attr_desc(None, str),
"creatorID" : _attr_desc(None, int),
"basis_size" : _attr_desc(None, int),
"lmn_size" : _attr_desc(None, int),
"orbitals" : _attr_desc(None, list),
"number_of_meshes": _attr_desc(None, int),
"r_cut" : _attr_desc(None, float), # r_cut(PAW) in the header
"shape_type" : _attr_desc(None, int),
"rshape" : _attr_desc(None, float),
}
del _attr_desc
    def __init__(self, summary, **kwargs):
        """
        Args:
            summary: First line of the pseudopotential file.
            kwargs: Raw header variables (strings); every key listed in
                ``_VARS`` must be present (no defaults for PAW) and is
                converted with its ``astype`` callable.

        Raises:
            RuntimeError: if a mandatory variable is missing, a value cannot
                be converted, or unknown variables remain in kwargs.
        """
        super(PawAbinitHeader, self).__init__()
        self.summary = summary.strip()
        for (key, desc) in self._VARS.items():
            default, astype = desc.default, desc.astype
            value = kwargs.pop(key, None)
            if value is None:
                value = default
                if default is None:
                    raise RuntimeError("Attribute %s must be specified" % key)
            else:
                try:
                    value = astype(value)
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # consider narrowing to (TypeError, ValueError).
                except:
                    raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
            self[key] = value
        # Unlike NcAbinitHeader, leftover variables are treated as an error.
        if kwargs:
            raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
@staticmethod
def paw_header(filename, ppdesc):
"""Parse the PAW abinit header."""
#Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
# 28.000 18.000 20061204 : zatom,zion,pspdat
# 7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
# paw3 1305 : pspfmt,creatorID
# 5 13 : basis_size,lmn_size
# 0 0 1 1 2 : orbitals
# 3 : number_of_meshes
# 1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
# 2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
# 3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
# 2.3000000000 : r_cut(SPH)
# 2 0.
# Example
#C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
# 6.000 4.000 20090106 : zatom,zion,pspdat
# 7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
# paw4 2230 : pspfmt,creatorID
# 4 8 : basis_size,lmn_size
# 0 0 1 1 : orbitals
# 5 : number_of_meshes
# 1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
# 2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
# 3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
# 4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
# 5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
# 1.5550009124 : r_cut(PAW)
# 3 0. : shape_type,rshape
#Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
# 14.000 4.000 20120814 : zatom,zion,pspdat
# 7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
# paw5 1331 : pspfmt,creatorID
# 4 8 : basis_size,lmn_size
# 0 0 1 1 : orbitals
# 5 : number_of_meshes
# 1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
# 2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
# 3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
# 4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
# 5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
# 1.5669671236 : r_cut(PAW)
# 2 0. : shape_type,rshape
supported_formats = ["paw3", "paw4", "paw5"]
if ppdesc.format not in supported_formats:
raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
lines = _read_nlines(filename, -1)
summary = lines[0]
header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
lines = lines[5:]
# TODO
# Parse orbitals and number of meshes.
header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
#print filename, header
# Skip meshes =
lines = lines[2+num_meshes:]
#for midx in range(num_meshes):
# l = midx + 1
#print lines[0]
header["r_cut"] = float(lines[0].split(":")[0])
#print lines[1]
header.update(_dict_from_lines(lines[1], [2], sep=":"))
report = read_dojo_report(filename)
if report:
header["dojo_report"] = report
#print("PAW header\n", header)
return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
    """Root of the exception hierarchy raised by `PseudoParser`."""
class PseudoParser(object):
    """
    Responsible for parsing pseudopotential files and returning pseudopotential objects.

    Usage::

        pseudo = PseudoParser().parse("filename")
    """
    Error = PseudoParserError

    # Supported values of pspcod: descriptor carries
    # (pspcod, human-readable name, pseudo type, file format).
    ppdesc = collections.namedtuple("ppdesc", "pspcod name psp_type format")

    # TODO Recheck
    _PSPCODES = collections.OrderedDict({
        1: ppdesc(1, "TM", "NC", None),
        3: ppdesc(3, "HGH", "NC", None),
        #4: ppdesc(4, "NC", , None),
        #5: ppdesc(5, "NC", , None),
        6: ppdesc(6, "FHI", "NC", None),
        # BUGFIX: the descriptor for pspcod 7 used to carry pspcod=6.
        7: ppdesc(7, "PAW_abinit_text", "PAW", None),
        8: ppdesc(8, "ONCVPSP", "NC", None),
        10: ppdesc(10, "HGHK", "NC", None),
    })
    del ppdesc

    # renumber functionals from oncvpsp. TODO: confirm that 3 is 2
    _FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
                    2: {'n': 5, 'name': 'HL'},
                    3: {'n': 2, 'name': 'PWCA'},
                    4: {'n': 11, 'name': 'PBE'}}

    def __init__(self):
        # List of files that have been parsed successfully.
        self._parsed_paths = []
        # List of files that could not be parsed.
        self._wrong_paths = []

    def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
        """
        Analyze the files contained in directory dirname.

        Args:
            dirname:
                directory path
            exclude_exts:
                list of file extensions that should be skipped.
            exclude_fnames:
                list of file names that should be skipped.

        Returns:
            List of pseudopotential objects.
        """
        # Normalize the extensions so that they all start with a dot.
        # BUGFIX: the old code assigned back into exclude_exts, which raises
        # TypeError when the caller passes a tuple (the default) and silently
        # mutates the caller's list otherwise.
        exts = []
        for ext in exclude_exts:
            ext = ext.strip()
            if not ext.startswith("."):
                ext = "." + ext
            exts.append(ext)

        # Exclude files depending on the extension.
        paths = []
        for fname in os.listdir(dirname):
            root, ext = os.path.splitext(fname)
            path = os.path.join(dirname, fname)
            if (ext in exts or fname in exclude_fnames or
                    fname.startswith(".") or not os.path.isfile(path)):
                continue
            paths.append(path)

        pseudos = []
        for path in paths:
            # Parse the file and generate the pseudo (best effort: a file
            # that fails to parse is recorded and skipped, not fatal).
            try:
                pseudo = self.parse(path)
            except Exception:
                pseudo = None

            if pseudo is not None:
                pseudos.append(pseudo)
                # BUGFIX: extend(path) appended the path one character at a
                # time; append stores the whole path.
                self._parsed_paths.append(path)
            else:
                self._wrong_paths.append(path)

        return pseudos

    def read_ppdesc(self, filename):
        """
        Read the pseudopotential descriptor from file filename.

        Returns:
            Pseudopotential descriptor. None if filename is not a valid pseudopotential file.

        Raises:
            `PseudoParserError` if fileformat is not supported.
        """
        if filename.endswith(".xml"):
            raise self.Error("XML pseudo not supported yet")

        # Assume a file with the abinit header: pspcod and pspxc are the
        # first two tokens of the third line.
        lines = _read_nlines(filename, 80)

        for (lineno, line) in enumerate(lines):
            if lineno == 2:
                try:
                    tokens = line.split()
                    pspcod, pspxc = map(int, tokens[:2])
                except Exception:
                    msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (filename, line)
                    sys.stderr.write(msg)
                    return None

                if pspcod not in self._PSPCODES:
                    raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))

                ppdesc = self._PSPCODES[pspcod]

                if pspcod == 7:
                    # PAW -> need to know the format pspfmt from the next line.
                    tokens = lines[lineno + 1].split()
                    pspfmt, creatorID = tokens[:2]
                    ppdesc = ppdesc._replace(format=pspfmt)

                return ppdesc

        return None

    def parse(self, filename):
        """
        Read and parse a pseudopotential file. Main entry point for client code.

        Returns:
            pseudopotential object or None if filename is not a valid pseudopotential file.

        Raises:
            `PseudoParserError` if the header cannot be parsed.
        """
        path = os.path.abspath(filename)

        # Only PAW supports XML at present.
        if filename.endswith(".xml"):
            return PawXmlSetup(path)

        ppdesc = self.read_ppdesc(path)
        if ppdesc is None:
            return None

        psp_type = ppdesc.psp_type

        # Map descriptor name -> header parser.
        parsers = {
            "FHI": NcAbinitHeader.fhi_header,
            "TM": NcAbinitHeader.tm_header,
            "HGH": NcAbinitHeader.hgh_header,
            "HGHK": NcAbinitHeader.hgh_header,
            "ONCVPSP": NcAbinitHeader.oncvpsp_header,
            "PAW_abinit_text": PawAbinitHeader.paw_header,
        }

        try:
            header = parsers[ppdesc.name](path, ppdesc)
        except Exception:
            # Re-raise as a parser error carrying the offending path and the
            # formatted traceback of the original failure.
            raise self.Error(path + ":\n" + straceback())

        if psp_type == "NC":
            pseudo = NcAbinitPseudo(path, header)
        elif psp_type == "PAW":
            pseudo = PawAbinitPseudo(path, header)
        else:
            raise NotImplementedError("psp_type not in [NC, PAW]")

        return pseudo
#TODO use RadialFunction from pseudo_dojo.
class RadialFunction(collections.namedtuple("RadialFunction", "mesh values")):
    """Immutable (mesh, values) pair: a function sampled on a radial grid."""
class PawXmlSetup(Pseudo, PawPseudo):
    """
    PAW setup initialized from an XML file (the "paw_setup" format).

    The XML tree is parsed lazily (see `root`) so that instances remain
    pickleable; radial functions are parsed on first access and cached.
    """
    def __init__(self, filepath):
        # FIXME
        self.dojo_report = {}
        self.path = os.path.abspath(filepath)

        # Get the XML root (parsed lazily so that the object is pickleable;
        # the Element tree is dropped in __getstate__).
        root = self.root

        # Get the version of the XML format.
        self.paw_setup_version = root.get("version")

        # Info on the atom.
        atom_attrib = root.find("atom").attrib

        #self._symbol = atom_attrib["symbol"]
        self._zatom = int(float(atom_attrib["Z"]))
        self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])

        #xc_info = root.find("atom").attrib
        #self.xc_type, self.xc_name = xc_info["type"], xc_info["name"]
        #self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}

        # Old XML files do not define the PAW_radius element!
        # In this case we set the PAW radius to None.
        pawr_element = root.find("PAW_radius")
        self._paw_radius = None
        if pawr_element is not None:
            self._paw_radius = float(pawr_element.attrib["rpaw"])

        #<valence_states>
        #  <state n="2" l="0" f="2"  rc="1.10" e="-0.6766" id="N-2s"/>
        #  <state n="2" l="1" f="3"  rc="1.10" e="-0.2660" id="N-2p"/>
        #  <state       l="0"        rc="1.10" e=" 0.3234" id="N-s1"/>
        #  <state       l="1"        rc="1.10" e=" 0.7340" id="N-p1"/>
        #  <state       l="2"        rc="1.10" e=" 0.0000" id="N-d1"/>
        #</valence_states>
        #
        # The valence_states element contains several state elements.
        # Bound eigenstates carry f and n attributes; the unbound states do
        # not, so only the bound ones are used for constructing an initial
        # guess for the wave functions.
        self.valence_states = {}
        for node in root.find("valence_states"):
            attrib = AttrDict(node.attrib)
            assert attrib.id not in self.valence_states
            self.valence_states[attrib.id] = attrib

        # Parse the radial grids, keyed by their XML id.
        self.rad_grids = {}
        for node in root.findall("radial_grid"):
            grid_params = node.attrib
            gid = grid_params["id"]
            assert gid not in self.rad_grids
            # BUGFIX: this used to be `self.rad_grids[id]` (the *builtin* id),
            # so every grid was stored under the same wrong key and lookups
            # by grid id in _parse_radfunc raised KeyError.
            self.rad_grids[gid] = self._eval_grid(grid_params)

    def __getstate__(self):
        """
        Return state is pickled as the contents for the instance.

        In this case we just remove the XML root element since Element
        objects cannot be pickled.
        """
        return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}

    @property
    def root(self):
        """Root Element of the XML tree, parsed lazily and cached."""
        try:
            return self._root
        except AttributeError:
            # cElementTree was removed in Python 3.9; ElementTree exposes the
            # same API (and is C-accelerated on modern interpreters).
            from xml.etree import ElementTree as Et
            # NOTE(review): __init__ sets self.path; `filepath` is presumably
            # an alias provided by the Pseudo base class -- confirm.
            tree = Et.parse(self.filepath)
            self._root = tree.getroot()
            return self._root

    @property
    def Z(self):
        return self._zatom

    @property
    def Z_val(self):
        """Number of valence electrons."""
        return self.valence

    # FIXME
    @property
    def l_max(self):
        """Maximum angular momentum."""
        return None

    @property
    def l_local(self):
        """Angular momentum used for the local part."""
        return None

    @property
    def summary(self):
        """String summarizing the most important properties."""
        return ""

    @property
    def paw_radius(self):
        return self._paw_radius

    @staticmethod
    def _eval_grid(grid_params):
        """
        This function receives a dictionary with the parameters defining the
        radial mesh and returns a `ndarray` with the mesh.
        """
        eq = grid_params.get("eq").replace(" ", "")
        istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
        indices = list(range(istart, iend + 1))

        if eq == 'r=a*exp(d*i)':
            a, d = float(grid_params['a']), float(grid_params['d'])
            mesh = [a * np.exp(d * i) for i in indices]
        elif eq == 'r=a*i/(n-i)':
            a, n = float(grid_params['a']), float(grid_params['n'])
            mesh = [a * i / (n - i) for i in indices]
        elif eq == 'r=a*(exp(d*i)-1)':
            a, d = float(grid_params['a']), float(grid_params['d'])
            mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
        elif eq == 'r=d*i':
            d = float(grid_params['d'])
            mesh = [d * i for i in indices]
        elif eq == 'r=(i/n+a)^5/a-a^4':
            a, n = float(grid_params['a']), float(grid_params['n'])
            mesh = [(i / n + a)**5 / a - a**4 for i in indices]
        else:
            raise ValueError('Unknown grid type: %s' % eq)

        return np.array(mesh)

    def _parse_radfunc(self, func_name):
        """Parse the first occurrence of func_name in the XML file."""
        node = self.root.find(func_name)
        grid = node.attrib["grid"]
        values = np.array([float(s) for s in node.text.split()])
        return self.rad_grids[grid], values, node.attrib

    def _parse_all_radfuncs(self, func_name):
        """Parse all the nodes with tag func_name in the XML file."""
        for node in self.root.findall(func_name):
            grid = node.attrib["grid"]
            values = np.array([float(s) for s in node.text.split()])
            yield self.rad_grids[grid], values, node.attrib

    @property
    def ae_core_density(self):
        """The all-electron radial density."""
        try:
            return self._ae_core_density
        except AttributeError:
            mesh, values, attrib = self._parse_radfunc("ae_core_density")
            self._ae_core_density = RadialFunction(mesh, values)
            return self._ae_core_density

    @property
    def pseudo_core_density(self):
        """The pseudized radial density."""
        try:
            return self._pseudo_core_density
        except AttributeError:
            mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
            self._pseudo_core_density = RadialFunction(mesh, values)
            return self._pseudo_core_density

    @property
    def ae_partial_waves(self):
        """Dictionary with the AE partial waves indexed by state."""
        try:
            return self._ae_partial_waves
        except AttributeError:
            self._ae_partial_waves = {}
            for (mesh, values, attrib) in self._parse_all_radfuncs("ae_partial_wave"):
                state = attrib["state"]
                self._ae_partial_waves[state] = RadialFunction(mesh, values)
            return self._ae_partial_waves

    @property
    def pseudo_partial_waves(self):
        """Dictionary with the pseudo partial waves indexed by state."""
        try:
            return self._pseudo_partial_waves
        except AttributeError:
            self._pseudo_partial_waves = {}
            for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
                state = attrib["state"]
                self._pseudo_partial_waves[state] = RadialFunction(mesh, values)
            return self._pseudo_partial_waves

    @property
    def projector_functions(self):
        """Dictionary with the PAW projectors indexed by state."""
        try:
            return self._projector_functions
        except AttributeError:
            self._projector_functions = {}
            for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
                state = attrib["state"]
                self._projector_functions[state] = RadialFunction(mesh, values)
            return self._projector_functions

    def plot_densities(self, **kwargs):
        """
        Plot the PAW densities.

        ================ ==============================================================
        kwargs           Meaning
        ================ ==============================================================
        title            Title of the plot (Default: "Densities").
        show             True to show the figure (Default).
        savefig          'abc.png' or 'abc.eps' to save the figure to a file.
        ================ ==============================================================

        Returns:
            `matplotlib` figure
        """
        title = kwargs.pop("title", "Densities")
        show = kwargs.pop("show", True)
        savefig = kwargs.pop("savefig", None)

        import matplotlib.pyplot as plt

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.grid(True)
        ax.set_xlabel('r [Bohr]')
        #ax.set_ylabel('density')

        for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
            rden = getattr(self, den_name)
            # BUGFIX: labels were swapped — the all-electron density is n_c,
            # the pseudized one is \tilde{n}_c.
            label = "$n_c$" if i == 0 else "$\\tilde{n}_c$"
            ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)

        plt.legend(loc="best")

        if title is not None:
            fig.suptitle(title)
        if show:
            plt.show()
        if savefig:
            fig.savefig(savefig)

        return fig

    def plot_waves(self, **kwargs):
        """
        Plot the AE and the pseudo partial waves.

        ================ ==============================================================
        kwargs           Meaning
        ================ ==============================================================
        title            Title of the plot (Default: "Partial Waves").
        show             True to show the figure (Default).
        savefig          'abc.png' or 'abc.eps' to save the figure to a file.
        ================ ==============================================================

        Returns:
            `matplotlib` figure
        """
        title = kwargs.pop("title", "Partial Waves")
        show = kwargs.pop("show", True)
        savefig = kwargs.pop("savefig", None)

        import matplotlib.pyplot as plt

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.grid(True)
        ax.set_xlabel("r [Bohr]")
        ax.set_ylabel("$r\phi,\\, r\\tilde\phi\, [Bohr]^{-\\frac{1}{2}}$")
        # Mark the PAW augmentation radius.
        ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
        #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))

        for state, rfunc in self.pseudo_partial_waves.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)

        for state, rfunc in self.ae_partial_waves.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)

        plt.legend(loc="best")

        if title is not None:
            fig.suptitle(title)
        if show:
            plt.show()
        if savefig:
            fig.savefig(savefig)

        return fig

    def plot_projectors(self, **kwargs):
        """
        Plot the PAW projectors.

        ================ ==============================================================
        kwargs           Meaning
        ================ ==============================================================
        title            Title of the plot (Default: "Projectors").
        show             True to show the figure (Default).
        savefig          'abc.png' or 'abc.eps' to save the figure to a file.
        ================ ==============================================================

        Returns:
            `matplotlib` figure
        """
        title = kwargs.pop("title", "Projectors")
        show = kwargs.pop("show", True)
        savefig = kwargs.pop("savefig", None)

        import matplotlib.pyplot as plt

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.grid(True)
        ax.set_xlabel('r [Bohr]')
        ax.set_ylabel("$r\\tilde p\, [Bohr]^{-\\frac{1}{2}}$")
        ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
        #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))

        for state, rfunc in self.projector_functions.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)

        plt.legend(loc="best")

        if title is not None:
            fig.suptitle(title)
        if show:
            plt.show()
        if savefig:
            fig.savefig(savefig)

        return fig
class PseudoTable(collections.Sequence):
    """
    Define the pseudopotentials from the element table.
    Individual elements are accessed by name, symbol or atomic number.

    For example, the following all retrieve iron:

        print elements[26]
        Fe
        print elements.Fe
        Fe
        print elements.symbol('Fe')
        Fe
        print elements.name('iron')
        Fe
        print elements.isotope('Fe')
        Fe
    """
    # NOTE(review): collections.Sequence / collections.Iterable moved to
    # collections.abc in Python 3.3 and the old aliases were removed in 3.10;
    # kept as-is here for compatibility with the rest of this module.

    @classmethod
    def as_table(cls, items):
        """Return an instance of `PseudoTable` from the iterable items."""
        if isinstance(items, cls):
            return items
        return cls(items)

    def __init__(self, pseudos):
        """
        Args:
            pseudos:
                List of pseudopotentials or filepaths.
        """
        # Store pseudos in a default dictionary with z as key.
        # Note that we can have more than one pseudo for a given z,
        # hence the values are lists of pseudos.
        if not isinstance(pseudos, collections.Iterable):
            pseudos = [pseudos]

        if is_string(pseudos[0]):
            pseudos = list_strings(pseudos)

        self._pseudos_with_z = collections.defaultdict(list)

        for pseudo in pseudos:
            p = pseudo
            if not isinstance(pseudo, Pseudo):
                p = Pseudo.from_file(pseudo)
            self._pseudos_with_z[p.Z].append(p)

        # Expose one attribute per chemical symbol holding its pseudo list;
        # all pseudos sharing a Z must agree on the symbol.
        for z in self.zlist:
            pseudo_list = self._pseudos_with_z[z]
            symbols = [p.symbol for p in pseudo_list]
            symbol = symbols[0]
            if any(symb != symbol for symb in symbols):
                raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
            setattr(self, symbol, pseudo_list)

    def __getitem__(self, Z):
        """
        Retrieve pseudos for the atomic number Z.
        Accepts both int and slice objects.
        """
        if isinstance(Z, slice):
            assert Z.stop is not None
            pseudos = []
            for znum in iterator_from_slice(Z):
                pseudos.extend(self._pseudos_with_z[znum])
            return pseudos
        else:
            return self._pseudos_with_z[Z]

    def __len__(self):
        return len(list(self.__iter__()))

    def __iter__(self):
        """Process the elements in Z order."""
        for z in self.zlist:
            for pseudo in self._pseudos_with_z[z]:
                yield pseudo

    def __repr__(self):
        return "<%s at %s>" % (self.__class__.__name__, id(self))

    def __str__(self):
        lines = []
        app = lines.append
        app("<%s, len=%d>" % (self.__class__.__name__, len(self)))
        for pseudo in self:
            app(str(pseudo))
        return "\n".join(lines)

    @property
    def allnc(self):
        """True if all pseudos are norm-conserving."""
        return all(p.isnc for p in self)

    @property
    def allpaw(self):
        """True if all pseudos are PAW."""
        return all(p.ispaw for p in self)

    @property
    def zlist(self):
        """Ordered list with the atomic numbers available in the table."""
        return sorted(self._pseudos_with_z.keys())

    def as_dict(self, **kwargs):
        """Return {name: pseudo.as_dict()}; duplicate names get a #<count> suffix."""
        d = {}
        for p in self:
            k, count = p.name, 1
            # Handle multiple pseudos with the same name!
            # BUGFIX: the loop condition used to be inverted
            # (`while k not in d`, an infinite loop on the first pseudo) and
            # the entry was added with d.update({k, ...}) -- a *set* literal
            # -- instead of a dict item.
            while k in d:
                k = p.name + "#" + str(count)
                count += 1
            d[k] = p.as_dict()
        return d

    def is_complete(self, zmax=118):
        """
        True if table is complete i.e. all elements with Z < zmax
        have at least one pseudopotential.
        """
        for z in range(1, zmax):
            if not self[z]:
                return False
        return True

    def pseudos_with_symbol(self, symbol):
        """
        Return the list of pseudopotentials in the table with the given symbol.
        Return an empty list if no pseudo is available.
        """
        try:
            return getattr(self, str(symbol))
        except AttributeError:
            return []

    def pseudo_from_name(self, name):
        """Return the pseudo in the table with the given name, or None."""
        for pseudo in self:
            if pseudo.name == name:
                return pseudo
        return None

    def list_properties(self, *props, **kw):
        """
        Print a list of elements with the given set of properties.

        Args:
            *prop1*, *prop2*, ... : string
                Name of the properties to print
            *format*: string
                Template for displaying the element properties, with one
                % for each property.

        For example, print a table of mass and density.

            from periodictable import elements
            elements.list_properties('symbol','mass','density', format="%-2s: %6.2f u %5.2f g/cm^3")
            H :   1.01 u   0.07 g/cm^3
            He:   4.00 u   0.12 g/cm^3
            Li:   6.94 u   0.53 g/cm^3
            ...
            Bk: 247.00 u  14.00 g/cm^3
        """
        format = kw.pop('format', None)
        assert len(kw) == 0

        for pseudo in self:
            try:
                values = tuple(getattr(pseudo, p) for p in props)
            except AttributeError:
                # Skip elements which don't define all the attributes
                continue

            # Skip elements with a value of None
            if any(v is None for v in values):
                continue

            if format is None:
                print(" ".join(str(p) for p in values))
            else:
                try:
                    print(format % values)
                except Exception:
                    print("format", format, "args", values)
                    raise

    def sorted(self, attrname, reverse=False):
        """Sort the table according to the value of attribute attrname."""
        attrs = []
        # BUGFIX: was `for i, pseudo in self`, which tries to unpack each
        # Pseudo object; enumerate provides the positional index.
        for i, pseudo in enumerate(self):
            try:
                a = getattr(pseudo, attrname)
            except AttributeError:
                # Missing attribute sorts last (ascending order).
                a = np.inf
            attrs.append((i, a))

        # Sort attrs, and build a new table with the sorted pseudos.
        # BUGFIX: indices refer to the iteration order, so resolve them
        # against a materialized list (self[i] would index by Z instead).
        plist = list(self)
        attrs = sorted(attrs, key=lambda t: t[1], reverse=reverse)
        return PseudoTable([plist[i] for i, _ in attrs])

    def select(self, condition):
        """
        Select only those pseudopotentials for which condition is True.

        Args:
            condition:
                Function that accepts a `Pseudo` object and returns True or False.
        """
        return PseudoTable([p for p in self if condition(p)])

    def with_dojo_report(self):
        """Select pseudos containing the DOJO_REPORT section."""
        return self.select(condition=lambda p: p.has_dojo_report)
| yanikou19/pymatgen | pymatgen/io/abinitio/pseudos.py | Python | mit | 57,656 | [
"ABINIT",
"pymatgen"
] | de7fbeca32d8e926e204cafbefbd5e097901576349c5e8fec39f61687e79ebf2 |
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
# Public API of this module (the classic fmin_* drivers plus the Rosenbrock
# test functions and shared result/warning types).
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
           'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
           'line_search', 'check_grad', 'OptimizeResult', 'show_options',
           'OptimizeWarning']

__docformat__ = "restructuredtext en"

import warnings
import sys
import numpy

from scipy._lib.six import callable, xrange
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
                   vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
                         line_search_wolfe2 as line_search,
                         LineSearchWarning)
from scipy._lib._util import getargspec_no_self as _getargspec

# Standard status messages shared by the optimizers when filling in
# OptimizeResult.message on termination.
_status_message = {'success': 'Optimization terminated successfully.',
                   'maxfev': 'Maximum number of function evaluations has '
                             'been exceeded.',
                   'maxiter': 'Maximum number of iterations has been '
                              'exceeded.',
                   'pr_loss': 'Desired error not necessarily achieved due '
                              'to precision loss.'}
class MemoizeJac(object):
    """ Decorator that caches the value and gradient of a function that
    returns both, so that separate value/gradient queries at the same
    point trigger only one evaluation of the underlying function. """
    def __init__(self, fun):
        self.fun = fun    # wrapped callable returning (value, gradient)
        self.jac = None   # gradient cached from the last evaluation
        self.x = None     # point at which the cache is valid

    def __call__(self, x, *args):
        """ Evaluate fun at x, cache the gradient, and return the value. """
        self.x = numpy.asarray(x).copy()
        fg = self.fun(x, *args)
        self.jac = fg[1]
        return fg[0]

    def derivative(self, x, *args):
        """ Return the gradient at x, reusing the cache when x matches. """
        # BUGFIX/modernization: numpy.alltrue was removed in NumPy 2.0;
        # numpy.all is the long-standing equivalent.
        if self.jac is not None and numpy.all(x == self.x):
            return self.jac
        else:
            self(x, *args)
            return self.jac
class OptimizeResult(dict):
    """ Represents the optimization result.

    A plain dict whose entries are also reachable as attributes
    (``res.x`` is ``res['x']``).

    Attributes
    ----------
    x : ndarray
        The solution of the optimization.
    success : bool
        Whether or not the optimizer exited successfully.
    status : int
        Termination status of the optimizer. Its value depends on the
        underlying solver. Refer to `message` for details.
    message : str
        Description of the cause of the termination.
    fun, jac, hess: ndarray
        Values of objective function, its Jacobian and its Hessian (if
        available). The Hessians may be approximations, see the documentation
        of the function in question.
    hess_inv : object
        Inverse of the objective function's Hessian; may be an approximation.
        Not available for all solvers. The type of this attribute may be
        either np.ndarray or scipy.sparse.linalg.LinearOperator.
    nfev, njev, nhev : int
        Number of evaluations of the objective functions and of its
        Jacobian and Hessian.
    nit : int
        Number of iterations performed by the optimizer.
    maxcv : float
        The maximum constraint violation.

    Notes
    -----
    Additional attributes may be present depending on the specific solver;
    `keys()` lists everything that was recorded.
    """
    def __getattr__(self, name):
        # Attribute access falls back to the dict entries.
        if name in self:
            return self[name]
        raise AttributeError(name)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __repr__(self):
        if not self.keys():
            return self.__class__.__name__ + "()"
        # Right-align the keys so the values line up in one column.
        width = max(len(k) for k in self.keys()) + 1
        rows = [k.rjust(width) + ': ' + repr(v)
                for k, v in sorted(self.items())]
        return '\n'.join(rows)

    def __dir__(self):
        return list(self)
class OptimizeWarning(UserWarning):
    """Warning category used by the optimizers (e.g. for unknown options)."""
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
    """Test whether `x` is either a scalar or an array scalar.
    """
    n_elements = np.size(x)
    return n_elements == 1
# Default finite-difference step: sqrt of machine epsilon (~1.49e-8 for float64).
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
    """Return the `ord`-norm of `x` along axis 0.

    ord=Inf gives the max-abs norm, ord=-Inf the min-abs norm, and any
    other value the generalized p-norm sum(|x|**ord)**(1/ord).
    """
    if ord == Inf:
        return numpy.amax(numpy.abs(x))
    if ord == -Inf:
        return numpy.amin(numpy.abs(x))
    return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
    """
    The Rosenbrock function.

    The function computed is::

        sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Rosenbrock function is to be computed.

    Returns
    -------
    f : float
        The value of the Rosenbrock function.

    See Also
    --------
    rosen_der, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    head = x[:-1]
    tail = x[1:]
    return numpy.sum(100.0 * (tail - head**2.0)**2.0 + (1 - head)**2.0,
                     axis=0)
def rosen_der(x):
    """
    The derivative (i.e. gradient) of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the derivative is to be computed.

    Returns
    -------
    rosen_der : (N,) ndarray
        The gradient of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    mid = x[1:-1]
    left = x[:-2]
    right = x[2:]
    grad = numpy.zeros_like(x)
    # Interior points couple to both neighbours; the endpoints have only one.
    grad[1:-1] = (200 * (mid - left**2) -
                  400 * (right - mid**2) * mid - 2 * (1 - mid))
    grad[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
    grad[-1] = 200 * (x[-1] - x[-2]**2)
    return grad
def rosen_hess(x):
    """
    The Hessian matrix of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.

    Returns
    -------
    rosen_hess : ndarray
        The Hessian matrix of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_der, rosen_hess_prod
    """
    x = atleast_1d(x)
    # The Hessian is tridiagonal: identical off-diagonals -400*x[:-1].
    off_diag = -400 * x[:-1]
    H = numpy.diag(off_diag, 1) + numpy.diag(off_diag, -1)
    diag = numpy.zeros(len(x), dtype=x.dtype)
    diag[0] = 1200 * x[0]**2 - 400 * x[1] + 2
    diag[-1] = 200
    diag[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
    return H + numpy.diag(diag)
def rosen_hess_prod(x, p):
    """
    Product of the Hessian matrix of the Rosenbrock function with a vector.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.
    p : array_like
        1-D array, the vector to be multiplied by the Hessian matrix.

    Returns
    -------
    rosen_hess_prod : ndarray
        The Hessian matrix of the Rosenbrock function at `x` multiplied
        by the vector `p`.

    See Also
    --------
    rosen, rosen_der, rosen_hess
    """
    x = atleast_1d(x)
    prod = numpy.zeros(len(x), dtype=x.dtype)
    # Tridiagonal structure: each row touches at most three entries of p.
    prod[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
    prod[1:-1] = (-400 * x[:-2] * p[:-2] +
                  (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
                  400 * x[1:-1] * p[2:])
    prod[-1] = -400 * x[-2] * p[-2] + 200 * p[-1]
    return prod
def wrap_function(function, args):
    """Wrap `function`, appending `args` to every call and counting calls.

    Returns a pair ``(ncalls, wrapper)`` where ``ncalls`` is a one-element
    list holding the running call count.  If `function` is None the wrapper
    is None as well.
    """
    counter = [0]
    if function is None:
        return counter, None

    def wrapper(*call_args):
        counter[0] += 1
        return function(*(call_args + args))

    return counter, wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):
    """
    Minimize a function using the downhill simplex (Nelder-Mead) algorithm.

    Only function values are used; no derivative information is required.

    Parameters
    ----------
    func : callable func(x,*args)
        The objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func, i.e. ``f(x,*args)``.
    xtol : float, optional
        Absolute error in xopt between iterations that is acceptable for
        convergence.
    ftol : number, optional
        Absolute error in func(xopt) between iterations that is acceptable
        for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : number, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        Set to True if fopt and warnflag outputs are desired.
    disp : bool, optional
        Set to True to print convergence messages.
    retall : bool, optional
        Set to True to return list of solutions at each iteration.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.
    initial_simplex : array_like of shape (N + 1, N), optional
        Initial simplex. If given, overrides `x0`.

    Returns
    -------
    xopt : ndarray
        Parameter that minimizes function.
    fopt : float
        Value of function at minimum (only if `full_output`).
    iter : int
        Number of iterations performed (only if `full_output`).
    funcalls : int
        Number of function calls made (only if `full_output`).
    warnflag : int
        1 if the maximum number of function evaluations was made,
        2 if the maximum number of iterations was reached
        (only if `full_output`).
    allvecs : list
        Solution at each iteration (only if `retall`).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Nelder-Mead' `method` in particular.

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313
    """
    # Translate the legacy keyword spellings into the options dict that the
    # modern solver entry point understands, then unpack its result object.
    options = {'xatol': xtol,
               'fatol': ftol,
               'maxiter': maxiter,
               'maxfev': maxfun,
               'disp': disp,
               'return_all': retall,
               'initial_simplex': initial_simplex}
    res = _minimize_neldermead(func, x0, args, callback=callback, **options)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['nit'], res['nfev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_neldermead(func, x0, args=(), callback=None,
                         maxiter=None, maxfev=None, disp=False,
                         return_all=False, initial_simplex=None,
                         xatol=1e-4, fatol=1e-4, **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*200``, where ``N`` is the number of
        variables, if neither `maxiter` or `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    initial_simplex : array_like of shape (N + 1, N)
        Initial simplex. If given, overrides `x0`.
        ``initial_simplex[j,:]`` should contain the coordinates of
        the j-th vertex of the ``N+1`` vertices in the simplex, where
        ``N`` is the dimension.
    xatol : float, optional
        Absolute error in xopt between iterations that is acceptable for
        convergence.
    fatol : number, optional
        Absolute error in func(xopt) between iterations that is acceptable for
        convergence.
    """
    # Back-compat shim: accept the deprecated ftol/xtol spellings.  The
    # isclose test against the default (1e-4) guesses whether the caller
    # set only the old name; in that case the old value wins.
    if 'ftol' in unknown_options:
        warnings.warn("ftol is deprecated for Nelder-Mead,"
                      " use fatol instead. If you specified both, only"
                      " fatol is used.",
                      DeprecationWarning)
        if (np.isclose(fatol, 1e-4) and
                not np.isclose(unknown_options['ftol'], 1e-4)):
            # only ftol was probably specified, use it.
            fatol = unknown_options['ftol']
        unknown_options.pop('ftol')
    if 'xtol' in unknown_options:
        warnings.warn("xtol is deprecated for Nelder-Mead,"
                      " use xatol instead. If you specified both, only"
                      " xatol is used.",
                      DeprecationWarning)
        if (np.isclose(xatol, 1e-4) and
                not np.isclose(unknown_options['xtol'], 1e-4)):
            # only xtol was probably specified, use it.
            xatol = unknown_options['xtol']
        unknown_options.pop('xtol')
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # Rewrap func so fcalls[0] tracks the number of evaluations.
    fcalls, func = wrap_function(func, args)
    # Standard Nelder-Mead coefficients: reflection (rho), expansion (chi),
    # contraction (psi) and shrink (sigma).
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    # Relative / absolute perturbations used to build the default simplex.
    nonzdelt = 0.05
    zdelt = 0.00025
    x0 = asfarray(x0).flatten()
    if initial_simplex is None:
        # Build an (N+1)-vertex simplex around x0 by perturbing one
        # coordinate at a time (absolute step zdelt for zero coordinates).
        N = len(x0)
        sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
        sim[0] = x0
        for k in range(N):
            y = numpy.array(x0, copy=True)
            if y[k] != 0:
                y[k] = (1 + nonzdelt)*y[k]
            else:
                y[k] = zdelt
            sim[k + 1] = y
    else:
        sim = np.asfarray(initial_simplex).copy()
        if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
            raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
        if len(x0) != sim.shape[1]:
            raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
        N = sim.shape[1]
    if retall:
        allvecs = [sim[0]]
    # If neither are set, then set both to default
    if maxiter is None and maxfun is None:
        maxiter = N * 200
        maxfun = N * 200
    elif maxiter is None:
        # Convert remaining Nones, to np.inf, unless the other is np.inf, in
        # which case use the default to avoid unbounded iteration
        if maxfun == np.inf:
            maxiter = N * 200
        else:
            maxiter = np.inf
    elif maxfun is None:
        if maxiter == np.inf:
            maxfun = N * 200
        else:
            maxfun = np.inf
    one2np1 = list(range(1, N + 1))
    fsim = numpy.zeros((N + 1,), float)
    for k in range(N + 1):
        fsim[k] = func(sim[k])
    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)
    iterations = 1
    while (fcalls[0] < maxfun and iterations < maxiter):
        # Converged when the simplex is geometrically small (xatol) AND the
        # spread of function values across vertices is small (fatol).
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):
            break
        # Centroid of every vertex except the worst (last) one.
        xbar = numpy.add.reduce(sim[:-1], 0) / N
        # Reflect the worst vertex through the centroid.
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0
        if fxr < fsim[0]:
            # Reflection beat the current best point: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    # Outside contraction, between centroid and reflection.
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
            if doshrink:
                # Both contractions failed: shrink all vertices towards the
                # best one.
                for j in one2np1:
                    sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                    fsim[j] = func(sim[j])
        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])
    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print('Warning: ' + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print('Warning: ' + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % iterations)
            print("         Function evaluations: %d" % fcalls[0])
    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x, final_simplex=(sim, fsim))
    if retall:
        result['allvecs'] = allvecs
    return result
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
    """Finite-difference approximation of the gradient of a scalar function.

    Parameters
    ----------
    xk : array_like
        The coordinate vector at which to determine the gradient of `f`.
    f : callable
        The function of which to determine the gradient (partial
        derivatives).  Should take `xk` as first argument, other arguments
        to `f` can be supplied in ``*args``.  Should return a scalar.
    epsilon : array_like
        Increment to `xk` to use for determining the function gradient.
        If a scalar, uses the same finite difference delta for all partial
        derivatives.  If an array, should contain one value per element of
        `xk`.
    \\*args : args, optional
        Any other arguments that are to be passed to `f`.

    Returns
    -------
    grad : ndarray
        The partial derivatives of `f` to `xk`, computed with the forward
        finite difference formula
        ``f'[i] = (f(xk[i] + epsilon[i]) - f(xk[i])) / epsilon[i]``.

    See Also
    --------
    check_grad : Check correctness of gradient function against
        approx_fprime.

    Examples
    --------
    >>> from scipy import optimize
    >>> def func(x, c0, c1):
    ...     "Coordinate vector `x` should be an array of size two."
    ...     return c0 * x[0]**2 + c1*x[1]**2
    >>> x = np.ones(2)
    >>> c0, c1 = (1, 200)
    >>> eps = np.sqrt(np.finfo(float).eps)
    >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
    array([   2.        ,  400.00004198])
    """
    # Delegate to the helper, which additionally accepts a precomputed f0.
    return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
    """Check a gradient function against its forward-difference approximation.

    Parameters
    ----------
    func : callable ``func(x0, *args)``
        Function whose derivative is to be checked.
    grad : callable ``grad(x0, *args)``
        Gradient of `func`.
    x0 : ndarray
        Points to check `grad` against the forward difference approximation
        of grad using `func`.
    args : \\*args, optional
        Extra arguments passed to `func` and `grad`.
    epsilon : float, optional
        Step size used for the finite difference approximation. It defaults
        to ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.

    Returns
    -------
    err : float
        The 2-norm of the difference between ``grad(x0, *args)`` and the
        finite difference approximation of `grad` using func at the points
        `x0`.

    See Also
    --------
    approx_fprime

    Examples
    --------
    >>> def func(x):
    ...     return x[0]**2 - 0.5 * x[1]**3
    >>> def grad(x):
    ...     return [2 * x[0], -1.5 * x[1]**2]
    >>> from scipy.optimize import check_grad
    >>> check_grad(func, grad, [1.5, -1.5])
    2.9802322387695312e-08
    """
    # `epsilon` is the only supported keyword; reject anything else.
    epsilon = kwargs.pop('epsilon', _epsilon)
    if kwargs:
        raise ValueError("Unknown keyword arguments: %r" %
                         (list(kwargs.keys()),))
    difference = grad(x0, *args) - approx_fprime(x0, func, epsilon, *args)
    return sqrt(sum(difference**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
    """Approximate the Hessian-vector product ``H(x0) @ p``.

    Forward-differences the gradient along direction ``p``:
    ``(fprime(x0 + epsilon*p) - fprime(x0)) / epsilon``.
    """
    grad_shifted = fprime(*((x0 + epsilon * p,) + args))
    grad_base = fprime(*((x0,) + args))
    return (grad_shifted - grad_base) / epsilon
class _LineSearchError(RuntimeError):
    """Internal signal that no suitable line-search step length was found."""
    pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found.
    """
    extra_condition = kwargs.pop('extra_condition', None)

    ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
                             old_fval, old_old_fval, **kwargs)

    if ret[0] is not None and extra_condition is not None:
        # Let the caller veto the step: ret[3] and ret[5] are the values
        # wolfe1 reports for the trial point and are forwarded as-is.
        candidate = xk + ret[0] * pk
        if not extra_condition(ret[0], candidate, ret[3], ret[5]):
            # Reject step if extra_condition fails
            ret = (None,)

    if ret[0] is None:
        # line search failed: try different one.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', LineSearchWarning)
            fallback_kwargs = {key: kwargs[key]
                               for key in ('c1', 'c2', 'amax')
                               if key in kwargs}
            ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval,
                                     extra_condition=extra_condition,
                                     **fallback_kwargs)

    if ret[0] is None:
        raise _LineSearchError()

    return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0, callback=None):
    """
    Minimize a function using the BFGS quasi-Newton algorithm.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable f'(x,*args), optional
        Gradient of f.  If None, the gradient is approximated numerically
        using `epsilon`.
    args : tuple, optional
        Extra arguments passed to f and fprime.
    gtol : float, optional
        Gradient norm must be less than gtol before successful termination.
    norm : float, optional
        Order of norm (Inf is max, -Inf is min).
    epsilon : int or ndarray, optional
        If fprime is approximated, use this value for the step size.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return fopt, gopt, Bopt, func_calls, grad_calls and
        warnflag in addition to xopt.
    disp : bool, optional
        Print convergence message if True.
    retall : bool, optional
        Return a list of results at each iteration if True.
    callback : callable, optional
        Called after each iteration as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. f(xopt) == fopt.
    fopt : float
        Minimum value (only if `full_output`).
    gopt : ndarray
        Gradient at the minimum, which should be near 0
        (only if `full_output`).
    Bopt : ndarray
        Approximate inverse Hessian at the minimum (only if `full_output`).
    func_calls : int
        Number of function calls made (only if `full_output`).
    grad_calls : int
        Number of gradient calls made (only if `full_output`).
    warnflag : integer
        1 : Maximum number of iterations exceeded.
        2 : Gradient and/or function calls not changing.
        (only if `full_output`)
    allvecs : list
        Parameter vector at each iteration (only if `retall`).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'BFGS' `method` in particular.

    References
    ----------
    Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
    """
    # Forward to the modern solver entry point and unpack its result dict.
    options = {'gtol': gtol,
               'norm': norm,
               'eps': epsilon,
               'disp': disp,
               'maxiter': maxiter,
               'return_all': retall}
    res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **options)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['jac'], res['hess_inv'],
           res['nfev'], res['njev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
                   gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                   disp=False, return_all=False,
                   **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    BFGS algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if x0.ndim == 0:
        x0.shape = (1,)
    if maxiter is None:
        maxiter = len(x0) * 200
    # Wrap f and the gradient so call counts are recorded; fall back to a
    # forward-difference gradient when none is supplied.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N, dtype=int)
    # Initial inverse-Hessian approximation is the identity.
    Hk = I
    # Sets the initial step guess to dx ~ 1
    old_fval = f(x0)
    old_old_fval = old_fval + np.linalg.norm(gfk) / 2
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2 * gtol]
    warnflag = 0
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        # Quasi-Newton search direction: pk = -Hk @ grad.
        pk = -numpy.dot(Hk, gfk)
        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                 _line_search_wolfe12(f, myfprime, xk, pk, gfk,
                                      old_fval, old_old_fval, amin=1e-100, amax=1e100)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        # sk and yk are the position and gradient differences used in the
        # BFGS update below.
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)
        yk = gfkp1 - gfk
        gfk = gfkp1
        if callback is not None:
            callback(xk)
        k += 1
        gnorm = vecnorm(gfk, ord=norm)
        if (gnorm <= gtol):
            break
        if not numpy.isfinite(old_fval):
            # We correctly found +-Inf as optimal value, or something went
            # wrong.
            warnflag = 2
            break
        try:  # this was handled in numeric, let it remaines for more safety
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is patch for numpy
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        # BFGS update of the inverse Hessian:
        # Hk <- (I - rhok*sk*yk^T) Hk (I - rhok*yk*sk^T) + rhok*sk*sk^T
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
                                                 sk[numpy.newaxis, :])
    fval = old_fval
    if np.isnan(fval):
        # This can happen if the first call to f returned NaN;
        # the loop is then never entered.
        warnflag = 2
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using a nonlinear conjugate gradient algorithm
    (Polak-Ribiere variant).

    Parameters
    ----------
    f : callable, ``f(x, *args)``
        Objective function to be minimized.  `x` must be a 1-D array of the
        variables, and `args` are the other (fixed) parameters of `f`.
    x0 : ndarray
        A user-supplied initial estimate of `xopt`, the optimal value of
        `x`.  It must be a 1-D array of values.
    fprime : callable, ``fprime(x, *args)``, optional
        A function returning the gradient of `f` at `x` as a 1-D array.
        Defaults to None, in which case the gradient is approximated
        numerically (see `epsilon`).
    args : tuple, optional
        Parameter values passed to `f` and `fprime`.
    gtol : float, optional
        Stop when the norm of the gradient is less than `gtol`.
    norm : float, optional
        Order to use for the norm of the gradient
        (``-np.Inf`` is min, ``np.Inf`` is max).
    epsilon : float or ndarray, optional
        Step size(s) to use when `fprime` is approximated numerically.
        Defaults to ``sqrt(eps)``, approximately 1.5e-8.
    maxiter : int, optional
        Maximum number of iterations to perform. Default is
        ``200 * len(x0)``.
    full_output : bool, optional
        If True, also return `fopt`, `func_calls`, `grad_calls` and
        `warnflag`.
    disp : bool, optional
        If True, print a convergence message.
    retall : bool, optional
        If True, also return the results of each iteration.
    callback : callable, optional
        Called after each iteration as ``callback(xk)``, where ``xk`` is
        the current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float, optional
        Minimum value found. Only returned if `full_output` is True.
    func_calls : int, optional
        Number of function calls made. Only returned if `full_output` is
        True.
    grad_calls : int, optional
        Number of gradient calls made. Only returned if `full_output` is
        True.
    warnflag : int, optional
        0 success; 1 maximum iterations exceeded; 2 gradient and/or
        function calls not changing. Only returned if `full_output` is
        True.
    allvecs : list of ndarray, optional
        Result at each iteration. Only returned if `retall` is True.

    See Also
    --------
    minimize : common interface to all `scipy.optimize` algorithms; call
        with ``method='CG'`` for this algorithm.

    References
    ----------
    .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
    """
    # Route through the modern solver entry point, then unpack the result.
    options = {'gtol': gtol,
               'norm': norm,
               'eps': epsilon,
               'disp': disp,
               'maxiter': maxiter,
               'return_all': retall}
    res = _minimize_cg(f, x0, args, fprime, callback=callback, **options)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['nfev'], res['njev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
                 gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                 disp=False, return_all=False,
                 **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    conjugate gradient algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if maxiter is None:
        maxiter = len(x0) * 200
    # Wrap f and the gradient so call counts are recorded; fall back to a
    # forward-difference gradient when none is supplied.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    xk = x0
    # Sets the initial step guess to dx ~ 1
    old_fval = f(xk)
    old_old_fval = old_fval + np.linalg.norm(gfk) / 2
    if retall:
        allvecs = [xk]
    warnflag = 0
    # First search direction is steepest descent.
    pk = -gfk
    gnorm = vecnorm(gfk, ord=norm)
    # Sufficient-descent constant used by descent_condition below.
    sigma_3 = 0.01
    while (gnorm > gtol) and (k < maxiter):
        deltak = numpy.dot(gfk, gfk)
        # cached_step is filled by descent_condition so the accepted step's
        # already-computed quantities can be reused after the line search.
        cached_step = [None]

        def polak_ribiere_powell_step(alpha, gfkp1=None):
            # Compute the next iterate and the PR+ conjugate direction
            # (beta clipped at zero, i.e. Polak-Ribiere "plus").
            xkp1 = xk + alpha * pk
            if gfkp1 is None:
                gfkp1 = myfprime(xkp1)
            yk = gfkp1 - gfk
            beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
            pkp1 = -gfkp1 + beta_k * pk
            gnorm = vecnorm(gfkp1, ord=norm)
            return (alpha, xkp1, pkp1, gfkp1, gnorm)

        def descent_condition(alpha, xkp1, fp1, gfkp1):
            # Polak-Ribiere+ needs an explicit check of a sufficient
            # descent condition, which is not guaranteed by strong Wolfe.
            #
            # See Gilbert & Nocedal, "Global convergence properties of
            # conjugate gradient methods for optimization",
            # SIAM J. Optimization 2, 21 (1992).
            cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1)
            alpha, xk, pk, gfk, gnorm = cached_step
            # Accept step if it leads to convergence.
            if gnorm <= gtol:
                return True
            # Accept step if sufficient descent condition applies.
            return numpy.dot(pk, gfk) <= -sigma_3 * numpy.dot(gfk, gfk)

        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
                                          old_old_fval, c2=0.4, amin=1e-100, amax=1e100,
                                          extra_condition=descent_condition)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        # Reuse already computed results if possible
        if alpha_k == cached_step[0]:
            alpha_k, xk, pk, gfk, gnorm = cached_step
        else:
            alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1)
        if retall:
            allvecs.append(xk)
        if callback is not None:
            callback(xk)
        k += 1
    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """
    Unconstrained minimization of a function using the Newton-CG method.

    Parameters
    ----------
    f : callable ``f(x, *args)``
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable ``f'(x, *args)``
        Gradient of f.
    fhess_p : callable ``fhess_p(x, p, *args)``, optional
        Function which computes the Hessian of f times an arbitrary
        vector, p.
    fhess : callable ``fhess(x, *args)``, optional
        Function to compute the Hessian matrix of f.
    args : tuple, optional
        Extra arguments passed to f, fprime, fhess_p, and fhess (the same
        set of extra arguments is supplied to all of these functions).
    avextol : float, optional
        Convergence is assumed when the average relative error in the
        minimizer falls below this amount.
    epsilon : float or ndarray, optional
        If fhess is approximated, use this value for the step size.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return the optional outputs.
    disp : bool, optional
        If True, print convergence message.
    retall : bool, optional
        If True, return a list of results at each iteration.
    callback : callable, optional
        Called after each iteration as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float
        Value of the function at xopt (only if `full_output`).
    fcalls : int
        Number of function calls made (only if `full_output`).
    gcalls : int
        Number of gradient calls made (only if `full_output`).
    hcalls : int
        Number of hessian calls made (only if `full_output`).
    warnflag : int
        1 : Maximum number of iterations exceeded (only if `full_output`).
    allvecs : list
        The result at each iteration (only if `retall`).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Newton-CG' `method` in particular.

    Notes
    -----
    Only one of `fhess_p` or `fhess` need to be given. If `fhess` is
    provided, then `fhess_p` will be ignored. If neither is provided, the
    hessian product is approximated using finite differences on `fprime`.

    References
    ----------
    Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
    """
    # Forward to the modern solver entry point and unpack its result dict.
    options = {'xtol': avextol,
               'eps': epsilon,
               'maxiter': maxiter,
               'disp': disp,
               'return_all': retall}
    res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
                             callback=callback, **options)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['nfev'], res['njev'],
           res['nhev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
                       callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
                       disp=False, return_all=False,
                       **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm.

    Note that the `jac` parameter (Jacobian) is required.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Average relative error in solution `xopt` acceptable for
        convergence.
    maxiter : int
        Maximum number of iterations to perform.
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    if jac is None:
        raise ValueError('Jacobian is required for Newton-CG method')
    # Rebind the minimize-style argument names to the legacy fmin_ncg
    # names used throughout the body below.
    f = fun
    fprime = jac
    fhess_p = hessp
    fhess = hess
    avextol = xtol
    epsilon = eps
    retall = return_all

    def terminate(warnflag, msg):
        # Assemble the final OptimizeResult from the enclosing scope's
        # current state (old_fval, gfk, xk, k and the call counters).
        # NOTE(review): `gfk` is first assigned inside the outer while
        # loop; calling terminate before one full iteration (e.g. with
        # maxiter == 0) would hit an unbound name -- confirm callers
        # cannot reach that path.
        if disp:
            print(msg)
            print(" Current function value: %f" % old_fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
        fval = old_fval
        result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0],
                                njev=gcalls[0], nhev=hcalls, status=warnflag,
                                success=(warnflag == 0), message=msg, x=xk,
                                nit=k)
        if retall:
            result['allvecs'] = allvecs
        return result

    x0 = asarray(x0).flatten()
    # Wrap f and fprime so the call counts accumulate in fcalls/gcalls
    # (mutable single-element lists updated by the wrappers).
    fcalls, f = wrap_function(f, args)
    gcalls, fprime = wrap_function(fprime, args)
    hcalls = 0
    if maxiter is None:
        maxiter = len(x0)*200
    cg_maxiter = 20*len(x0)  # cap on inner CG iterations per Newton step

    xtol = len(x0) * avextol
    update = [2 * xtol]  # seed so the first while test passes
    xk = x0
    if retall:
        allvecs = [xk]
    k = 0
    old_fval = f(x0)
    old_old_fval = None
    float64eps = numpy.finfo(numpy.float64).eps
    # Outer Newton iteration: stop when the last step `update` is small.
    while numpy.add.reduce(numpy.abs(update)) > xtol:
        if k >= maxiter:
            msg = "Warning: " + _status_message['maxiter']
            return terminate(1, msg)
        # Compute a search direction pk by applying the CG method to
        # del2 f(xk) p = - grad f(xk) starting from 0.
        b = -fprime(xk)
        maggrad = numpy.add.reduce(numpy.abs(b))
        eta = numpy.min([0.5, numpy.sqrt(maggrad)])
        termcond = eta * maggrad  # truncated-Newton inner tolerance
        xsupi = zeros(len(x0), dtype=x0.dtype)
        ri = -b          # CG residual
        psupi = -ri      # CG search direction
        i = 0
        dri0 = numpy.dot(ri, ri)

        if fhess is not None:  # you want to compute hessian once.
            A = fhess(*(xk,) + args)
            hcalls = hcalls + 1

        # Inner CG loop solving the Newton system approximately.
        for k2 in xrange(cg_maxiter):
            if numpy.add.reduce(numpy.abs(ri)) <= termcond:
                break
            if fhess is None:
                if fhess_p is None:
                    # Hessian-vector product by finite differences on
                    # the gradient.
                    Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
                else:
                    Ap = fhess_p(xk, psupi, *args)
                    hcalls = hcalls + 1
            else:
                Ap = numpy.dot(A, psupi)
            # check curvature
            Ap = asarray(Ap).squeeze()  # get rid of matrices...
            curv = numpy.dot(psupi, Ap)
            if 0 <= curv <= 3 * float64eps:
                # Curvature numerically zero: accept current xsupi.
                break
            elif curv < 0:
                if (i > 0):
                    break
                else:
                    # fall back to steepest descent direction
                    xsupi = dri0 / (-curv) * b
                    break
            alphai = dri0 / curv
            xsupi = xsupi + alphai * psupi
            ri = ri + alphai * Ap
            dri1 = numpy.dot(ri, ri)
            betai = dri1 / dri0
            psupi = -ri + betai * psupi
            i = i + 1
            dri0 = dri1  # update numpy.dot(ri,ri) for next time.
        else:
            # curvature keeps increasing, bail out
            msg = ("Warning: CG iterations didn't converge. The Hessian is not "
                   "positive definite.")
            return terminate(3, msg)

        pk = xsupi  # search direction is solution to system.
        gfk = -b    # gradient at xk
        try:
            alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            msg = "Warning: " + _status_message['pr_loss']
            return terminate(2, msg)

        update = alphak * pk
        xk = xk + update  # upcast if necessary
        if callback is not None:
            callback(xk)
        if retall:
            allvecs.append(xk)
        k += 1
    else:
        # while-condition failed without a break: converged normally.
        msg = _status_message['success']
        return terminate(0, msg)
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
              full_output=0, disp=1):
    """Minimize a scalar function of one variable on a bounded interval.

    Thin legacy wrapper around `_minimize_scalar_bounded` (Brent-style
    bounded minimization on ``x1 < xopt < x2``).

    Parameters
    ----------
    func : callable f(x,*args)
        Scalar objective function to be minimized.
    x1, x2 : float or array scalar
        Lower and upper optimization bounds.
    args : tuple, optional
        Extra positional arguments forwarded to `func`.
    xtol : float, optional
        Absolute convergence tolerance on x.
    maxfun : int, optional
        Cap on the number of function evaluations.
    full_output : bool, optional
        When True, also return fval, ierr and numfunc.
    disp : int, optional
        Verbosity level: 0 silent, 1 non-convergence warnings only,
        2 also report convergence, 3 per-iteration output.

    Returns
    -------
    xopt : ndarray
        Minimizer of `func` within the interval.
    fval : number
        Objective value at `xopt` (only if `full_output` is True).
    ierr : int
        0 on convergence, 1 if the evaluation budget was exhausted
        (only if `full_output` is True).
    numfunc : int
        Number of function evaluations used (only if `full_output` is True).
    """
    res = _minimize_scalar_bounded(func, (x1, x2), args,
                                   xatol=xtol, maxiter=maxfun, disp=disp)
    if not full_output:
        return res['x']
    return res['x'], res['fun'], res['status'], res['nfev']
def _minimize_scalar_bounded(func, bounds, args=(),
                             xatol=1e-5, maxiter=500, disp=0,
                             **unknown_options):
    """
    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    disp : bool
        Set to True to print convergence messages.
    xatol : float
        Absolute error in solution `xopt` acceptable for convergence.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxiter
    # Test bounds are of correct form
    if len(bounds) != 2:
        raise ValueError('bounds must have two elements.')
    x1, x2 = bounds

    if not (is_array_scalar(x1) and is_array_scalar(x2)):
        raise ValueError("Optimisation bounds must be scalars"
                         " or array scalars.")
    if x1 > x2:
        raise ValueError("The lower bound exceeds the upper bound.")

    flag = 0  # 0: converged, 1: maxfun exhausted
    header = ' Func-count x f(x) Procedure'
    step = ' initial'

    sqrt_eps = sqrt(2.2e-16)
    golden_mean = 0.5 * (3.0 - sqrt(5.0))  # golden-section fraction
    a, b = x1, x2
    # xf: current best point; nfc: second-best; fulc: third-best.
    fulc = a + golden_mean * (b - a)
    nfc, xf = fulc, fulc
    rat = e = 0.0  # rat: last step taken; e: step before that
    x = xf
    fx = func(x, *args)
    num = 1
    fmin_data = (1, xf, fx)

    ffulc = fnfc = fx
    xm = 0.5 * (a + b)
    tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
    tol2 = 2.0 * tol1

    if disp > 2:
        print(" ")
        print(header)
        print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))

    # Main loop: stop when xf is within tolerance of the interval midpoint.
    while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
        golden = 1
        # Check for parabolic fit
        if numpy.abs(e) > tol1:
            golden = 0
            # Fit a parabola through (xf, fx), (nfc, fnfc), (fulc, ffulc);
            # p/q is the signed step to its vertex.
            r = (xf - nfc) * (fx - ffulc)
            q = (xf - fulc) * (fx - fnfc)
            p = (xf - fulc) * q - (xf - nfc) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = numpy.abs(q)
            r = e
            e = rat

            # Check for acceptability of parabola
            if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
                    (p < q * (b - xf))):
                rat = (p + 0.0) / q
                x = xf + rat
                step = ' parabolic'

                if ((x - a) < tol2) or ((b - x) < tol2):
                    # Trial point too close to a bound: take a minimal
                    # step toward the midpoint instead.
                    si = numpy.sign(xm - xf) + ((xm - xf) == 0)
                    rat = tol1 * si
            else:      # do a golden section step
                golden = 1

        if golden:  # Do a golden-section step
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            rat = golden_mean*e
            step = ' golden'

        # Never step by less than tol1; si restores the sign of rat.
        si = numpy.sign(rat) + (rat == 0)
        x = xf + si * numpy.max([numpy.abs(rat), tol1])
        fu = func(x, *args)
        num += 1
        fmin_data = (num, x, fu)
        if disp > 2:
            print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))

        if fu <= fx:
            # New best point: shrink the interval toward it and rotate
            # the bookkeeping triple (fulc <- nfc <- xf <- x).
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            # Worse point: it still tightens the bracket and may become
            # the second- or third-best point.
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu

        xm = 0.5 * (a + b)
        tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
        tol2 = 2.0 * tol1

        if num >= maxfun:
            flag = 1
            break

    fval = fx
    if disp > 0:
        _endprint(x, flag, fval, maxfun, xatol, disp)

    result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
                            message={0: 'Solution found.',
                                     1: 'Maximum number of function calls '
                                        'reached.'}.get(flag, ''),
                            x=xf, nfev=num)

    return result
class Brent:
    """Helper implementing Brent's scalar minimization.

    Combines golden-section steps with inverse parabolic interpolation
    inside a shrinking bracket. Driven by `_minimize_scalar_brent`;
    not part of the public API.
    """
    #need to rethink design of __init__
    def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
                 full_output=0):
        self.func = func        # objective function of one variable
        self.args = args        # extra positional args for func
        self.tol = tol          # relative x tolerance
        self.maxiter = maxiter  # iteration cap for optimize()
        self._mintol = 1.0e-11  # absolute floor on the step tolerance
        self._cg = 0.3819660    # golden-section fraction (2 - golden ratio)
        self.xmin = None        # minimizer, filled in by optimize()
        self.fval = None        # objective value at xmin
        self.iter = 0           # iterations used
        self.funcalls = 0       # function evaluations used

    # need to rethink design of set_bracket (new options, etc)
    def set_bracket(self, brack=None):
        self.brack = brack

    def get_bracket_info(self):
        """Return a valid bracket (xa, xb, xc, fa, fb, fc, funcalls).

        ``self.brack`` may be None (run an automatic downhill bracket
        search), a pair (xa, xb) used to seed that search, or a full
        triple (xa, xb, xc) with fb < fa and fb < fc.
        """
        #set up
        func = self.func
        args = self.args
        brack = self.brack
        ### BEGIN core bracket_info code ###
        ### carefully DOCUMENT any CHANGES in core ##
        if brack is None:
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
        elif len(brack) == 2:
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                       xb=brack[1], args=args)
        elif len(brack) == 3:
            xa, xb, xc = brack
            if (xa > xc):  # swap so xa < xc can be assumed
                xc, xa = xa, xc
            if not ((xa < xb) and (xb < xc)):
                raise ValueError("Not a bracketing interval.")
            fa = func(*((xa,) + args))
            fb = func(*((xb,) + args))
            fc = func(*((xc,) + args))
            if not ((fb < fa) and (fb < fc)):
                raise ValueError("Not a bracketing interval.")
            funcalls = 3
        else:
            raise ValueError("Bracketing interval must be "
                             "length 2 or 3 sequence.")
        ### END core bracket_info code ###

        return xa, xb, xc, fa, fb, fc, funcalls

    def optimize(self):
        """Run Brent's method; store xmin/fval/iter/funcalls on self."""
        # set up for optimization
        func = self.func
        xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
        _mintol = self._mintol
        _cg = self._cg
        #################################
        #BEGIN CORE ALGORITHM
        #################################
        # x: current best point; w: second best; v: previous value of w.
        x = w = v = xb
        fw = fv = fx = func(*((x,) + self.args))
        if (xa < xc):
            a = xa
            b = xc
        else:
            a = xc
            b = xa
        deltax = 0.0
        funcalls = 1
        iter = 0
        while (iter < self.maxiter):
            tol1 = self.tol * numpy.abs(x) + _mintol
            tol2 = 2.0 * tol1
            xmid = 0.5 * (a + b)
            # check for convergence
            if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
                break
            # XXX In the first iteration, rat is only bound in the true case
            # of this conditional. This used to cause an UnboundLocalError
            # (gh-4140). It should be set before the if (but to what?).
            if (numpy.abs(deltax) <= tol1):
                if (x >= xmid):
                    deltax = a - x  # do a golden section step
                else:
                    deltax = b - x
                rat = _cg * deltax
            else:  # do a parabolic step
                # Vertex of the parabola through (x,fx), (w,fw), (v,fv).
                tmp1 = (x - w) * (fx - fv)
                tmp2 = (x - v) * (fx - fw)
                p = (x - v) * tmp2 - (x - w) * tmp1
                tmp2 = 2.0 * (tmp2 - tmp1)
                if (tmp2 > 0.0):
                    p = -p
                tmp2 = numpy.abs(tmp2)
                dx_temp = deltax
                deltax = rat
                # check parabolic fit
                if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
                        (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
                    rat = p * 1.0 / tmp2  # if parabolic step is useful.
                    u = x + rat
                    if ((u - a) < tol2 or (b - u) < tol2):
                        # Too close to a bound: take a minimal step
                        # toward the midpoint instead.
                        if xmid - x >= 0:
                            rat = tol1
                        else:
                            rat = -tol1
                else:
                    if (x >= xmid):
                        deltax = a - x  # if it's not do a golden section step
                    else:
                        deltax = b - x
                    rat = _cg * deltax

            if (numpy.abs(rat) < tol1):  # update by at least tol1
                if rat >= 0:
                    u = x + tol1
                else:
                    u = x - tol1
            else:
                u = x + rat
            fu = func(*((u,) + self.args))  # calculate new output value
            funcalls += 1

            if (fu > fx):  # if it's bigger than current
                # u is worse than x: tighten the bracket on u's side and
                # possibly promote u to second/third best.
                if (u < x):
                    a = u
                else:
                    b = u
                if (fu <= fw) or (w == x):
                    v = w
                    w = u
                    fv = fw
                    fw = fu
                elif (fu <= fv) or (v == x) or (v == w):
                    v = u
                    fv = fu
            else:
                # u is the new best: shift x's side of the bracket and
                # rotate (v <- w <- x <- u).
                if (u >= x):
                    a = x
                else:
                    b = x
                v = w
                w = x
                x = u
                fv = fw
                fw = fx
                fx = fu

            iter += 1
        #################################
        #END CORE ALGORITHM
        #################################

        self.xmin = x
        self.fval = fx
        self.iter = iter
        self.funcalls = funcalls

    def get_result(self, full_output=False):
        # Return the minimizer, optionally with fval and the counters.
        if full_output:
            return self.xmin, self.fval, self.iter, self.funcalls
        else:
            return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
    """Minimize a function of one variable using Brent's method.

    Given a scalar function and an optional bracket, isolate a local
    minimum to a fractional precision of about `tol`. Uses inverse
    parabolic interpolation when possible to accelerate the golden
    section method. Legacy wrapper around `_minimize_scalar_brent`.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function.
    args : tuple, optional
        Additional arguments passed to `func`.
    brack : tuple, optional
        Either a triple (xa,xb,xc) with xa<xb<xc and func(xb) below both
        func(xa) and func(xc), or a pair (xa,xb) used to seed a downhill
        bracket search (see `bracket`). With a pair, the solution is not
        guaranteed to satisfy xa<=x<=xb.
    tol : float, optional
        Stop when the between-iteration change is below this value.
    full_output : bool, optional
        When True, also return fval, iter and funcalls.
    maxiter : int, optional
        Maximum number of iterations.

    Returns
    -------
    xmin : ndarray
        Optimum point.
    fval : float
        Objective value at `xmin` (only if `full_output` is True).
    iter : int
        Iterations used (only if `full_output` is True).
    funcalls : int
        Objective evaluations made (only if `full_output` is True).

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Brent' `method` in particular.

    Notes
    -----
    Does not ensure the minimum lies inside `brack`; see `fminbound`
    for a bounded variant.
    """
    res = _minimize_scalar_brent(func, brack, args,
                                 xtol=tol, maxiter=maxiter)
    if full_output:
        return res['x'], res['fun'], res['nit'], res['nfev']
    return res['x']
def _minimize_scalar_brent(func, brack=None, args=(),
                           xtol=1.48e-8, maxiter=500,
                           **unknown_options):
    """
    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of golden section method.
    """
    _check_unknown_options(unknown_options)
    if xtol < 0:
        raise ValueError('tolerance should be >= 0, got %r' % xtol)

    # Delegate the actual work to the Brent helper class.
    solver = Brent(func=func, args=args, tol=xtol,
                   full_output=True, maxiter=maxiter)
    solver.set_bracket(brack)
    solver.optimize()
    xmin, fval, nit, nfev = solver.get_result(full_output=True)

    return OptimizeResult(fun=fval, x=xmin, nit=nit, nfev=nfev,
                          success=nit < maxiter)
def golden(func, args=(), brack=None, tol=_epsilon,
           full_output=0, maxiter=5000):
    """Minimize a function of one variable by golden section search.

    Given a scalar function and an optional bracketing interval, isolate
    the minimum to a fractional precision of `tol` by repeatedly
    bisecting the bracket in golden-ratio proportions. Legacy wrapper
    around `_minimize_scalar_golden`.

    Parameters
    ----------
    func : callable func(x,*args)
        Objective function to minimize.
    args : tuple, optional
        Additional arguments passed to `func`.
    brack : tuple, optional
        Triple (a,b,c) with a<b<c and func(b) below both func(a) and
        func(c); or a pair (a,c) used to seed a downhill bracket search
        (see `bracket`). With a pair, the solution is not guaranteed to
        satisfy a<=x<=c.
    tol : float, optional
        x tolerance stop criterion.
    full_output : bool, optional
        When True, also return fval and funcalls.
    maxiter : int
        Maximum number of iterations to perform.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Golden' `method` in particular.
    """
    res = _minimize_scalar_golden(func, brack, args,
                                  xtol=tol, maxiter=maxiter)
    if full_output:
        return res['x'], res['fun'], res['nfev']
    return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
                            xtol=_epsilon, maxiter=5000, **unknown_options):
    """
    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    """
    _check_unknown_options(unknown_options)
    tol = xtol
    # Obtain a valid bracketing triple (xa, xb, xc) with fb < fa, fc.
    if brack is None:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                   xb=brack[1], args=args)
    elif len(brack) == 3:
        xa, xb, xc = brack
        if (xa > xc):  # swap so xa < xc can be assumed
            xc, xa = xa, xc
        if not ((xa < xb) and (xb < xc)):
            raise ValueError("Not a bracketing interval.")
        fa = func(*((xa,) + args))
        fb = func(*((xb,) + args))
        fc = func(*((xc,) + args))
        if not ((fb < fa) and (fb < fc)):
            raise ValueError("Not a bracketing interval.")
        funcalls = 3
    else:
        raise ValueError("Bracketing interval must be length 2 or 3 sequence.")

    _gR = 0.61803399  # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))
    _gC = 1.0 - _gR
    x3 = xc
    x0 = xa
    # Place the second interior probe inside the larger sub-interval.
    if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
        x1 = xb
        x2 = xb + _gC * (xc - xb)
    else:
        x2 = xb
        x1 = xb - _gC * (xb - xa)
    f1 = func(*((x1,) + args))
    f2 = func(*((x2,) + args))
    funcalls += 2
    nit = 0
    # Shrink [x0, x3] around the lower of the two interior points until
    # the interval is small relative to the probe positions.
    for i in xrange(maxiter):
        if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)):
            break
        if (f2 < f1):
            # Minimum in [x1, x3]: drop x0, insert a new probe near x3.
            x0 = x1
            x1 = x2
            x2 = _gR * x1 + _gC * x3
            f1 = f2
            f2 = func(*((x2,) + args))
        else:
            # Minimum in [x0, x2]: drop x3, insert a new probe near x0.
            x3 = x2
            x2 = x1
            x1 = _gR * x2 + _gC * x0
            f2 = f1
            f1 = func(*((x1,) + args))
        funcalls += 1
        nit += 1
    # Report the better of the two final interior points.
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2

    return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,
                          success=nit < maxiter)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """
    Bracket the minimum of the function.

    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initital points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It doesn't always mean that obtained
    solution will satisfy xa<=x<=xb

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to minimize.
    xa, xb : float, optional
        Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
    args : tuple, optional
        Additional arguments (if present), passed to `func`.
    grow_limit : float, optional
        Maximum grow limit. Defaults to 110.0
    maxiter : int, optional
        Maximum number of iterations to perform. Defaults to 1000.

    Returns
    -------
    xa, xb, xc : float
        Bracket.
    fa, fb, fc : float
        Objective function values in bracket.
    funcalls : int
        Number of function evaluations made.
    """
    _gold = 1.618034  # golden ratio: (1.0+sqrt(5.0))/2.0
    _verysmall_num = 1e-21  # guard against a near-zero parabola denominator
    fa = func(*(xa,) + args)
    fb = func(*(xb,) + args)
    if (fa < fb):  # Switch so fa > fb
        xa, xb = xb, xa
        fa, fb = fb, fa
    xc = xb + _gold * (xb - xa)  # first downhill golden-ratio step
    fc = func(*((xc,) + args))
    funcalls = 3
    iter = 0
    # March downhill until fc rises above fb, i.e. a bracket is found.
    while (fc < fb):
        # w = minimizer of the parabola through (xa,fa),(xb,fb),(xc,fc).
        tmp1 = (xb - xa) * (fb - fc)
        tmp2 = (xb - xc) * (fb - fa)
        val = tmp2 - tmp1
        if numpy.abs(val) < _verysmall_num:
            denom = 2.0 * _verysmall_num
        else:
            denom = 2.0 * val
        w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
        wlim = xb + grow_limit * (xc - xb)  # furthest step allowed
        if iter > maxiter:
            raise RuntimeError("Too many iterations.")
        iter += 1
        if (w - xc) * (xb - w) > 0.0:
            # Parabolic w lies between xb and xc: probe it directly.
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                # Minimum bracketed by (xb, w, xc).
                xa = xb
                xb = w
                fa = fb
                fb = fw
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fw > fb):
                # Minimum bracketed by (xa, xb, w).
                xc = w
                fc = fw
                return xa, xb, xc, fa, fb, fc, funcalls
            # Parabolic point was not useful; take a default golden step.
            w = xc + _gold * (xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(wlim - xc) >= 0.0:
            # w overshoots the growth limit: clamp it to wlim.
            w = wlim
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(xc - w) > 0.0:
            # w lies between xc and wlim: probe it; if still downhill,
            # push one more golden step beyond it.
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                xb = xc
                xc = w
                w = xc + _gold * (xc - xb)
                fb = fc
                fc = fw
                fw = func(*((w,) + args))
                funcalls += 1
        else:
            # Parabolic step useless here: take a default golden step.
            w = xc + _gold * (xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        # Shift the triple downhill: (xa, xb, xc) <- (xb, xc, w).
        xa = xb
        xb = xc
        xc = w
        fa = fb
        fb = fc
        fc = fw
    return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
    """Line-search along direction `xi` from point `p` using `brent`.

    Minimizes the scalar function ``alpha -> func(p + alpha*xi)`` and
    returns ``(fmin, new_point, step_taken)``.
    """
    objective_1d = lambda alpha: func(p + alpha * xi)
    alpha_min, fret, _, _ = brent(objective_1d, full_output=1, tol=tol)
    step = alpha_min * xi
    return squeeze(fret), p + step, step
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
                direc=None):
    """Minimize a function using modified Powell's method.

    Derivative-free conjugate-direction minimization: the inner loop
    line-minimizes along each direction of the current set; the outer
    loop may replace the direction of greatest decrease with the net
    displacement of the inner loop. Legacy wrapper around
    `_minimize_powell`.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to `func`.
    xtol : float, optional
        Line-search error tolerance.
    ftol : float, optional
        Relative error in ``func(xopt)`` acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations.
    maxfun : int, optional
        Maximum number of function evaluations.
    full_output : bool, optional
        When True, also return fopt, direc, iter, funcalls and warnflag.
    disp : bool, optional
        When True, print convergence messages.
    retall : bool, optional
        When True, also return the list of solutions per iteration.
    callback : callable, optional
        Called after each iteration as ``callback(xk)`` with the current
        parameter vector.
    direc : ndarray, optional
        Initial direction set.

    Returns
    -------
    xopt : ndarray
        Minimizer of `func`.
    fopt : number
        ``func(xopt)`` (only if `full_output` is True).
    direc : ndarray
        Final direction set (only if `full_output` is True).
    iter : int
        Iterations used (only if `full_output` is True).
    funcalls : int
        Function evaluations made (only if `full_output` is True).
    warnflag : int
        1 if `maxfun` was reached, 2 if `maxiter` was reached
        (only if `full_output` is True).
    allvecs : list
        Solution at each iteration (only if `retall` is True).

    See also
    --------
    minimize: Interface to unconstrained minimization algorithms for
        multivariate functions. See the 'Powell' `method` in particular.

    References
    ----------
    Powell M.J.D. (1964) An efficient method for finding the minimum of a
    function of several variables without calculating derivatives,
    Computer Journal, 7 (2):155-162.
    """
    res = _minimize_powell(func, x0, args, callback=callback,
                           xtol=xtol, ftol=ftol, maxiter=maxiter,
                           maxfev=maxfun, disp=disp, direc=direc,
                           return_all=retall)
    if not full_output:
        if retall:
            return res['x'], res['allvecs']
        return res['x']
    out = (res['x'], res['fun'], res['direc'], res['nit'],
           res['nfev'], res['status'])
    if retall:
        out += (res['allvecs'], )
    return out
def _minimize_powell(func, x0, args=(), callback=None,
                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                     disp=False, direc=None, return_all=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    modified Powell algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*1000``, where ``N`` is the number of
        variables, if neither `maxiter` or `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    direc : ndarray
        Initial set of direction vectors for the Powell method.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # we need to use a mutable object here that we can update in the
    # wrapper function
    fcalls, func = wrap_function(func, args)
    x = asarray(x0).flatten()
    if retall:
        allvecs = [x]
    N = len(x)
    # If neither are set, then set both to default
    if maxiter is None and maxfun is None:
        maxiter = N * 1000
        maxfun = N * 1000
    elif maxiter is None:
        # Convert remaining Nones, to np.inf, unless the other is np.inf, in
        # which case use the default to avoid unbounded iteration
        if maxfun == np.inf:
            maxiter = N * 1000
        else:
            maxiter = np.inf
    elif maxfun is None:
        if maxiter == np.inf:
            maxfun = N * 1000
        else:
            maxfun = np.inf

    if direc is None:
        direc = eye(N, dtype=float)  # start from the coordinate directions
    else:
        direc = asarray(direc, dtype=float)

    fval = squeeze(func(x))
    x1 = x.copy()  # best point at the start of the previous outer iteration
    iter = 0
    ilist = list(range(N))
    while True:
        fx = fval
        bigind = 0   # index of the direction with the largest decrease
        delta = 0.0  # magnitude of that decrease
        # Inner loop: line-minimize along each direction in the set.
        for i in ilist:
            direc1 = direc[i]
            fx2 = fval
            fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                 tol=xtol * 100)
            if (fx2 - fval) > delta:
                delta = fx2 - fval
                bigind = i
        iter += 1
        if callback is not None:
            callback(x)
        if retall:
            allvecs.append(x)
        # Relative function-decrease convergence test.
        bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
        if 2.0 * (fx - fval) <= bnd:
            break
        if fcalls[0] >= maxfun:
            break
        if iter >= maxiter:
            break

        # Construct the extrapolated point
        direc1 = x - x1
        x2 = 2*x - x1
        x1 = x.copy()
        fx2 = squeeze(func(x2))

        if (fx > fx2):
            # Powell's criterion: replace the direction of largest
            # decrease with the net displacement when it pays off.
            t = 2.0*(fx + fx2 - 2.0*fval)
            temp = (fx - fval - delta)
            t *= temp*temp
            temp = fx - fx2
            t -= delta*temp*temp
            if t < 0.0:
                fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                     tol=xtol*100)
                direc[bigind] = direc[-1]
                direc[-1] = direc1

    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print("Warning: " + msg)
    elif iter >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % iter)
            print(" Function evaluations: %d" % fcalls[0])

    x = squeeze(x)

    result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
disp=False):
"""Minimize a function over a given range by brute force.
Uses the "brute force" method, i.e. computes the function's value
at each point of a multidimensional grid of points, to find the global
minimum of the function.
The function is evaluated everywhere in the range with the datatype of the
first call to the function, as enforced by the ``vectorize`` NumPy
function. The value and type of the function evaluation returned when
``full_output=True`` are affected in addition by the ``finish`` argument
(see Notes).
Parameters
----------
func : callable
The objective function to be minimized. Must be in the
form ``f(x, *args)``, where ``x`` is the argument in
the form of a 1-D array and ``args`` is a tuple of any
additional fixed parameters needed to completely specify
the function.
ranges : tuple
Each component of the `ranges` tuple must be either a
"slice object" or a range tuple of the form ``(low, high)``.
The program uses these to create the grid of points on which
the objective function will be computed. See `Note 2` for
more detail.
args : tuple, optional
Any additional fixed parameters needed to completely specify
the function.
Ns : int, optional
Number of grid points along the axes, if not otherwise
specified. See `Note2`.
full_output : bool, optional
If True, return the evaluation grid and the objective function's
values on it.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take `func` and
the initial guess as positional arguments, and take `args` as
keyword arguments. It may additionally take `full_output`
and/or `disp` as keyword arguments. Use None if no "polishing"
function is to be used. See Notes for more details.
disp : bool, optional
Set to True to print convergence messages.
Returns
-------
x0 : ndarray
A 1-D array containing the coordinates of a point at which the
objective function had its minimum value. (See `Note 1` for
which point is returned.)
fval : float
Function value at the point `x0`. (Returned when `full_output` is
True.)
grid : tuple
Representation of the evaluation grid. It has the same
length as `x0`. (Returned when `full_output` is True.)
Jout : ndarray
Function values at each point of the evaluation
grid, `i.e.`, ``Jout = func(*grid)``. (Returned
when `full_output` is True.)
See Also
--------
basinhopping, differential_evolution
Notes
-----
*Note 1*: The program finds the gridpoint at which the lowest value
of the objective function occurs. If `finish` is None, that is the
point returned. When the global minimum occurs within (or not very far
outside) the grid's boundaries, and the grid is fine enough, that
point will be in the neighborhood of the global minimum.
However, users often employ some other optimization program to
"polish" the gridpoint values, `i.e.`, to seek a more precise
(local) minimum near `brute's` best gridpoint.
The `brute` function's `finish` option provides a convenient way to do
that. Any polishing program used must take `brute's` output as its
initial guess as a positional argument, and take `brute's` input values
for `args` as keyword arguments, otherwise an error will be raised.
It may additionally take `full_output` and/or `disp` as keyword arguments.
`brute` assumes that the `finish` function returns either an
`OptimizeResult` object or a tuple in the form:
``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
value of the argument, ``Jmin`` is the minimum value of the objective
function, "..." may be some other returned values (which are not used
by `brute`), and ``statuscode`` is the status code of the `finish` program.
Note that when `finish` is not None, the values returned are those
of the `finish` program, *not* the gridpoint ones. Consequently,
while `brute` confines its search to the input grid points,
the `finish` program's results usually will not coincide with any
gridpoint, and may fall outside the grid's boundary. Thus, if a
minimum only needs to be found over the provided grid points, make
sure to pass in `finish=None`.
*Note 2*: The grid of points is a `numpy.mgrid` object.
For `brute` the `ranges` and `Ns` inputs have the following effect.
Each component of the `ranges` tuple can be either a slice object or a
two-tuple giving a range of values, such as (0, 5). If the component is a
slice object, `brute` uses it directly. If the component is a two-tuple
range, `brute` internally converts it to a slice object that interpolates
`Ns` points from its low-value to its high-value, inclusive.
Examples
--------
We illustrate the use of `brute` to seek the global minimum of a function
of two variables that is given as the sum of a positive-definite
quadratic and two deep "Gaussian-shaped" craters. Specifically, define
the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
are as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
Thus, the objective function may have local minima near the minimum
of each of the three functions of which it is composed. To
use `fmin` to polish its gridpoint result, we may then continue as
follows:
>>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
>>> from scipy import optimize
>>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
... finish=optimize.fmin)
>>> resbrute[0] # global minimum
array([-1.05665192, 1.80834843])
>>> resbrute[1] # function value at global minimum
-3.4085818767
Note that if `finish` had been set to None, we would have gotten the
gridpoint [-1.0 1.75] where the rounded function value is -2.892.
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more "
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
if type(lrange[k]) is not type(slice(None)):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
def _scalarfunc(*params):
params = squeeze(asarray(params))
return func(params, *args)
vecfunc = vectorize(_scalarfunc)
grid = mgrid[lrange]
if (N == 1):
grid = (grid,)
Jout = vecfunc(*grid)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = zeros(N, int)
xmin = zeros(N, float)
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
# set up kwargs for `finish` function
finish_args = _getargspec(finish).args
finish_kwargs = dict()
if 'full_output' in finish_args:
finish_kwargs['full_output'] = 1
if 'disp' in finish_args:
finish_kwargs['disp'] = disp
elif 'options' in finish_args:
# pass 'disp' as `options`
# (e.g. if `finish` is `minimize`)
finish_kwargs['options'] = {'disp': disp}
# run minimizer
res = finish(func, xmin, args=args, **finish_kwargs)
if isinstance(res, OptimizeResult):
xmin = res.x
Jmin = res.fun
success = res.success
else:
xmin = res[0]
Jmin = res[1]
success = res[-1] == 0
if not success:
if disp:
print("Warning: Either final optimization did not succeed "
"or `finish` does not return `statuscode` as its last "
"argument.")
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
def show_options(solver=None, method=None, disp=True):
    """
    Show documentation for additional options of optimization solvers.
    These are method-specific options that can be supplied through the
    ``options`` dict.
    Parameters
    ----------
    solver : str
        Type of optimization solver. One of 'minimize', 'minimize_scalar',
        'root', or 'linprog'.
    method : str, optional
        If not given, shows all methods of the specified solver. Otherwise,
        show only the options for the specified method. Valid values
        corresponds to methods' names of respective solver (e.g. 'BFGS' for
        'minimize').
    disp : bool, optional
        Whether to print the result rather than returning it.
    Returns
    -------
    text
        Either None (for disp=False) or the text string (disp=True)
    Notes
    -----
    The solver-specific methods are:
    `scipy.optimize.minimize`
    - :ref:`Nelder-Mead <optimize.minimize-neldermead>`
    - :ref:`Powell <optimize.minimize-powell>`
    - :ref:`CG <optimize.minimize-cg>`
    - :ref:`BFGS <optimize.minimize-bfgs>`
    - :ref:`Newton-CG <optimize.minimize-newtoncg>`
    - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
    - :ref:`TNC <optimize.minimize-tnc>`
    - :ref:`COBYLA <optimize.minimize-cobyla>`
    - :ref:`SLSQP <optimize.minimize-slsqp>`
    - :ref:`dogleg <optimize.minimize-dogleg>`
    - :ref:`trust-ncg <optimize.minimize-trustncg>`
    `scipy.optimize.root`
    - :ref:`hybr <optimize.root-hybr>`
    - :ref:`lm <optimize.root-lm>`
    - :ref:`broyden1 <optimize.root-broyden1>`
    - :ref:`broyden2 <optimize.root-broyden2>`
    - :ref:`anderson <optimize.root-anderson>`
    - :ref:`linearmixing <optimize.root-linearmixing>`
    - :ref:`diagbroyden <optimize.root-diagbroyden>`
    - :ref:`excitingmixing <optimize.root-excitingmixing>`
    - :ref:`krylov <optimize.root-krylov>`
    - :ref:`df-sane <optimize.root-dfsane>`
    `scipy.optimize.minimize_scalar`
    - :ref:`brent <optimize.minimize_scalar-brent>`
    - :ref:`golden <optimize.minimize_scalar-golden>`
    - :ref:`bounded <optimize.minimize_scalar-bounded>`
    `scipy.optimize.linprog`
    - :ref:`simplex <optimize.linprog-simplex>`
    - :ref:`interior-point <optimize.linprog-interior-point>`
    """
    import textwrap
    # Registry: for each solver, (method name -> dotted path of the private
    # implementation function whose docstring documents the method options).
    # The docs are pulled dynamically at call time, so paths must track the
    # actual scipy module layout.
    doc_routines = {
        'minimize': (
            ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
            ('cg', 'scipy.optimize.optimize._minimize_cg'),
            ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
            ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
            ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
            ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
            ('newtoncg', 'scipy.optimize.optimize._minimize_newtoncg'),
            ('powell', 'scipy.optimize.optimize._minimize_powell'),
            ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
            ('tnc', 'scipy.optimize.tnc._minimize_tnc'),
            ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
        ),
        'root': (
            ('hybr', 'scipy.optimize.minpack._root_hybr'),
            ('lm', 'scipy.optimize._root._root_leastsq'),
            ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
            ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
            ('anderson', 'scipy.optimize._root._root_anderson_doc'),
            ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
            ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
            ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
            ('krylov', 'scipy.optimize._root._root_krylov_doc'),
            ('df-sane', 'scipy.optimize._spectral._root_df_sane'),
        ),
        'linprog': (
            ('simplex', 'scipy.optimize._linprog._linprog_simplex'),
            ('interior-point', 'scipy.optimize._linprog._linprog_ip'),
        ),
        'minimize_scalar': (
            ('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
            ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
            ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
        ),
    }
    if solver is None:
        # No solver given: recurse once per solver and join the sections.
        text = ["\n\n\n========\n", "minimize\n", "========\n"]
        text.append(show_options('minimize', disp=False))
        text.extend(["\n\n===============\n", "minimize_scalar\n",
                     "===============\n"])
        text.append(show_options('minimize_scalar', disp=False))
        text.extend(["\n\n\n====\n", "root\n",
                     "====\n"])
        text.append(show_options('root', disp=False))
        text.extend(['\n\n\n=======\n', 'linprog\n',
                     '=======\n'])
        text.append(show_options('linprog', disp=False))
        text = "".join(text)
    else:
        solver = solver.lower()
        if solver not in doc_routines:
            raise ValueError('Unknown solver %r' % (solver,))
        if method is None:
            # All methods of one solver: recurse once per method name.
            text = []
            for name, _ in doc_routines[solver]:
                text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
                text.append(show_options(solver, name, disp=False))
            text = "".join(text)
        else:
            methods = dict(doc_routines[solver])
            if method not in methods:
                raise ValueError("Unknown method %r" % (method,))
            name = methods[method]
            # Import function object from its dotted path and pull its
            # docstring (may be None for undocumented implementations).
            parts = name.split('.')
            mod_name = ".".join(parts[:-1])
            __import__(mod_name)
            obj = getattr(sys.modules[mod_name], parts[-1])
            # Get doc
            doc = obj.__doc__
            if doc is not None:
                text = textwrap.dedent(doc).strip()
            else:
                text = ""
    if disp:
        print(text)
        return
    else:
        return text
def main():
    """Benchmark several optimizers on the 3-D Rosenbrock function and
    print each result followed by a timing summary table."""
    import time

    x0 = [0.8, 1.2, 0.7]
    # One entry per benchmark:
    # (section title, underline, summary-table label, solver thunk)
    jobs = [
        ("Nelder-Mead Simplex", "===================",
         'Nelder-Mead Simplex\t',
         lambda: fmin(rosen, x0)),
        ("Powell Direction Set Method", "===========================",
         'Powell Direction Set Method.',
         lambda: fmin_powell(rosen, x0)),
        ("Nonlinear CG", "============",
         'Nonlinear CG \t',
         lambda: fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)),
        ("BFGS Quasi-Newton", "=================",
         'BFGS Quasi-Newton\t',
         lambda: fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)),
        ("BFGS approximate gradient", "=========================",
         'BFGS without gradient\t',
         lambda: fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)),
        ("Newton-CG with Hessian product", "==============================",
         'Newton-CG with hessian product',
         lambda: fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod,
                          maxiter=80)),
        ("Newton-CG with full Hessian", "===========================",
         'Newton-CG with full hessian',
         lambda: fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)),
    ]

    times = []
    algor = []
    for title, underline, label, solve in jobs:
        print(title)
        print(underline)
        start = time.time()
        x = solve()
        print(x)
        times.append(time.time() - start)
        algor.append(label)
        print()

    print("\nMinimizing the Rosenbrock function of order 3\n")
    print(" Algorithm \t\t\t Seconds")
    print("===========\t\t\t =========")
    for label, seconds in zip(algor, times):
        print(label, "\t -- ", seconds)
if __name__ == "__main__":
main()
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/optimize/optimize.py | Python | mit | 104,561 | [
"Gaussian"
] | 55859d6fc5e4310b1eb486c151cb37f16dfd854371d0a64f92c28fa1db84a9a7 |
"""CRYSTAL calculator interface."""
# Copyright (C) 2016 Antti J. Karttunen (antti.j.karttunen@iki.fi)
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
from phonopy.file_IO import iter_collect_forces
from phonopy.interface.vasp import check_forces, get_drift_forces
from phonopy.structure.atoms import PhonopyAtoms as Atoms
from phonopy.structure.symmetry import Symmetry
from phonopy.units import Bohr, Hartree
def parse_set_of_forces(num_atoms, forces_filenames, verbose=True):
    """Parse forces from CRYSTAL output files.

    Parameters
    ----------
    num_atoms : int
        Number of atoms expected in each force block.
    forces_filenames : sequence of str
        CRYSTAL output files to parse, one per displacement.
    verbose : bool, optional
        Print progress and drift information.

    Returns
    -------
    list
        One (num_atoms, 3) force array per file, converted from
        Hartree/Bohr to eV/Angstrom, or an empty list if any file
        failed to parse.
    """
    hook = "ATOM X Y Z"
    force_sets = []
    # BUGFIX: previously `is_parsed` was assigned inside the loop, so it
    # reflected only the *last* file (earlier failures were silently
    # overwritten) and an empty file list raised NameError. Start optimistic
    # and latch to False on the first failure.
    is_parsed = True
    for i, filename in enumerate(forces_filenames):
        if verbose:
            sys.stdout.write("%d. " % (i + 1))
        crystal_forces = iter_collect_forces(filename, num_atoms, hook, [2, 3, 4])
        if check_forces(crystal_forces, num_atoms, filename, verbose=verbose):
            drift_force = get_drift_forces(
                crystal_forces, filename=filename, verbose=verbose
            )
            # Convert forces Hartree / Bohr -> eV / Angstrom
            # This avoids confusion with the units. CRYSTAL uses Angstroms for
            # coordinates, but Hartree / Bohr for forces. This would lead in mixed
            # units hartree / (Angstrom * Bohr) for force constants, requiring
            # additional tweaks for unit conversions in other parts of the code
            force_sets.append(
                np.multiply(np.array(crystal_forces) - drift_force, Hartree / Bohr)
            )
        else:
            is_parsed = False
    if is_parsed:
        return force_sets
    return []
def read_crystal(filename):
    """Read a CRYSTAL output file and build a PhonopyAtoms cell.

    Parameters
    ----------
    filename : str
        Path to the CRYSTAL output file.

    Returns
    -------
    tuple
        (cell, conv_numbers): the parsed structure and the CRYSTAL
        conventional atomic numbers, in atom order.
    """
    # Context manager guarantees the file handle is closed even if
    # parsing raises (the original open/close pair leaked on error).
    with open(filename) as f_crystal:
        crystal_in = CrystalIn(f_crystal.readlines())
    tags = crystal_in.get_tags()
    cell = Atoms(
        cell=tags["lattice_vectors"],
        symbols=tags["atomic_species"],
        scaled_positions=tags["coordinates"],
    )
    magmoms = tags["magnetic_moments"]
    if magmoms is not None:
        # Print out symmetry information for magnetic cases
        # Original code from structure/symmetry.py
        symmetry = Symmetry(cell, symprec=1e-5)
        print(
            "CRYSTAL-interface: Magnetic structure, "
            "number of operations without spin: %d"
            % len(symmetry.get_symmetry_operations()["rotations"])
        )
        print(
            "CRYSTAL-interface: Spacegroup without spin: %s"
            % symmetry.get_international_table()
        )
        # Re-derive symmetry with spins attached to see how many
        # operations survive the magnetic ordering.
        cell.set_magnetic_moments(magmoms)
        symmetry = Symmetry(cell, symprec=1e-5)
        print(
            "CRYSTAL-interface: Magnetic structure, number of operations with spin: %d"
            % len(symmetry.get_symmetry_operations()["rotations"])
        )
        print("")
    return cell, tags["conv_numbers"]
def write_crystal(
    filename, cell, conv_numbers, template_file="TEMPLATE", write_symmetry=False
):
    """Write the cell as CRYSTAL input files.

    Creates ``<filename>.ext`` (the fort.34 EXTERNAL geometry) and
    ``<filename>.d12`` (the input deck). If *template_file* exists its
    contents are inserted after the geometry keywords; otherwise a stub
    with placeholder settings is written. ATOMSPIN is emitted for cells
    carrying non-zero magnetic moments.
    """
    # Write geometry in EXTERNAL file (fort.34); context managers replace
    # the raw open/close pairs so handles are closed even on errors.
    with open(filename + ".ext", "w") as f_ext:
        f_ext.write(get_crystal_structure(cell, conv_numbers, write_symmetry))
    # Create input file (filename.d12)
    lines = "Created by Phonopy CRYSTAL interface\n"
    lines += "EXTERNAL\n"
    lines += "ENDGEOM\n"
    # If template_file exists, insert it at this point
    try:
        with open(template_file) as f_template:
            lines += f_template.read()
    except IOError:
        lines += "***** Insert basis sets and parameters here *****\n"
        lines += "TOLDEE\n"
        lines += "10\n"
    # For magnetic structures, create ATOMSPIN entry
    # Only spins != 0 are written
    magmoms = cell.get_magnetic_moments()
    if magmoms is not None:
        atomspins = ""
        N_spins = 0
        for i in range(0, len(magmoms)):
            if magmoms[i] != 0:
                N_spins += 1
                # CRYSTAL atom indices are 1-based
                atomspins += "%d %d " % (i + 1, magmoms[i])
        lines += "ATOMSPIN\n"
        lines += "%d\n" % N_spins
        lines += atomspins + "\n"
    lines += "GRADCAL\n"
    lines += "END\n"
    # Write the input file
    with open(filename + ".d12", "w") as f_inputfile:
        f_inputfile.writelines(lines)
def write_supercells_with_displacements(
    supercell,
    cells_with_displacements,
    ids,
    conv_numbers,
    num_unitcells_in_supercell,
    pre_filename="supercell",
    width=3,
    template_file="TEMPLATE",
):
    """Write the pristine supercell and each displaced supercell to files."""
    # Repeat each conventional atomic number once per unit-cell image so it
    # lines up with the supercell atom ordering.
    convnum_super = [
        number
        for number in conv_numbers
        for _ in range(num_unitcells_in_supercell)
    ]
    # Currently, symmetry is not used by default
    # It can be turned on by creating a file called CRY_SYM
    try:
        with open("CRY_SYM"):
            use_symmetry = True
    except IOError:
        use_symmetry = False
    if use_symmetry:
        print(
            "CRYSTAL-interface: WARNING: Symmetry enabled in EXTERNAL files.\n"
            " Check the supercells very carefully, some spacegroups do not work "
            "(e.g. R-3m)\n"
            " Non-displaced supercell is always written without symmetry"
        )
    write_crystal(
        pre_filename, supercell, convnum_super, template_file, write_symmetry=False
    )
    for cell_id, displaced_cell in zip(ids, cells_with_displacements):
        filename = "{pre_filename}-{0:0{width}}".format(
            cell_id, pre_filename=pre_filename, width=width
        )
        write_crystal(
            filename,
            displaced_cell,
            convnum_super,
            template_file,
            write_symmetry=use_symmetry,
        )
def get_crystal_structure(cell, conv_numbers, write_symmetry=False):
    """Return the CRYSTAL EXTERNAL (fort.34) geometry as a string.

    With ``write_symmetry=False`` only the identity operator is written;
    otherwise all symmetry operations of *cell* are listed.
    """
    lattice = cell.get_cell()
    positions = cell.get_positions()
    # Assemble the file as a list of fragments and join once at the end.
    # Header: dimensionality, centring, crystal type.
    parts = ["3 1 1\n"]
    # Cartesian components of the lattice vectors.
    for vec in lattice:
        parts.append(("%12.8f" * 3 + "\n") % tuple(vec))
    if write_symmetry:
        # Full list of rotations + translations from the cell symmetry.
        symmetry = Symmetry(cell, symprec=1e-5)
        ops = symmetry.get_symmetry_operations()
        rotations = ops["rotations"]
        translations = ops["translations"]
        parts.append("%d\n" % len(rotations))
        for rot, trans in zip(rotations, translations):
            for row in rot:
                parts.append((" %5.2f" * 3 + "\n") % tuple(row))
            parts.append((" %5.2f" * 3 + "\n") % tuple(trans))
    else:
        # Identity operator only.
        parts.append("1\n")
        parts.append(" 1.00 0.00 0.00\n")
        parts.append(" 0.00 1.00 0.00\n")
        parts.append(" 0.00 0.00 1.00\n")
        parts.append(" 0.00 0.00 0.00\n")
    # Number of atoms in the unit cell (asymmetric unit), then the
    # conventional atomic number and cartesian coordinates of each atom.
    parts.append("%d\n" % len(positions))
    for number, pos in zip(conv_numbers, positions):
        parts.append(
            (" %d " + "%16.12f" * 3 + "\n") % (number, pos[0], pos[1], pos[2])
        )
    return "".join(parts)
class CrystalIn:
    """Parser for CRYSTAL output files.

    Collects lattice vectors, atomic species, fractional coordinates,
    CRYSTAL conventional atomic numbers and (optionally) ATOMSPIN
    magnetic moments into a tag dictionary.
    """

    def __init__(self, lines):
        """Parse *lines* (the content of a CRYSTAL output file)."""
        # conv_numbers = CRYSTAL conventional atomic number mapping:
        # 'Ge' -> 32 or 'Ge' -> 232
        self._tags = {
            "lattice_vectors": None,
            "atomic_species": None,
            "coordinates": None,
            "magnetic_moments": None,
            "conv_numbers": None,
        }
        self._values = None
        self._collect(lines)

    def get_tags(self):
        """Return the dictionary of parsed structure tags."""
        return self._tags

    def _collect(self, lines):
        # Reads a CRYSTAL output file (lattice vectors, conventional atomic numbers,
        # fractional atomic positions).
        # - For optimization outputs, the final geometry in the file is read.
        # - Dielectric tensor and effective Born charges can be read with script
        #   phonopy-crystal-born
        # - If ATOMSPIN keyword is present, magnetic moments are read from it
        magmoms = []
        atomspins = []
        numspins = 0
        # BUGFIX: initialize parse results up front. Previously an output
        # file missing the geometry sections raised UnboundLocalError at
        # the validation step below instead of printing the parse-error
        # message.
        lattvecs = []
        aspecies = []
        coords = []
        convnum = []
        N_atoms = 0
        ll = 0
        while ll < len(lines):
            line = lines[ll]
            if "PRIMITIVE CELL - CENTRING CODE" in line:
                # Start a fresh geometry block; the last block in the file
                # wins (final geometry of an optimization run).
                aspecies = []
                coords = []
                convnum = []
                ll += 4
                # ATOMS IN THE ASYMMETRIC UNIT 2 - ATOMS IN THE UNIT CELL: 6
                N_atoms = int(lines[ll].split()[12])
                ll += 3
                # 1 T 22 TI 4.721218104494E-21 3.307446203077E-21 1.413771901417E-21
                for atom in range(0, N_atoms):
                    atomdata = lines[ll].split()
                    aspecies.append(atomdata[3].capitalize())
                    coords.append([float(x) for x in atomdata[4:7]])
                    convnum.append(int(atomdata[2]))
                    ll += 1
            elif "DIRECT LATTICE VECTORS CARTESIAN COMPONENTS" in line:
                lattvecs = []
                ll += 2
                # X Y Z
                for lattvec in range(1, 4):
                    lattvecs.append([float(x) for x in lines[ll].split()])
                    ll += 1
            elif "ATOMSPIN" in line:
                # Read ATOMSPIN, and save the magnetic moments for later parsing
                # (not all necessary information is available at this point)
                # All spins must be entered on one line!
                # ATOMSPIN
                # 8
                # 1 1 2 1 3 -1 4 -1 5 1 6 1 7 -1 8 -1
                ll += 1
                numspins = int(lines[ll])
                ll += 1
                atomspins = [int(x) for x in lines[ll].split()]
                ll += 1
            ll += 1  # advance past the line just inspected
        if (
            len(lattvecs) == 3
            and len(aspecies) > 0
            and len(aspecies) == len(coords)
            and len(aspecies) == len(convnum)
        ):
            self._tags["lattice_vectors"] = lattvecs
            self._tags["atomic_species"] = aspecies
            self._tags["coordinates"] = coords
            self._tags["conv_numbers"] = convnum
        else:
            print("CRYSTAL-interface: Error parsing CRYSTAL output file")
        # Set magnetic moments
        if numspins > 0:
            # Initialize all moments to zero; ATOMSPIN lists only the
            # non-zero ones as (1-based atom index, spin) pairs.
            magmoms = [0] * N_atoms
            if numspins * 2 == len(atomspins):
                for i in range(0, numspins):
                    atomnum = atomspins[i * 2] - 1
                    magmom = atomspins[i * 2 + 1]
                    magmoms[atomnum] = magmom
                self._tags["magnetic_moments"] = magmoms
                print(
                    "CRYSTAL-interface: Following magnetic moments "
                    "have been read from ATOMSPIN entry:"
                )
                print(magmoms)
            else:
                print(
                    "CRYSTAL-interface: Invalid ATOMSPIN entry, "
                    "magnetic moments have not been set"
                )
        else:
            print("")
if __name__ == "__main__":
cell, conv_numbers = read_crystal(sys.argv[1])
symmetry = Symmetry(cell)
print("# %s" % symmetry.get_international_table())
print(get_crystal_structure(cell, conv_numbers))
| atztogo/phonopy | phonopy/interface/crystal.py | Python | bsd-3-clause | 13,116 | [
"CRYSTAL",
"VASP",
"phonopy"
] | 74e0eec357cf55946ba758fcb179af4cf5321bf69bb521b650e2ccc0c0011455 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class spatialRepAll(vtk.test.Testing.vtkTest):
    """Regression test rendering vtkSpatialRepresentationFilter output for
    three locator types side by side against a reference image."""

    def testspatialRepAll(self):
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        renWin.SetMultiSamples(0)
        renWin.AddRenderer(ren)

        # Model geometry read from an STL file in the VTK data tree.
        asource = vtk.vtkSTLReader()
        asource.SetFileName(VTK_DATA_ROOT + "/Data/42400-IDGH.stl")
        dataMapper = vtk.vtkPolyDataMapper()
        dataMapper.SetInputConnection(asource.GetOutputPort())
        model = vtk.vtkActor()
        model.SetMapper(dataMapper)
        model.GetProperty().SetColor(1, 0, 0)
        model.VisibilityOn()

        locators = ["vtkPointLocator", "vtkCellLocator", "vtkOBBTree"]
        locator = list()
        boxes = list()
        boxMapper = list()
        boxActor = list()
        for idx, vtkLocatorType in enumerate(locators):
            # BUGFIX: build the locator class by name with getattr instead of
            # eval() -- same effect without dynamic code evaluation.
            locator.append(getattr(vtk, vtkLocatorType)())
            locator[idx].AutomaticOff()
            locator[idx].SetMaxLevel(3)
            # BUGFIX: 'vtk.vtk.vtkSpatialRepresentationFilter' relied on the
            # package accidentally exposing itself as an attribute.
            boxes.append(vtk.vtkSpatialRepresentationFilter())
            boxes[idx].SetInputConnection(asource.GetOutputPort())
            boxes[idx].SetSpatialRepresentation(locator[idx])
            boxes[idx].SetGenerateLeaves(1)
            boxes[idx].Update()
            output = boxes[idx].GetOutput().GetBlock(boxes[idx].GetMaximumLevel() + 1)
            boxMapper.append(vtk.vtkPolyDataMapper())
            boxMapper[idx].SetInputData(output)
            boxActor.append(vtk.vtkActor())
            boxActor[idx].SetMapper(boxMapper[idx])
            # Offset each locator visualization along x so all three show.
            boxActor[idx].AddPosition((idx + 1) * 15, 0, 0)
            ren.AddActor(boxActor[idx])

        ren.AddActor(model)
        ren.SetBackground(0.1, 0.2, 0.4)
        renWin.SetSize(400, 160)

        # render the image
        camera = vtk.vtkCamera()
        camera.SetPosition(148.579, 136.352, 214.961)
        camera.SetFocalPoint(151.889, 86.3178, 223.333)
        camera.SetViewAngle(30)
        camera.SetViewUp(0, 0, -1)
        camera.SetClippingRange(1, 100)
        ren.SetActiveCamera(camera)

        # render and interact with data
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
        renWin.Render()

        img_file = "spatialRepAll.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(spatialRepAll, 'test')])
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/General/Testing/Python/spatialRepAll.py | Python | gpl-3.0 | 3,220 | [
"VTK"
] | b8057da24f93a69cc8f6c90111e4239331715a09f946e09fd00b8197bc8f5c6d |
# vim: fdm=indent
'''
author: Fabio Zanini
date: 17/03/15
content: Clustering functions
'''
# Modules
import numpy as np
# Functions
def energy_function(v, e1, e2):
    '''Energy function to minimize for the cluster-making.

    v is the flattened (N, 2) array of point positions; e1 holds the
    per-sequence distances from the consensus, e2 the pairwise identity
    matrix between sequences.
    '''
    # v arrives flattened by the optimizer; restore the (N, 2) layout.
    n_points = v.shape[0] // 2
    pts = v.reshape((n_points, 2))

    # Coefficients
    rep_consensus = 5e-2                 # repulsion from consensus
    pair_scale = 1e-4 / (n_points - 1)   # interactions
    pair_rep = 3.0                       # -- " -- (repulsive part)
    pair_att = 1e-2                      # -- " -- (elastic attraction)

    # Radius of each point from the origin (where the consensus sits).
    radii = np.sqrt((pts**2).sum(axis=-1))

    # Matrix of mutual Euclidean distances between all points.
    diff = np.zeros((n_points, n_points, 2))
    diff[:] = pts
    diff -= diff.swapaxes(0, 1)
    dist = np.zeros((n_points, n_points))
    dist[:] = np.sqrt((diff**2).sum(axis=-1))

    # Infinity trap keeps the points from escaping to infinity.
    energy = np.cosh(radii).sum()
    # Consensus repulsor pushes diverged sequences outward.
    energy -= rep_consensus * (e1 * radii).sum()
    # Pairwise interactions: constant repulsion + harmonic attraction
    # weighted by sequence identity.
    energy += pair_scale * (-pair_rep * dist + pair_att * e2 * dist**2).sum()
    return energy
def energy_gradient_function(v, e1, e2):
    '''Gradient of the energy function.

    NOTE(review): the pair coupling here is l2 = 1e-4 / N while
    energy_function uses l2 = 1e-4 / (N - 1) (energy_withgradient_function
    uses 1e-4 / N for both) -- confirm which normalization is intended,
    as the mismatch makes this gradient inconsistent with energy_function.
    '''
    # NOTE: v comes in as a flattened array, but it's a 2D vector
    N = v.shape[0] // 2
    v = v.reshape((N, 2))
    # Coefficients
    l1 = 5e-2  # repulsion from consensus
    l2 = 1e-4 / N  # interactions
    l2_rep = 3.0  # -- " -- (repulsive part)
    l2_att = 1e-2  # -- " -- (elastic attraction)
    # Calculate the radius
    r = np.sqrt((v**2).sum(axis=-1))
    # Mutual distances between all points
    a = np.zeros((N, N, 2))
    a[:] = v
    a -= a.swapaxes(0, 1)
    d = np.zeros((N, N))
    d[:] = np.sqrt((a**2).sum(axis=-1))
    # Accumulate the gradient one (point, component) pair at a time;
    # the commented-out "#e += ..." lines quote the energy term each
    # contribution is differentiated from.  (Python 2 xrange.)
    J = np.zeros_like(v)
    for ix in xrange(2):
        for i in xrange(N):
            g = 0
            # Infinity trap
            #e += np.cosh(r).sum()
            # The 1e-10 offsets regularize the v/r singularity at r = 0.
            g += np.sinh(r[i]) * (v[i, ix] + 1e-10) / (r[i] + 1e-10)
            # Consensus repulsor
            #e += -l1 * (e1 * r).sum()
            g -= l1 * e1[i] * (v[i, ix] + 1e-10) / (r[i] + 1e-10)
            # Pairwise interactions (constant repulsion + harmonic oscillator)
            #e += l2 * (-l2_rep * d + l2_att * e2 * d**2).sum()
            for j in xrange(N):
                # 1e-15 guards the i == j diagonal where d is zero.
                g -= 2 * l2 * l2_rep * (v[i, ix] - v[j, ix]) / (d[i, j] + 1e-15)
                g += 4 * l2 * l2_att * e2[i, j] * (v[i, ix] - v[j, ix])
            J[i, ix] = g
    # Return flattened to match the optimizer's calling convention.
    return J.ravel()
def energy_withgradient_function(v, e1, e2):
    '''Energy function + gradient for the clustering minimization.

    Returns (energy, flattened_gradient) so that
    scipy.optimize.minimize can be called with jac=True.
    '''
    # NOTE: v comes in as a flattened array, but it's a 2D vector
    N = v.shape[0] // 2
    v = v.reshape((N, 2))
    # Coefficients
    l1 = 5e-2  # repulsion from consensus
    l2 = 1e-4 / N  # interactions
    l2_rep = 3.0  # -- " -- (repulsive part)
    l2_att = 1e-2  # -- " -- (elastic attraction)
    # Calculate the radius
    r = np.sqrt((v**2).sum(axis=-1))
    # Mutual distances between all points
    a = np.zeros((N, N, 2))
    a[:] = v
    a -= a.swapaxes(0, 1)
    d = np.sqrt((a**2).sum(axis=-1))
    # ENERGY
    e = 0
    # Infinity trap
    e += np.cosh(r).sum()
    # Consensus repulsor
    e += -l1 * (e1 * r).sum()
    # Pairwise interactions (constant repulsion + harmonic oscillator)
    e += l2 * (-l2_rep * d + l2_att * e2 * d**2).sum()
    # GRADIENT -- vectorized per component, unlike the per-point loops in
    # energy_gradient_function.  (Python 2 xrange.)
    J = np.zeros_like(v)
    for ix in xrange(2):
        # Infinity trap and consensus repulsor
        #e += np.cosh(r).sum()
        #e += -l1 * (e1 * r).sum()
        # The 1e-10 offsets regularize the v/r singularity at r = 0.
        J[:, ix] = (np.sinh(r) - l1 * e1) * (v[:, ix] + 1e-10) / (r + 1e-10)
        ## Pairwise interactions (constant repulsion + harmonic oscillator)
        # vd[i, j] = v[j, ix] - v[i, ix]; 1e-15 guards the zero diagonal.
        vd = np.tile(v[:, ix], (N, 1))
        vd -= vd.T
        J[:, ix] += 2 * l2 * (-l2_rep * vd.T / (d + 1e-15) + 2 * l2_att * e2 * vd.T).sum(axis=-1)
    return (e, J.ravel())
def get_distance_parameters(alim, refseq=None):
    '''Get distance parameters, from a reference and between the seqs.

    Returns (e1, e2): e1 is the per-sequence mismatch fraction against
    *refseq* (consensus if not given); e2 is the N x N matrix of pairwise
    identity fractions between sequences.
    '''
    if refseq is None:
        from .utils.sequence import get_consensus
        refseq = get_consensus(alim)

    # Fraction of mismatching positions against the reference sequence.
    e1 = (alim != refseq).mean(axis=1)

    # Pairwise identity: stack the alignment against its transpose and
    # average positional equality over the sequence axis.
    stacked = np.tile(alim, (alim.shape[0], 1, 1))
    e2 = 1.0 * (stacked == stacked.swapaxes(0, 1)).mean(axis=2)
    return e1, e2
def add_distance_parameters(alim, seq, refseq=None):
    '''Calculate distance parameters for a new sequence.

    Returns (e1, e2): e1 is the mismatch fraction of *seq* against
    *refseq* (consensus if not given); e2 the mismatch fraction of *seq*
    against every sequence of the alignment.
    '''
    if refseq is None:
        from .utils.sequence import get_consensus
        refseq = get_consensus(alim)
    ref_mismatch = (seq != refseq)
    row_mismatch = (seq != alim)
    e1 = ref_mismatch.mean()
    e2 = row_mismatch.mean(axis=1)
    return e1, e2
def cluster_force(alim, method='BFGS-jac', plot=False):
    '''Cluster sequences with physical forces.

    Parameters:
       alim (biopython alignment or numpy matrix of chars): alignment to analyze
       method (str): minimization method (see scipy.optimize.minimize)
       plot (bool): plot clustering

    Returns:
       dict with the optimized 2D positions ('x'), the distance
       parameters ('e1e2') and the minimization method used ('method').
    '''
    import numpy as np
    from scipy.optimize import minimize
    from .utils.sequence import get_consensus
    alim = np.asarray(alim, 'S1')
    N = alim.shape[0]
    L = alim.shape[-1]
    e1, e2 = get_distance_parameters(alim)
    # Minimize the energy, starting from random positions in the unit square.
    # 'BFGS-jac' and 'CG' use the analytic gradient (jac=True); 'Powell'
    # and plain 'BFGS' let scipy approximate it numerically.
    v0 = np.random.rand(N, 2)
    if method in ['Powell', 'BFGS']:
        print method, 'not using Jacobian'
        res = minimize(energy_function, v0.ravel(), method=method, args=(e1, e2))
    elif method in ['BFGS-jac']:
        print 'BFGS, using Jacobian'
        res = minimize(energy_withgradient_function, v0.ravel(), method='BFGS', args=(e1, e2),
                       jac=True)
    elif method in ['CG']:
        print method, 'using Jacobian'
        res = minimize(energy_withgradient_function, v0.ravel(), method=method, args=(e1, e2),
                       jac=True)
    else:
        raise ValueError('Method for minimization not found!')
    # The optimizer works on the flattened vector; restore (N, 2) points.
    v = res.x.reshape((N, 2))
    print 'Minimal value of the function:', res.fun
    if plot:
        from matplotlib import cm
        import matplotlib.pyplot as plt
        # Plot the force field and the scatter
        fig, ax = plt.subplots()
        # Color each point by its Hamming distance from the consensus.
        cons = get_consensus(alim)
        dcon = (alim != cons).sum(axis=1)
        colors = cm.jet(1.0 * dcon / dcon.max())
        # Hollow circle marks the consensus at the origin.
        ax.scatter([0], [0], s=200, edgecolor='k', facecolor='none', lw=2, zorder=-1)
        ax.scatter(v[:, 0], v[:, 1], s=40, c=colors)
        ax.grid(True)
        ax.set_xlim(-1.04*np.abs(v[:, 0]).max(), 1.04*np.abs(v[:, 0]).max())
        ax.set_ylim(-1.04*np.abs(v[:, 1]).max(), 1.04*np.abs(v[:, 1]).max())
        sm = plt.cm.ScalarMappable(cmap=cm.jet, norm=plt.Normalize(vmin=0, vmax=dcon.max()))
        sm.set_array(dcon)
        cb = plt.colorbar(sm)
        cb.set_label('Hamming distance from consensus', rotation=270, labelpad=40)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        plt.tight_layout()
    return {'x': v, 'e1e2': (e1, e2), 'method': method}
def position_sequence(seq, v, alim, e1e2=None):
    '''Position one sequence to the cluster according to the current force field

    Parameters:
       seq: the sequence to add
       v: the vector of positions for the current points
       alim (matrix of char): the alignment the cluster was built from
       e1e2 (2-tuple, optional): precomputed distance parameters (e1, e2)
          of `alim`; recomputed from scratch when None.

    Returns:
       numpy array of length 2 with the optimized position of `seq`.
    '''
    import numpy as np
    from scipy.optimize import minimize

    alim = np.asarray(alim)
    if e1e2 is None:
        # BUGFIX: this branch used to bind (e1, e2) but never build the
        # extended parameters (e1f, e2f) consumed below, so it always
        # raised NameError. The stacked alignment already contains the
        # new sequence, hence its distance parameters ARE the extended ones.
        e1f, e2f = get_distance_parameters(np.vstack([alim, seq]))
    else:
        e1, e2 = e1e2
        e1new, e2new = add_distance_parameters(alim, seq)
        e1f = np.append(e1, e1new)
        # NOTE(review): e2 from get_distance_parameters holds *identity*
        # fractions while e2new holds *mismatch* fractions, and the new
        # diagonal entry stays 0 rather than 1 -- confirm this mixing is
        # intended before relying on this branch.
        e2f = np.zeros((e2.shape[0] + 1, e2.shape[1] + 1), e2.dtype)
        e2f[:e2.shape[0], :e2.shape[1]] = e2
        e2f[-1, :-1] = e2new
        e2f[:-1, -1] = e2new

    # Only the new point moves: the existing positions are frozen inside
    # the energy function and only the last two coordinates are optimized.
    u0 = np.random.rand(2)
    efun = lambda u, e1, e2: energy_function(np.concatenate([v.ravel(), u]), e1, e2)
    res = minimize(efun, u0, args=(e1f, e2f))
    u = res.x
    return u
| iosonofabio/clusterforce | clusterforce/clustering.py | Python | bsd-2-clause | 8,227 | [
"Biopython"
] | 8a92e57168a74a110b35e92826ac6f7dd554fa660fd652fe2caff57b8e0155dd |
import os
import re
import ast
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
package_name = "omicexperiment"
# version parsing from __init__ pulled from scikit-bio
# https://github.com/biocore/scikit-bio/blob/master/setup.py
# which is itself based off Flask's setup.py https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
# Bootstrap setup.py with numpy
# from the solution by coldfix http://stackoverflow.com/a/21621689/579416
class build_ext_numpy(_build_ext):
    """build_ext that defers importing numpy until build time.

    This lets ``setup_requires`` install numpy first and then injects its
    C header directory into the include path for compiling extensions
    (coldfix's bootstrap recipe, http://stackoverflow.com/a/21621689/579416).
    """

    def finalize_options(self):
        """Add numpy's include directory once numpy is importable."""
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        import builtins
        builtins.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# Extract __version__ from the package source without importing it
# (its dependencies may not be installed yet at setup time).
with open('omicexperiment/__init__.py', 'rb') as f:
    hit = _version_re.search(f.read().decode('utf-8')).group(1)
    version = str(ast.literal_eval(hit))
here = os.path.abspath(os.path.dirname(__file__))
# Long description = README + changelog, converted from Markdown to reST
# for PyPI when pypandoc is available; raw Markdown otherwise.
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.md')).read()
try:
    import pypandoc
    long_description = pypandoc.convert(README + '\n\n' + CHANGES, 'rst', format='md')
except ImportError:
    long_description= README + '\n\n' + CHANGES
# numpy must be available before build_ext runs (see build_ext_numpy).
setup_requires = [
    'numpy >= 1.10.4'
]
install_requires = [
    'numpy >= 1.10.4',
    'scipy>=0.16.1',
    'pandas >= 0.17.1',
    'biom-format >= 2.1.5',
    'lxml>=3.5.0',
    'pygal >= 2.1.1',
    'scikit-bio==0.4.2',
    'pyyaml',
    'bokeh==0.13.0']
setup(name=package_name,
      version=version,
      license='BSD',
      description="For analysis of omic experiments.",
      long_description=long_description,
      classifiers=[
          "Programming Language :: Python",
          "License :: OSI Approved :: BSD License",
          "Topic :: Scientific/Engineering :: Bio-Informatics",
      ],
      # Custom build_ext performs the numpy bootstrap above.
      cmdclass={'build_ext': build_ext_numpy},
      author='Ahmed Bassiouni',
      author_email='ahmedbassi@gmail.com',
      maintainer="Ahmed Bassiouni",
      maintainer_email="ahmedbassi@gmail.com",
      url='https://github.com/bassio/omicexperiment',
      download_url = 'https://github.com/bassio/omicexperiment/tarball/' + version,
      keywords='bioinformatics',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='omicexperiment.tests',
      install_requires=install_requires,
      setup_requires=setup_requires,
      entry_points="""\
      """,
      )
| bassio/omicexperiment | setup.py | Python | bsd-3-clause | 2,621 | [
"scikit-bio"
] | e0f78a87c2ad6b2484b53f88cee911b732e016371f184062cf19018eea9851d8 |
from __future__ import absolute_import
import re
import sys
import copy
import codecs
import itertools
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, _py_int_types=object)
if sys.version_info[0] >= 3:
_py_int_types = int
_py_string_types = (bytes, str)
else:
_py_int_types = (int, long)
_py_string_types = (bytes, unicode)
from . import Nodes
from . import ExprNodes
from . import PyrexTypes
from . import Visitor
from . import Builtin
from . import UtilNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
from .StringEncoding import EncodedString, bytes_literal, encoded_string
from .Errors import error, warning
from .ParseTreeTransforms import SkipDeclarations
try:
from __builtin__ import reduce
except ImportError:
from functools import reduce
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
def load_c_utility(name):
    # Shorthand: load (and cache) a named utility-code section from Optimize.c.
    return UtilityCode.load_cached(name, "Optimize.c")
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
    """Strip a single layer of Python<->C type coercion from *node*, if any."""
    return node.arg if isinstance(node, coercion_nodes) else node
def unwrap_node(node):
    # Follow ResultRefNode indirections down to the underlying expression node.
    while isinstance(node, UtilNodes.ResultRefNode):
        node = node.expression
    return node
def is_common_value(a, b):
    """Whether *a* and *b* are statically known to refer to the same value.

    Only plain names and non-Python attribute chains can be recognised.
    """
    a, b = unwrap_node(a), unwrap_node(b)
    if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode):
        return a.name == b.name
    if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode):
        return (not a.is_py_attr
                and a.attribute == b.attribute
                and is_common_value(a.obj, b.obj))
    return False
def filter_none_node(node):
    """Map a node whose constant value is known to be None to None itself."""
    if node is None or node.constant_result is not None:
        return node
    return None
class _YieldNodeCollector(Visitor.TreeVisitor):
    """
    YieldExprNode finder for generator expressions.
    """
    def __init__(self):
        Visitor.TreeVisitor.__init__(self)
        # maps YieldExprNode -> enclosing ExprStatNode (statement-level yields only)
        self.yield_stat_nodes = {}
        # all yield expressions found, in traversal order
        self.yield_nodes = []

    # default handler: plain recursion into child nodes
    visit_Node = Visitor.TreeVisitor.visitchildren

    def visit_YieldExprNode(self, node):
        self.yield_nodes.append(node)
        self.visitchildren(node)

    def visit_ExprStatNode(self, node):
        # visit children first so the yield node is registered before the lookup
        self.visitchildren(node)
        if node.expr in self.yield_nodes:
            self.yield_stat_nodes[node.expr] = node

    # everything below these nodes is out of scope:

    def visit_GeneratorExpressionNode(self, node):
        pass

    def visit_LambdaNode(self, node):
        pass

    def visit_FuncDefNode(self, node):
        pass
def _find_single_yield_expression(node):
    """Return the (arg, stat_node) pair of the only yield below *node*,
    or (None, None) if there is not exactly one statement-level yield."""
    statements = _find_yield_statements(node)
    return statements[0] if len(statements) == 1 else (None, None)
def _find_yield_statements(node):
    """Collect (yield_arg, enclosing_statement) pairs for the yields below *node*.

    Returns an empty list if any yield appears outside of a plain
    expression statement.
    """
    collector = _YieldNodeCollector()
    collector.visitchildren(node)
    try:
        return [
            (yield_node.arg, collector.yield_stat_nodes[yield_node])
            for yield_node in collector.yield_nodes
        ]
    except KeyError:
        # some 'yield' was used as a non-statement expression
        return []
class IterationTransform(Visitor.EnvTransform):
    """Transform some common for-in loop patterns into efficient C loops:

    - for-in-dict loop becomes a while loop calling PyDict_Next()
    - for-in-enumerate is replaced by an external counter variable
    - for-in-range loop becomes a plain C for loop

    Each transformation method returns either the original node (when it
    cannot optimise it) or its optimised replacement.
    """
def visit_PrimaryCmpNode(self, node):
    """Rewrite '(not_)in' tests against C pointer/array operands as an
    explicit search loop; other comparisons are left unchanged."""
    if node.is_ptr_contains():

        # for t in operand2:
        #     if operand1 == t:
        #         res = True
        #         break
        # else:
        #     res = False

        pos = node.pos
        result_ref = UtilNodes.ResultRefNode(node)
        if node.operand2.is_subscript:
            base_type = node.operand2.base.type.base_type
        else:
            base_type = node.operand2.type.base_type
        target_handle = UtilNodes.TempHandle(base_type)
        target = target_handle.ref(pos)
        cmp_node = ExprNodes.PrimaryCmpNode(
            pos, operator=u'==', operand1=node.operand1, operand2=target)
        if_body = Nodes.StatListNode(
            pos,
            stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
                     Nodes.BreakStatNode(pos)])
        if_node = Nodes.IfStatNode(
            pos,
            if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
            else_clause=None)
        # the loop's else clause resets the result to False when no match was found
        for_loop = UtilNodes.TempsBlockNode(
            pos,
            temps = [target_handle],
            body = Nodes.ForInStatNode(
                pos,
                target=target,
                iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
                body=if_node,
                else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
        for_loop = for_loop.analyse_expressions(self.current_env())
        # re-run this transform so the generated loop itself gets optimised
        for_loop = self.visit(for_loop)
        new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)

        if node.operator == 'not_in':
            new_node = ExprNodes.NotNode(pos, operand=new_node)
        return new_node

    else:
        self.visitchildren(node)
        return node
def visit_ForInStatNode(self, node):
    """Entry point: process children first, then try to optimise the loop
    based on what is known about the iterated sequence."""
    self.visitchildren(node)
    return self._optimise_for_loop(node, node.iterator.sequence)
def _optimise_for_loop(self, node, iterable, reversed=False):
    """Dispatch a for-in loop to the appropriate specialised transform
    based on the iterable's (declared or annotated) type.

    Returns the replacement node, or *node* unchanged when no
    optimisation applies.  *reversed* indicates the loop came from a
    reversed() call and restricts which transforms are legal.
    """
    annotation_type = None
    if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
        annotation = iterable.entry.annotation
        if annotation.is_subscript:
            annotation = annotation.base  # container base type
        # FIXME: generalise annotation evaluation => maybe provide a "qualified name" also for imported names?
        if annotation.is_name:
            if annotation.entry and annotation.entry.qualified_name == 'typing.Dict':
                annotation_type = Builtin.dict_type
            elif annotation.name == 'Dict':
                annotation_type = Builtin.dict_type
            if annotation.entry and annotation.entry.qualified_name in ('typing.Set', 'typing.FrozenSet'):
                annotation_type = Builtin.set_type
            elif annotation.name in ('Set', 'FrozenSet'):
                annotation_type = Builtin.set_type

    if Builtin.dict_type in (iterable.type, annotation_type):
        # like iterating over dict.keys()
        if reversed:
            # CPython raises an error here: not a sequence
            return node
        return self._transform_dict_iteration(
            node, dict_obj=iterable, method=None, keys=True, values=False)

    if (Builtin.set_type in (iterable.type, annotation_type) or
            Builtin.frozenset_type in (iterable.type, annotation_type)):
        if reversed:
            # CPython raises an error here: not a sequence
            return node
        return self._transform_set_iteration(node, iterable)

    # C array (slice) iteration?
    if iterable.type.is_ptr or iterable.type.is_array:
        return self._transform_carray_iteration(node, iterable, reversed=reversed)
    if iterable.type is Builtin.bytes_type:
        return self._transform_bytes_iteration(node, iterable, reversed=reversed)
    if iterable.type is Builtin.unicode_type:
        return self._transform_unicode_iteration(node, iterable, reversed=reversed)

    # the rest is based on function calls
    if not isinstance(iterable, ExprNodes.SimpleCallNode):
        return node

    # number of positional arguments, excluding a bound 'self'
    if iterable.args is None:
        arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
    else:
        arg_count = len(iterable.args)
        if arg_count and iterable.self is not None:
            arg_count -= 1

    function = iterable.function
    # dict iteration?
    if function.is_attribute and not reversed and not arg_count:
        base_obj = iterable.self or function.obj
        method = function.attribute
        # in Py3, items() is equivalent to Py2's iteritems()
        is_safe_iter = self.global_scope().context.language_level >= 3

        if not is_safe_iter and method in ('keys', 'values', 'items'):
            # try to reduce this to the corresponding .iter*() methods
            if isinstance(base_obj, ExprNodes.CallNode):
                inner_function = base_obj.function
                if (inner_function.is_name and inner_function.name == 'dict'
                        and inner_function.entry
                        and inner_function.entry.is_builtin):
                    # e.g. dict(something).items() => safe to use .iter*()
                    is_safe_iter = True

        keys = values = False
        if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
            keys = True
        elif method == 'itervalues' or (is_safe_iter and method == 'values'):
            values = True
        elif method == 'iteritems' or (is_safe_iter and method == 'items'):
            keys = values = True

        if keys or values:
            return self._transform_dict_iteration(
                node, base_obj, method, keys, values)

    # enumerate/reversed ?
    if iterable.self is None and function.is_name and \
            function.entry and function.entry.is_builtin:
        if function.name == 'enumerate':
            if reversed:
                # CPython raises an error here: not a sequence
                return node
            return self._transform_enumerate_iteration(node, iterable)
        elif function.name == 'reversed':
            if reversed:
                # CPython raises an error here: not a sequence
                return node
            return self._transform_reversed_iteration(node, iterable)

    # range() iteration?
    if Options.convert_range and arg_count >= 1 and (
            iterable.self is None and
            function.is_name and function.name in ('range', 'xrange') and
            function.entry and function.entry.is_builtin):
        if node.target.type.is_int or node.target.type.is_enum:
            return self._transform_range_iteration(node, iterable, reversed=reversed)
        if node.target.type.is_pyobject:
            # Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
            for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
                if isinstance(arg, ExprNodes.IntNode):
                    if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
                        continue
                break
            else:
                return self._transform_range_iteration(node, iterable, reversed=reversed)

    return node
def _transform_reversed_iteration(self, node, reversed_function):
    """Handle for-in-reversed(): list/tuple iterators just get flagged as
    reversed; anything else retries the other transforms with
    reversed=True."""
    args = reversed_function.arg_tuple.args
    if len(args) == 0:
        error(reversed_function.pos,
              "reversed() requires an iterable argument")
        return node
    elif len(args) > 1:
        error(reversed_function.pos,
              "reversed() takes exactly 1 argument")
        return node
    arg = args[0]

    # reversed(list/tuple) ?
    if arg.type in (Builtin.tuple_type, Builtin.list_type):
        node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
        node.iterator.reversed = True
        return node

    return self._optimise_for_loop(node, arg, reversed=True)
# C signatures of the CPython macros used for direct bytes access
# in _transform_bytes_iteration() below.
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_char_ptr_type, [
        PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
        ])

PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
        ])
def _transform_bytes_iteration(self, node, slice_node, reversed=False):
    """Iterate over a bytes object through its internal char* buffer,
    delegating the actual loop to _transform_carray_iteration()."""
    target_type = node.target.type
    if not target_type.is_int and target_type is not Builtin.bytes_type:
        # bytes iteration returns bytes objects in Py2, but
        # integers in Py3
        return node

    # hold the (None-checked) bytes object alive for the loop's duration
    unpack_temp_node = UtilNodes.LetRefNode(
        slice_node.as_none_safe_node("'NoneType' is not iterable"))

    slice_base_node = ExprNodes.PythonCapiCallNode(
        slice_node.pos, "PyBytes_AS_STRING",
        self.PyBytes_AS_STRING_func_type,
        args = [unpack_temp_node],
        is_temp = 0,
        )
    len_node = ExprNodes.PythonCapiCallNode(
        slice_node.pos, "PyBytes_GET_SIZE",
        self.PyBytes_GET_SIZE_func_type,
        args = [unpack_temp_node],
        is_temp = 0,
        )

    return UtilNodes.LetNode(
        unpack_temp_node,
        self._transform_carray_iteration(
            node,
            ExprNodes.SliceIndexNode(
                slice_node.pos,
                base = slice_base_node,
                start = None,
                step = None,
                stop = len_node,
                type = slice_base_node.type,
                is_temp = 1,
                ),
            reversed = reversed))
# C signature of the __Pyx_PyUnicode_READ() utility macro.
PyUnicode_READ_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ucs4_type, [
        PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
        PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
        PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
        ])

# C signature of __Pyx_init_unicode_iteration(); returns -1 on error.
init_unicode_iteration_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_int_type, [
        PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
        PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
        PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
        ],
    exception_value = '-1')
def _transform_unicode_iteration(self, node, slice_node, reversed=False):
    """Iterate over a unicode string with a counted loop that reads
    characters via __Pyx_PyUnicode_READ()."""
    if slice_node.is_literal:
        # try to reduce to byte iteration for plain Latin-1 strings
        try:
            bytes_value = bytes_literal(slice_node.value.encode('latin1'), 'iso8859-1')
        except UnicodeEncodeError:
            pass
        else:
            bytes_slice = ExprNodes.SliceIndexNode(
                slice_node.pos,
                base=ExprNodes.BytesNode(
                    slice_node.pos, value=bytes_value,
                    constant_result=bytes_value,
                    type=PyrexTypes.c_const_char_ptr_type).coerce_to(
                        PyrexTypes.c_const_uchar_ptr_type, self.current_env()),
                start=None,
                stop=ExprNodes.IntNode(
                    slice_node.pos, value=str(len(bytes_value)),
                    constant_result=len(bytes_value),
                    type=PyrexTypes.c_py_ssize_t_type),
                type=Builtin.unicode_type,  # hint for Python conversion
            )
            return self._transform_carray_iteration(node, bytes_slice, reversed)

    # hold the (None-checked) string alive for the loop's duration
    unpack_temp_node = UtilNodes.LetRefNode(
        slice_node.as_none_safe_node("'NoneType' is not iterable"))

    start_node = ExprNodes.IntNode(
        node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type)
    length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
    end_node = length_temp.ref(node.pos)
    if reversed:
        relation1, relation2 = '>', '>='
        start_node, end_node = end_node, start_node
    else:
        relation1, relation2 = '<=', '<'

    kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
    data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
    counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)

    target_value = ExprNodes.PythonCapiCallNode(
        slice_node.pos, "__Pyx_PyUnicode_READ",
        self.PyUnicode_READ_func_type,
        args = [kind_temp.ref(slice_node.pos),
                data_temp.ref(slice_node.pos),
                counter_temp.ref(node.target.pos)],
        is_temp = False,
        )
    if target_value.type != node.target.type:
        target_value = target_value.coerce_to(node.target.type,
                                              self.current_env())
    target_assign = Nodes.SingleAssignmentNode(
        pos = node.target.pos,
        lhs = node.target,
        rhs = target_value)
    body = Nodes.StatListNode(
        node.pos,
        stats = [target_assign, node.body])

    loop_node = Nodes.ForFromStatNode(
        node.pos,
        bound1=start_node, relation1=relation1,
        target=counter_temp.ref(node.target.pos),
        relation2=relation2, bound2=end_node,
        step=None, body=body,
        else_clause=node.else_clause,
        from_range=True)

    # fills length/data/kind temps from the string object before the loop
    setup_node = Nodes.ExprStatNode(
        node.pos,
        expr = ExprNodes.PythonCapiCallNode(
            slice_node.pos, "__Pyx_init_unicode_iteration",
            self.init_unicode_iteration_func_type,
            args = [unpack_temp_node,
                    ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
                                            type=PyrexTypes.c_py_ssize_t_ptr_type),
                    ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
                                            type=PyrexTypes.c_void_ptr_ptr_type),
                    ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
                                            type=PyrexTypes.c_int_ptr_type),
                    ],
            is_temp = True,
            result_is_used = False,
            utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
            ))
    return UtilNodes.LetNode(
        unpack_temp_node,
        UtilNodes.TempsBlockNode(
            node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
            body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
def _transform_carray_iteration(self, node, slice_node, reversed=False):
    """Iterate over a C array or pointer slice with a moving pointer
    instead of a Python iterator.

    Accepts SliceIndexNode, subscripted slices with constant step, and
    plain C arrays of known size; anything else is returned unchanged
    (with an error for non-Python types of unknown extent).
    """
    neg_step = False
    if isinstance(slice_node, ExprNodes.SliceIndexNode):
        slice_base = slice_node.base
        start = filter_none_node(slice_node.start)
        stop = filter_none_node(slice_node.stop)
        step = None
        if not stop:
            if not slice_base.type.is_pyobject:
                error(slice_node.pos, "C array iteration requires known end index")
            return node

    elif slice_node.is_subscript:
        assert isinstance(slice_node.index, ExprNodes.SliceNode)
        slice_base = slice_node.base
        index = slice_node.index
        start = filter_none_node(index.start)
        stop = filter_none_node(index.stop)
        step = filter_none_node(index.step)
        if step:
            # only compile-time constant, non-zero steps with a usable
            # bound in the step's direction can be handled
            if not isinstance(step.constant_result, _py_int_types) \
                    or step.constant_result == 0 \
                    or step.constant_result > 0 and not stop \
                    or step.constant_result < 0 and not start:
                if not slice_base.type.is_pyobject:
                    error(step.pos, "C array iteration requires known step size and end index")
                return node
            else:
                # step sign is handled internally by ForFromStatNode
                step_value = step.constant_result
                if reversed:
                    step_value = -step_value
                neg_step = step_value < 0
                step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
                                         value=str(abs(step_value)),
                                         constant_result=abs(step_value))

    elif slice_node.type.is_array:
        if slice_node.type.size is None:
            error(slice_node.pos, "C array iteration requires known end index")
            return node
        slice_base = slice_node
        start = None
        stop = ExprNodes.IntNode(
            slice_node.pos, value=str(slice_node.type.size),
            type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
        step = None

    else:
        if not slice_node.type.is_pyobject:
            error(slice_node.pos, "C array iteration requires known end index")
        return node

    if start:
        start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
    if stop:
        stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
    if stop is None:
        if neg_step:
            # negative step runs down to index 0 (exclusive bound -1)
            stop = ExprNodes.IntNode(
                slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
        else:
            error(slice_node.pos, "C array iteration requires known step size and end index")
            return node

    if reversed:
        if not start:
            start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0,
                                      type=PyrexTypes.c_py_ssize_t_type)
        # if step was provided, it was already negated above
        start, stop = stop, start

    ptr_type = slice_base.type
    if ptr_type.is_array:
        ptr_type = ptr_type.element_ptr_type()
    carray_ptr = slice_base.coerce_to_simple(self.current_env())

    # compute the begin/end pointers, skipping a '+ 0' offset
    if start and start.constant_result != 0:
        start_ptr_node = ExprNodes.AddNode(
            start.pos,
            operand1=carray_ptr,
            operator='+',
            operand2=start,
            type=ptr_type)
    else:
        start_ptr_node = carray_ptr

    if stop and stop.constant_result != 0:
        stop_ptr_node = ExprNodes.AddNode(
            stop.pos,
            operand1=ExprNodes.CloneNode(carray_ptr),
            operator='+',
            operand2=stop,
            type=ptr_type
            ).coerce_to_simple(self.current_env())
    else:
        stop_ptr_node = ExprNodes.CloneNode(carray_ptr)

    counter = UtilNodes.TempHandle(ptr_type)
    counter_temp = counter.ref(node.target.pos)

    if slice_base.type.is_string and node.target.type.is_pyobject:
        # special case: char* -> bytes/unicode
        if slice_node.type is Builtin.unicode_type:
            target_value = ExprNodes.CastNode(
                ExprNodes.DereferenceNode(
                    node.target.pos, operand=counter_temp,
                    type=ptr_type.base_type),
                PyrexTypes.c_py_ucs4_type).coerce_to(
                    node.target.type, self.current_env())
        else:
            # char* -> bytes coercion requires slicing, not indexing
            target_value = ExprNodes.SliceIndexNode(
                node.target.pos,
                start=ExprNodes.IntNode(node.target.pos, value='0',
                                        constant_result=0,
                                        type=PyrexTypes.c_int_type),
                stop=ExprNodes.IntNode(node.target.pos, value='1',
                                       constant_result=1,
                                       type=PyrexTypes.c_int_type),
                base=counter_temp,
                type=Builtin.bytes_type,
                is_temp=1)
    elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
        # Allow iteration with pointer target to avoid copy.
        target_value = counter_temp
    else:
        # TODO: can this safely be replaced with DereferenceNode() as above?
        target_value = ExprNodes.IndexNode(
            node.target.pos,
            index=ExprNodes.IntNode(node.target.pos, value='0',
                                    constant_result=0,
                                    type=PyrexTypes.c_int_type),
            base=counter_temp,
            type=ptr_type.base_type)

    if target_value.type != node.target.type:
        target_value = target_value.coerce_to(node.target.type,
                                              self.current_env())

    target_assign = Nodes.SingleAssignmentNode(
        pos = node.target.pos,
        lhs = node.target,
        rhs = target_value)

    body = Nodes.StatListNode(
        node.pos,
        stats = [target_assign, node.body])

    relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)

    for_node = Nodes.ForFromStatNode(
        node.pos,
        bound1=start_ptr_node, relation1=relation1,
        target=counter_temp,
        relation2=relation2, bound2=stop_ptr_node,
        step=step, body=body,
        else_clause=node.else_clause,
        from_range=True)

    return UtilNodes.TempsBlockNode(
        node.pos, temps=[counter],
        body=for_node)
def _transform_enumerate_iteration(self, node, enumerate_function):
    """Replace for-in-enumerate() by iterating the bare sequence with an
    explicit counter variable that is incremented in the loop body."""
    args = enumerate_function.arg_tuple.args
    if len(args) == 0:
        error(enumerate_function.pos,
              "enumerate() requires an iterable argument")
        return node
    elif len(args) > 2:
        error(enumerate_function.pos,
              "enumerate() takes at most 2 arguments")
        return node

    if not node.target.is_sequence_constructor:
        # leave this untouched for now
        return node
    targets = node.target.args
    if len(targets) != 2:
        # leave this untouched for now
        return node

    enumerate_target, iterable_target = targets
    counter_type = enumerate_target.type

    if not counter_type.is_pyobject and not counter_type.is_int:
        # nothing we can do here, I guess
        return node

    # optional second argument is the start value of the counter
    if len(args) == 2:
        start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
    else:
        start = ExprNodes.IntNode(enumerate_function.pos,
                                  value='0',
                                  type=counter_type,
                                  constant_result=0)
    temp = UtilNodes.LetRefNode(start)

    inc_expression = ExprNodes.AddNode(
        enumerate_function.pos,
        operand1 = temp,
        operand2 = ExprNodes.IntNode(node.pos, value='1',
                                     type=counter_type,
                                     constant_result=1),
        operator = '+',
        type = counter_type,
        #inplace = True, # not worth using in-place operation for Py ints
        is_temp = counter_type.is_pyobject
        )

    # assign the counter to the enumerate target, then increment it
    loop_body = [
        Nodes.SingleAssignmentNode(
            pos = enumerate_target.pos,
            lhs = enumerate_target,
            rhs = temp),
        Nodes.SingleAssignmentNode(
            pos = enumerate_target.pos,
            lhs = temp,
            rhs = inc_expression)
        ]

    if isinstance(node.body, Nodes.StatListNode):
        node.body.stats = loop_body + node.body.stats
    else:
        loop_body.append(node.body)
        node.body = Nodes.StatListNode(
            node.body.pos,
            stats = loop_body)

    node.target = iterable_target
    node.item = node.item.coerce_to(iterable_target.type, self.current_env())
    node.iterator.sequence = args[0]

    # recurse into loop to check for further optimisations
    return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
def _find_for_from_node_relations(self, neg_step_value, reversed):
    """Return the (bound1, bound2) comparison operators for a
    ForFromStatNode, given the step direction and whether the loop
    iterates in reversed order."""
    if reversed:
        return ('<', '<=') if neg_step_value else ('>', '>=')
    return ('>=', '>') if neg_step_value else ('<=', '<')
def _transform_range_iteration(self, node, range_function, reversed=False):
    """Replace for-in-range()/xrange() by a C for-from loop.

    Only constant (compile-time known) steps are handled; for reversed
    ranges with non-unit step, the effective start bound is adjusted
    either at compile time or via _build_range_step_calculation().
    """
    args = range_function.arg_tuple.args
    if len(args) < 3:
        step_pos = range_function.pos
        step_value = 1
        step = ExprNodes.IntNode(step_pos, value='1', constant_result=1)
    else:
        step = args[2]
        step_pos = step.pos
        if not isinstance(step.constant_result, _py_int_types):
            # cannot determine step direction
            return node
        step_value = step.constant_result
        if step_value == 0:
            # will lead to an error elsewhere
            return node
        step = ExprNodes.IntNode(step_pos, value=str(step_value),
                                 constant_result=step_value)

    if len(args) == 1:
        bound1 = ExprNodes.IntNode(range_function.pos, value='0',
                                   constant_result=0)
        bound2 = args[0].coerce_to_integer(self.current_env())
    else:
        bound1 = args[0].coerce_to_integer(self.current_env())
        bound2 = args[1].coerce_to_integer(self.current_env())

    relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)

    bound2_ref_node = None
    if reversed:
        bound1, bound2 = bound2, bound1
        abs_step = abs(step_value)
        if abs_step != 1:
            if (isinstance(bound1.constant_result, _py_int_types) and
                    isinstance(bound2.constant_result, _py_int_types)):
                # calculate final bounds now
                if step_value < 0:
                    begin_value = bound2.constant_result
                    end_value = bound1.constant_result
                    bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1
                else:
                    begin_value = bound1.constant_result
                    end_value = bound2.constant_result
                    bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1

                bound1 = ExprNodes.IntNode(
                    bound1.pos, value=str(bound1_value), constant_result=bound1_value,
                    type=PyrexTypes.spanning_type(bound1.type, bound2.type))
            else:
                # evaluate the same expression as above at runtime
                bound2_ref_node = UtilNodes.LetRefNode(bound2)
                bound1 = self._build_range_step_calculation(
                    bound1, bound2_ref_node, step, step_value)

    # ForFromStatNode handles the step sign itself => pass abs(step)
    if step_value < 0:
        step_value = -step_value
    step.value = str(step_value)
    step.constant_result = step_value
    step = step.coerce_to_integer(self.current_env())

    if not bound2.is_literal:
        # stop bound must be immutable => keep it in a temp var
        bound2_is_temp = True
        bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2)
    else:
        bound2_is_temp = False

    for_node = Nodes.ForFromStatNode(
        node.pos,
        target=node.target,
        bound1=bound1, relation1=relation1,
        relation2=relation2, bound2=bound2,
        step=step, body=node.body,
        else_clause=node.else_clause,
        from_range=True)
    for_node.set_up_loop(self.current_env())

    if bound2_is_temp:
        for_node = UtilNodes.LetNode(bound2, for_node)

    return for_node
def _build_range_step_calculation(self, bound1, bound2_ref_node, step, step_value):
    """Build the runtime expression tree that computes the adjusted start
    bound for a reversed range() with non-unit step (mirrors the
    compile-time computation in _transform_range_iteration())."""
    abs_step = abs(step_value)
    spanning_type = PyrexTypes.spanning_type(bound1.type, bound2_ref_node.type)
    if step.type.is_int and abs_step < 0x7FFF:
        # Avoid loss of integer precision warnings.
        spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type)
    else:
        spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type)
    if step_value < 0:
        begin_value = bound2_ref_node
        end_value = bound1
        final_op = '-'
    else:
        begin_value = bound1
        end_value = bound2_ref_node
        final_op = '+'

    # bound2 (+/-) abs_step * ((begin - end - 1) // abs_step) (+/-) 1
    step_calculation_node = ExprNodes.binop_node(
        bound1.pos,
        operand1=ExprNodes.binop_node(
            bound1.pos,
            operand1=bound2_ref_node,
            operator=final_op, # +/-
            operand2=ExprNodes.MulNode(
                bound1.pos,
                operand1=ExprNodes.IntNode(
                    bound1.pos,
                    value=str(abs_step),
                    constant_result=abs_step,
                    type=spanning_step_type),
                operator='*',
                operand2=ExprNodes.DivNode(
                    bound1.pos,
                    operand1=ExprNodes.SubNode(
                        bound1.pos,
                        operand1=ExprNodes.SubNode(
                            bound1.pos,
                            operand1=begin_value,
                            operator='-',
                            operand2=end_value,
                            type=spanning_type),
                        operator='-',
                        operand2=ExprNodes.IntNode(
                            bound1.pos,
                            value='1',
                            constant_result=1),
                        type=spanning_step_type),
                    operator='//',
                    operand2=ExprNodes.IntNode(
                        bound1.pos,
                        value=str(abs_step),
                        constant_result=abs_step,
                        type=spanning_step_type),
                    type=spanning_step_type),
                type=spanning_step_type),
            type=spanning_step_type),
        operator=final_op, # +/-
        operand2=ExprNodes.IntNode(
            bound1.pos,
            value='1',
            constant_result=1),
        type=spanning_type)
    return step_calculation_node
def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
    """Replace iteration over a dict (or its keys/values/items method
    calls) by a while loop around a DictIterationNextNode.

    *method* is the method name being iterated (or None for the dict
    itself); *keys*/*values* select what the loop target receives.
    """
    temps = []
    temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
    temps.append(temp)
    dict_temp = temp.ref(dict_obj.pos)
    temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
    temps.append(temp)
    pos_temp = temp.ref(node.pos)

    key_target = value_target = tuple_target = None
    if keys and values:
        if node.target.is_sequence_constructor:
            if len(node.target.args) == 2:
                key_target, value_target = node.target.args
            else:
                # unusual case that may or may not lead to an error
                return node
        else:
            tuple_target = node.target
    elif keys:
        key_target = node.target
    else:
        value_target = node.target

    if isinstance(node.body, Nodes.StatListNode):
        body = node.body
    else:
        body = Nodes.StatListNode(pos = node.body.pos,
                                  stats = [node.body])

    # keep original length to guard against dict modification
    dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
    temps.append(dict_len_temp)
    dict_len_temp_addr = ExprNodes.AmpersandNode(
        node.pos, operand=dict_len_temp.ref(dict_obj.pos),
        type=PyrexTypes.c_ptr_type(dict_len_temp.type))
    temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
    temps.append(temp)
    is_dict_temp = temp.ref(node.pos)
    is_dict_temp_addr = ExprNodes.AmpersandNode(
        node.pos, operand=is_dict_temp,
        type=PyrexTypes.c_ptr_type(temp.type))

    # the iteration step becomes the first statement of the loop body
    iter_next_node = Nodes.DictIterationNextNode(
        dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
        key_target, value_target, tuple_target,
        is_dict_temp)
    iter_next_node = iter_next_node.analyse_expressions(self.current_env())
    body.stats[0:0] = [iter_next_node]

    if method:
        method_node = ExprNodes.StringNode(
            dict_obj.pos, is_identifier=True, value=method)
        dict_obj = dict_obj.as_none_safe_node(
            "'NoneType' object has no attribute '%{0}s'".format('.30' if len(method) <= 30 else ''),
            error = "PyExc_AttributeError",
            format_args = [method])
    else:
        method_node = ExprNodes.NullNode(dict_obj.pos)
        dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")

    def flag_node(value):
        value = value and 1 or 0
        return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)

    result_code = [
        Nodes.SingleAssignmentNode(
            node.pos,
            lhs = pos_temp,
            rhs = ExprNodes.IntNode(node.pos, value='0',
                                    constant_result=0)),
        Nodes.SingleAssignmentNode(
            dict_obj.pos,
            lhs = dict_temp,
            rhs = ExprNodes.PythonCapiCallNode(
                dict_obj.pos,
                "__Pyx_dict_iterator",
                self.PyDict_Iterator_func_type,
                utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
                args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
                        method_node, dict_len_temp_addr, is_dict_temp_addr,
                        ],
                is_temp=True,
                )),
        Nodes.WhileStatNode(
            node.pos,
            condition = None,
            body = body,
            else_clause = node.else_clause
            )
        ]

    return UtilNodes.TempsBlockNode(
        node.pos, temps=temps,
        body=Nodes.StatListNode(
            node.pos,
            stats = result_code
            ))
# C signatures of the __Pyx_dict_iterator() / __Pyx_set_iterator()
# utility functions used by the dict/set iteration transforms above.
PyDict_Iterator_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
        PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
        PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
        ])

PySet_Iterator_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
        PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
        PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
        ])
def _transform_set_iteration(self, node, set_obj):
    """Replace iteration over a set/frozenset by a while loop around a
    SetIterationNextNode (analogous to _transform_dict_iteration())."""
    temps = []
    temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
    temps.append(temp)
    set_temp = temp.ref(set_obj.pos)
    temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
    temps.append(temp)
    pos_temp = temp.ref(node.pos)

    if isinstance(node.body, Nodes.StatListNode):
        body = node.body
    else:
        body = Nodes.StatListNode(pos = node.body.pos,
                                  stats = [node.body])

    # keep original length to guard against set modification
    set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
    temps.append(set_len_temp)
    set_len_temp_addr = ExprNodes.AmpersandNode(
        node.pos, operand=set_len_temp.ref(set_obj.pos),
        type=PyrexTypes.c_ptr_type(set_len_temp.type))
    temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
    temps.append(temp)
    is_set_temp = temp.ref(node.pos)
    is_set_temp_addr = ExprNodes.AmpersandNode(
        node.pos, operand=is_set_temp,
        type=PyrexTypes.c_ptr_type(temp.type))

    # the iteration step becomes the first statement of the loop body
    value_target = node.target
    iter_next_node = Nodes.SetIterationNextNode(
        set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
    iter_next_node = iter_next_node.analyse_expressions(self.current_env())
    body.stats[0:0] = [iter_next_node]

    def flag_node(value):
        value = value and 1 or 0
        return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)

    result_code = [
        Nodes.SingleAssignmentNode(
            node.pos,
            lhs=pos_temp,
            rhs=ExprNodes.IntNode(node.pos, value='0', constant_result=0)),
        Nodes.SingleAssignmentNode(
            set_obj.pos,
            lhs=set_temp,
            rhs=ExprNodes.PythonCapiCallNode(
                set_obj.pos,
                "__Pyx_set_iterator",
                self.PySet_Iterator_func_type,
                utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
                args=[set_obj, flag_node(set_obj.type is Builtin.set_type),
                      set_len_temp_addr, is_set_temp_addr,
                      ],
                is_temp=True,
                )),
        Nodes.WhileStatNode(
            node.pos,
            condition=None,
            body=body,
            else_clause=node.else_clause,
            )
        ]

    return UtilNodes.TempsBlockNode(
        node.pos, temps=temps,
        body=Nodes.StatListNode(
            node.pos,
            stats = result_code
            ))
class SwitchTransform(Visitor.EnvTransform):
    """
    This transformation tries to turn long if statements into C switch statements.
    The requirement is that every clause be an (or of) var == value, where the var
    is common among all clauses and both var and value are ints.
    """
    # Sentinel triple (not_in, common_var, conditions) returned whenever a
    # condition tree cannot be mapped onto a C switch statement.
    NO_MATCH = (None, None, None)
    def extract_conditions(self, cond, allow_not_in):
        """Decompose *cond* into (not_in, tested_var, [value_nodes]).

        Returns NO_MATCH when the condition is not switch-compatible.
        *allow_not_in* permits negated forms ('!=' / 'not_in' joined by
        'and'), which get mapped to a switch with swapped branches.
        """
        # Strip wrapper nodes (coercions, temps, typecasts) to reach the
        # underlying comparison expression.
        while True:
            if isinstance(cond, (ExprNodes.CoerceToTempNode,
                                 ExprNodes.CoerceToBooleanNode)):
                cond = cond.arg
            elif isinstance(cond, ExprNodes.BoolBinopResultNode):
                cond = cond.arg.arg
            elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
                # this is what we get from the FlattenInListTransform
                cond = cond.subexpression
            elif isinstance(cond, ExprNodes.TypecastNode):
                cond = cond.operand
            else:
                break
        if isinstance(cond, ExprNodes.PrimaryCmpNode):
            if cond.cascade is not None:
                # chained comparisons (a == b == c) cannot become a switch
                return self.NO_MATCH
            elif cond.is_c_string_contains() and \
                   isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
                # 'c in "chars"' => one case label per distinct character
                not_in = cond.operator == 'not_in'
                if not_in and not allow_not_in:
                    return self.NO_MATCH
                if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
                       cond.operand2.contains_surrogates():
                    # dealing with surrogates leads to different
                    # behaviour on wide and narrow Unicode
                    # platforms => refuse to optimise this case
                    return self.NO_MATCH
                return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
            elif not cond.is_python_comparison():
                if cond.operator == '==':
                    not_in = False
                elif allow_not_in and cond.operator == '!=':
                    not_in = True
                else:
                    return self.NO_MATCH
                # this looks somewhat silly, but it does the right
                # checks for NameNode and AttributeNode
                if is_common_value(cond.operand1, cond.operand1):
                    if cond.operand2.is_literal:
                        return not_in, cond.operand1, [cond.operand2]
                    elif getattr(cond.operand2, 'entry', None) \
                             and cond.operand2.entry.is_const:
                        return not_in, cond.operand1, [cond.operand2]
                # also accept the reversed form 'literal == var'
                if is_common_value(cond.operand2, cond.operand2):
                    if cond.operand1.is_literal:
                        return not_in, cond.operand2, [cond.operand1]
                    elif getattr(cond.operand1, 'entry', None) \
                             and cond.operand1.entry.is_const:
                        return not_in, cond.operand2, [cond.operand1]
        elif isinstance(cond, ExprNodes.BoolBinopNode):
            # 'a == x or a == y' (or the negated 'and' form) merges the
            # value lists of both operands when they test the same var.
            if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
                allow_not_in = (cond.operator == 'and')
                not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
                not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
                if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
                    if (not not_in_1) or allow_not_in:
                        return not_in_1, t1, c1+c2
        return self.NO_MATCH
    def extract_in_string_conditions(self, string_literal):
        """Turn a string literal into a list of per-character case-label nodes."""
        if isinstance(string_literal, ExprNodes.UnicodeNode):
            charvals = list(map(ord, set(string_literal.value)))
            charvals.sort()
            return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
                                       constant_result=charval)
                     for charval in charvals ]
        else:
            # this is a bit tricky as Py3's bytes type returns
            # integers on iteration, whereas Py2 returns 1-char byte
            # strings
            characters = string_literal.value
            characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
            characters.sort()
            return [ ExprNodes.CharNode(string_literal.pos, value=charval,
                                        constant_result=charval)
                     for charval in characters ]
    def extract_common_conditions(self, common_var, condition, allow_not_in):
        """Like extract_conditions(), but additionally requires the tested
        variable to match *common_var* (if given) and all types to be
        C ints or enums.
        """
        not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
        if var is None:
            return self.NO_MATCH
        elif common_var is not None and not is_common_value(var, common_var):
            return self.NO_MATCH
        elif not (var.type.is_int or var.type.is_enum) or sum([not (cond.type.is_int or cond.type.is_enum) for cond in conditions]):
            return self.NO_MATCH
        return not_in, var, conditions
    def has_duplicate_values(self, condition_values):
        # duplicated values don't work in a switch statement
        seen = set()
        for value in condition_values:
            if value.has_constant_result():
                if value.constant_result in seen:
                    return True
                seen.add(value.constant_result)
            else:
                # this isn't completely safe as we don't know the
                # final C value, but this is about the best we can do
                try:
                    if value.entry.cname in seen:
                        return True
                except AttributeError:
                    return True  # play safe
                seen.add(value.entry.cname)
        return False
    def visit_IfStatNode(self, node):
        """Rewrite an if/elif chain over one int variable into SwitchStatNode."""
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        common_var = None
        cases = []
        for if_clause in node.if_clauses:
            _, common_var, conditions = self.extract_common_conditions(
                common_var, if_clause.condition, False)
            if common_var is None:
                # one clause failed to match => keep the if statement as-is
                self.visitchildren(node)
                return node
            cases.append(Nodes.SwitchCaseNode(pos=if_clause.pos,
                                              conditions=conditions,
                                              body=if_clause.body))
        condition_values = [
            cond for case in cases for cond in case.conditions]
        if len(condition_values) < 2:
            # a switch over a single value buys nothing
            self.visitchildren(node)
            return node
        if self.has_duplicate_values(condition_values):
            self.visitchildren(node)
            return node
        # Recurse into body subtrees that we left untouched so far.
        self.visitchildren(node, 'else_clause')
        for case in cases:
            self.visitchildren(case, 'body')
        common_var = unwrap_node(common_var)
        switch_node = Nodes.SwitchStatNode(pos=node.pos,
                                           test=common_var,
                                           cases=cases,
                                           else_clause=node.else_clause)
        return switch_node
    def visit_CondExprNode(self, node):
        """Rewrite 'x if var in (...) else y' into a switch where possible."""
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        not_in, common_var, conditions = self.extract_common_conditions(
            None, node.test, True)
        if common_var is None \
               or len(conditions) < 2 \
               or self.has_duplicate_values(conditions):
            self.visitchildren(node)
            return node
        return self.build_simple_switch_statement(
            node, common_var, conditions, not_in,
            node.true_val, node.false_val)
    def visit_BoolBinopNode(self, node):
        """Rewrite 'var == a or var == b or ...' into a bool-valued switch."""
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        not_in, common_var, conditions = self.extract_common_conditions(
            None, node, True)
        if common_var is None \
               or len(conditions) < 2 \
               or self.has_duplicate_values(conditions):
            self.visitchildren(node)
            node.wrap_operands(self.current_env())  # in case we changed the operands
            return node
        return self.build_simple_switch_statement(
            node, common_var, conditions, not_in,
            ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
            ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
    def visit_PrimaryCmpNode(self, node):
        """Rewrite a plain 'var in "chars"' (etc.) comparison into a switch."""
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        not_in, common_var, conditions = self.extract_common_conditions(
            None, node, True)
        if common_var is None \
               or len(conditions) < 2 \
               or self.has_duplicate_values(conditions):
            self.visitchildren(node)
            return node
        return self.build_simple_switch_statement(
            node, common_var, conditions, not_in,
            ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
            ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
    def build_simple_switch_statement(self, node, common_var, conditions,
                                      not_in, true_val, false_val):
        """Build a two-way switch that assigns *true_val* in the matching
        case and *false_val* otherwise, wrapped as an expression node.
        """
        result_ref = UtilNodes.ResultRefNode(node)
        true_body = Nodes.SingleAssignmentNode(
            node.pos,
            lhs=result_ref,
            rhs=true_val.coerce_to(node.type, self.current_env()),
            first=True)
        false_body = Nodes.SingleAssignmentNode(
            node.pos,
            lhs=result_ref,
            rhs=false_val.coerce_to(node.type, self.current_env()),
            first=True)
        if not_in:
            # negated test => swap the branches instead of negating values
            true_body, false_body = false_body, true_body
        cases = [Nodes.SwitchCaseNode(pos = node.pos,
                                      conditions = conditions,
                                      body = true_body)]
        common_var = unwrap_node(common_var)
        switch_node = Nodes.SwitchStatNode(pos = node.pos,
                                           test = common_var,
                                           cases = cases,
                                           else_clause = false_body)
        replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node)
        return replacement
    def visit_EvalWithTempExprNode(self, node):
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        # drop unused expression temp from FlattenInListTransform
        orig_expr = node.subexpression
        temp_ref = node.lazy_temp
        self.visitchildren(node)
        if node.subexpression is not orig_expr:
            # node was restructured => check if temp is still used
            if not Visitor.tree_contains(node.subexpression, temp_ref):
                return node.subexpression
        return node
    visit_Node = Visitor.VisitorTransform.recurse_to_children
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
    """
    This transformation flattens "x in [val1, ..., valn]" into a sequential list
    of comparisons.
    """
    def visit_PrimaryCmpNode(self, node):
        self.visitchildren(node)
        if node.cascade is not None:
            # chained comparison (a in b in c) => leave untouched
            return node
        elif node.operator == 'in':
            conjunction = 'or'
            eq_or_neq = '=='
        elif node.operator == 'not_in':
            # De Morgan: 'x not in (a, b)' == 'x != a and x != b'
            conjunction = 'and'
            eq_or_neq = '!='
        else:
            return node
        if not isinstance(node.operand2, (ExprNodes.TupleNode,
                                          ExprNodes.ListNode,
                                          ExprNodes.SetNode)):
            return node
        args = node.operand2.args
        if len(args) == 0:
            # note: lhs may have side effects
            return node
        # evaluate the LHS only once, then compare against each element
        lhs = UtilNodes.ResultRefNode(node.operand1)
        conds = []
        temps = []
        for arg in args:
            try:
                # Trial optimisation to avoid redundant temp
                # assignments. However, since is_simple() is meant to
                # be called after type analysis, we ignore any errors
                # and just play safe in that case.
                is_simple_arg = arg.is_simple()
            except Exception:
                is_simple_arg = False
            if not is_simple_arg:
                # must evaluate all non-simple RHS before doing the comparisons
                arg = UtilNodes.LetRefNode(arg)
                temps.append(arg)
            cond = ExprNodes.PrimaryCmpNode(
                                pos = node.pos,
                                operand1 = lhs,
                                operator = eq_or_neq,
                                operand2 = arg,
                                cascade = None)
            conds.append(ExprNodes.TypecastNode(
                                pos = node.pos,
                                operand = cond,
                                type = PyrexTypes.c_bint_type))
        def concat(left, right):
            return ExprNodes.BoolBinopNode(
                                pos = node.pos,
                                operator = conjunction,
                                operand1 = left,
                                operand2 = right)
        # fold the single comparisons into one left-associated bool chain
        condition = reduce(concat, conds)
        new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
        # wrap in reverse order so temps are evaluated in source order
        for temp in temps[::-1]:
            new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
        return new_node
    visit_Node = Visitor.VisitorTransform.recurse_to_children
class DropRefcountingTransform(Visitor.VisitorTransform):
    """Drop ref-counting in safe places.
    """
    visit_Node = Visitor.VisitorTransform.recurse_to_children
    def visit_ParallelAssignmentNode(self, node):
        """
        Parallel swap assignments like 'a,b = b,a' are safe.
        """
        left_names, right_names = [], []
        left_indices, right_indices = [], []
        temps = []
        for stat in node.stats:
            if isinstance(stat, Nodes.SingleAssignmentNode):
                if not self._extract_operand(stat.lhs, left_names,
                                             left_indices, temps):
                    return node
                if not self._extract_operand(stat.rhs, right_names,
                                             right_indices, temps):
                    return node
            elif isinstance(stat, Nodes.CascadedAssignmentNode):
                # FIXME
                return node
            else:
                return node
        if left_names or right_names:
            # lhs/rhs names must be a non-redundant permutation
            lnames = [ path for path, n in left_names ]
            rnames = [ path for path, n in right_names ]
            if set(lnames) != set(rnames):
                return node
            if len(set(lnames)) != len(right_names):
                return node
        if left_indices or right_indices:
            # base name and index of index nodes must be a
            # non-redundant permutation
            lindices = []
            for lhs_node in left_indices:
                index_id = self._extract_index_id(lhs_node)
                if not index_id:
                    return node
                lindices.append(index_id)
            rindices = []
            for rhs_node in right_indices:
                index_id = self._extract_index_id(rhs_node)
                if not index_id:
                    return node
                rindices.append(index_id)
            if set(lindices) != set(rindices):
                return node
            if len(set(lindices)) != len(right_indices):
                return node
            # really supporting IndexNode requires support in
            # __Pyx_GetItemInt(), so let's stop short for now
            return node
        # the assignment is a pure permutation => the ref counts cancel
        # out, so disable managed references on all involved nodes
        temp_args = [t.arg for t in temps]
        for temp in temps:
            temp.use_managed_ref = False
        for _, name_node in left_names + right_names:
            if name_node not in temp_args:
                name_node.use_managed_ref = False
        for index_node in left_indices + right_indices:
            index_node.use_managed_ref = False
        return node
    def _extract_operand(self, node, names, indices, temps):
        """Classify an assignment operand into *names* or *indices*.

        Returns False when the operand is not safe for the optimisation.
        """
        node = unwrap_node(node)
        if not node.type.is_pyobject:
            return False
        if isinstance(node, ExprNodes.CoerceToTempNode):
            temps.append(node)
            node = node.arg
        name_path = []
        obj_node = node
        # walk an attribute chain like a.b.c down to its base name
        while obj_node.is_attribute:
            if obj_node.is_py_attr:
                return False
            name_path.append(obj_node.member)
            obj_node = obj_node.obj
        if obj_node.is_name:
            name_path.append(obj_node.name)
            names.append( ('.'.join(name_path[::-1]), node) )
        elif node.is_subscript:
            if node.base.type != Builtin.list_type:
                return False
            if not node.index.type.is_int:
                return False
            if not node.base.is_name:
                return False
            indices.append(node)
        else:
            return False
        return True
    def _extract_index_id(self, index_node):
        """Return a hashable (base_name, index_name) key for a subscript,
        or None if the index form is not supported.
        """
        base = index_node.base
        index = index_node.index
        if isinstance(index, ExprNodes.NameNode):
            index_val = index.name
        elif isinstance(index, ExprNodes.ConstNode):
            # FIXME:
            return None
        else:
            return None
        return (base.name, index_val)
class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
    """Optimize some common calls to builtin types *before* the type
    analysis phase and *after* the declarations analysis phase.
    This transform cannot make use of any argument types, but it can
    restructure the tree in a way that the type analysis phase can
    respond to.
    Introducing C function calls here may not be a good idea. Move
    them to the OptimizeBuiltinCalls transform instead, which runs
    after type analysis.
    """
    # only intercept on call nodes
    visit_Node = Visitor.VisitorTransform.recurse_to_children
    def visit_SimpleCallNode(self, node):
        self.visitchildren(node)
        function = node.function
        if not self._function_is_builtin_name(function):
            return node
        return self._dispatch_to_handler(node, function, node.args)
    def visit_GeneralCallNode(self, node):
        self.visitchildren(node)
        function = node.function
        if not self._function_is_builtin_name(function):
            return node
        arg_tuple = node.positional_args
        if not isinstance(arg_tuple, ExprNodes.TupleNode):
            return node
        args = arg_tuple.args
        return self._dispatch_to_handler(
            node, function, args, node.keyword_args)
    def _function_is_builtin_name(self, function):
        """True if *function* is a name that still refers to the builtin."""
        if not function.is_name:
            return False
        env = self.current_env()
        entry = env.lookup(function.name)
        if entry is not env.builtin_scope().lookup_here(function.name):
            # locally redefined name shadows the builtin
            return False
        # if entry is None, it's at least an undeclared name, so likely builtin
        return True
    def _dispatch_to_handler(self, node, function, args, kwargs=None):
        """Dispatch to '_handle_simple_function_<name>' or
        '_handle_general_function_<name>' if such a handler exists;
        otherwise leave the call node unchanged.
        """
        if kwargs is None:
            handler_name = '_handle_simple_function_%s' % function.name
        else:
            handler_name = '_handle_general_function_%s' % function.name
        handle_call = getattr(self, handler_name, None)
        if handle_call is not None:
            if kwargs is None:
                return handle_call(node, args)
            else:
                return handle_call(node, args, kwargs)
        return node
    def _inject_capi_function(self, node, cname, func_type, utility_code=None):
        # replace the call target in place by a C-API function
        node.function = ExprNodes.PythonCapiFunctionNode(
            node.function.pos, node.function.name, cname, func_type,
            utility_code = utility_code)
    def _error_wrong_arg_count(self, function_name, node, args, expected=None):
        # NOTE(review): 'basestring' presumably comes from a Py2/3 compat
        # shim at module level - confirm before touching.
        if not expected: # None or 0
            arg_str = ''
        elif isinstance(expected, basestring) or expected > 1:
            arg_str = '...'
        elif expected == 1:
            arg_str = 'x'
        else:
            arg_str = ''
        if expected is not None:
            expected_str = 'expected %s, ' % expected
        else:
            expected_str = ''
        error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
            function_name, arg_str, expected_str, len(args)))
    # specific handlers for simple call nodes
    def _handle_simple_function_float(self, node, pos_args):
        """Fold float() and float(x) for known float arguments."""
        if not pos_args:
            return ExprNodes.FloatNode(node.pos, value='0.0')
        if len(pos_args) > 1:
            self._error_wrong_arg_count('float', node, pos_args, 1)
        arg_type = getattr(pos_args[0], 'type', None)
        if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
            # already a float => drop the call entirely
            return pos_args[0]
        return node
    def _handle_simple_function_slice(self, node, pos_args):
        """Replace slice(...) calls by a literal SliceNode."""
        arg_count = len(pos_args)
        start = step = None
        if arg_count == 1:
            stop, = pos_args
        elif arg_count == 2:
            start, stop = pos_args
        elif arg_count == 3:
            start, stop, step = pos_args
        else:
            self._error_wrong_arg_count('slice', node, pos_args)
            return node
        return ExprNodes.SliceNode(
            node.pos,
            start=start or ExprNodes.NoneNode(node.pos),
            stop=stop,
            step=step or ExprNodes.NoneNode(node.pos))
    def _handle_simple_function_ord(self, node, pos_args):
        """Unpack ord('X').
        """
        if len(pos_args) != 1:
            return node
        arg = pos_args[0]
        if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
            if len(arg.value) == 1:
                return ExprNodes.IntNode(
                    arg.pos, type=PyrexTypes.c_long_type,
                    value=str(ord(arg.value)),
                    constant_result=ord(arg.value)
                )
        elif isinstance(arg, ExprNodes.StringNode):
            if arg.unicode_value and len(arg.unicode_value) == 1 \
                    and ord(arg.unicode_value) <= 255: # Py2/3 portability
                return ExprNodes.IntNode(
                    arg.pos, type=PyrexTypes.c_int_type,
                    value=str(ord(arg.unicode_value)),
                    constant_result=ord(arg.unicode_value)
                )
        return node
    # sequence processing
    def _handle_simple_function_all(self, node, pos_args):
        """Transform
        _result = all(p(x) for L in LL for x in L)
        into
        for L in LL:
            for x in L:
                if not p(x):
                    return False
        else:
            return True
        """
        return self._transform_any_all(node, pos_args, False)
    def _handle_simple_function_any(self, node, pos_args):
        """Transform
        _result = any(p(x) for L in LL for x in L)
        into
        for L in LL:
            for x in L:
                if p(x):
                    return True
        else:
            return False
        """
        return self._transform_any_all(node, pos_args, True)
    def _transform_any_all(self, node, pos_args, is_any):
        """Shared implementation of the any()/all() genexpr inlining."""
        if len(pos_args) != 1:
            return node
        if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            return node
        gen_expr_node = pos_args[0]
        generator_body = gen_expr_node.def_node.gbody
        loop_node = generator_body.body
        yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
        if yield_expression is None:
            return node
        if is_any:
            condition = yield_expression
        else:
            # all(...) => bail out on the first falsy value
            condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression)
        # replace the 'yield' by an early 'return <is_any>' on a match
        test_node = Nodes.IfStatNode(
            yield_expression.pos, else_clause=None, if_clauses=[
                Nodes.IfClauseNode(
                    yield_expression.pos,
                    condition=condition,
                    body=Nodes.ReturnStatNode(
                        node.pos,
                        value=ExprNodes.BoolNode(yield_expression.pos, value=is_any, constant_result=is_any))
                )]
        )
        # if the loop completes, return the opposite result
        loop_node.else_clause = Nodes.ReturnStatNode(
            node.pos,
            value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any, constant_result=not is_any))
        Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node)
        return ExprNodes.InlinedGeneratorExpressionNode(
            gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all')
    PySequence_List_func_type = PyrexTypes.CFuncType(
        Builtin.list_type,
        [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
    def _handle_simple_function_sorted(self, node, pos_args):
        """Transform sorted(genexpr) and sorted([listcomp]) into
        [listcomp].sort().  CPython just reads the iterable into a
        list and calls .sort() on it.  Expanding the iterable in a
        listcomp is still faster and the result can be sorted in
        place.
        """
        if len(pos_args) != 1:
            return node
        arg = pos_args[0]
        if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type:
            list_node = pos_args[0]
            loop_node = list_node.loop
        elif isinstance(arg, ExprNodes.GeneratorExpressionNode):
            gen_expr_node = arg
            loop_node = gen_expr_node.loop
            yield_statements = _find_yield_statements(loop_node)
            if not yield_statements:
                return node
            # inline the genexpr as a list comprehension, replacing each
            # 'yield' by an append to the result list
            list_node = ExprNodes.InlinedGeneratorExpressionNode(
                node.pos, gen_expr_node, orig_func='sorted',
                comprehension_type=Builtin.list_type)
            for yield_expression, yield_stat_node in yield_statements:
                append_node = ExprNodes.ComprehensionAppendNode(
                    yield_expression.pos,
                    expr=yield_expression,
                    target=list_node.target)
                Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
        elif arg.is_sequence_constructor:
            # sorted([a, b, c]) or sorted((a, b, c)).  The result is always a list,
            # so starting off with a fresh one is more efficient.
            list_node = loop_node = arg.as_list()
        else:
            # Interestingly, PySequence_List works on a lot of non-sequence
            # things as well.
            list_node = loop_node = ExprNodes.PythonCapiCallNode(
                node.pos, "PySequence_List", self.PySequence_List_func_type,
                args=pos_args, is_temp=True)
        # assign the list to a temp, call .sort() on it, return the temp
        result_node = UtilNodes.ResultRefNode(
            pos=loop_node.pos, type=Builtin.list_type, may_hold_none=False)
        list_assign_node = Nodes.SingleAssignmentNode(
            node.pos, lhs=result_node, rhs=list_node, first=True)
        sort_method = ExprNodes.AttributeNode(
            node.pos, obj=result_node, attribute=EncodedString('sort'),
            # entry ? type ?
            needs_none_check=False)
        sort_node = Nodes.ExprStatNode(
            node.pos, expr=ExprNodes.SimpleCallNode(
                node.pos, function=sort_method, args=[]))
        sort_node.analyse_declarations(self.current_env())
        return UtilNodes.TempResultFromStatNode(
            result_node,
            Nodes.StatListNode(node.pos, stats=[list_assign_node, sort_node]))
    def __handle_simple_function_sum(self, node, pos_args):
        """Transform sum(genexpr) into an equivalent inlined aggregation loop.
        """
        # NOTE: name-mangled (double underscore) => currently not
        # picked up by _dispatch_to_handler, i.e. disabled.
        if len(pos_args) not in (1,2):
            return node
        if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
                                        ExprNodes.ComprehensionNode)):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop
        if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
            yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
            # FIXME: currently nonfunctional
            yield_expression = None
            if yield_expression is None:
                return node
        else: # ComprehensionNode
            yield_stat_node = gen_expr_node.append
            yield_expression = yield_stat_node.expr
            try:
                if not yield_expression.is_literal or not yield_expression.type.is_int:
                    return node
            except AttributeError:
                return node # in case we don't have a type yet
            # special case: old Py2 backwards compatible "sum([int_const for ...])"
            # can safely be unpacked into a genexpr
        if len(pos_args) == 1:
            start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
        else:
            start = pos_args[1]
        result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
        add_node = Nodes.SingleAssignmentNode(
            yield_expression.pos,
            lhs = result_ref,
            rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
            )
        Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node)
        exec_code = Nodes.StatListNode(
            node.pos,
            stats = [
                Nodes.SingleAssignmentNode(
                    start.pos,
                    lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
                    rhs = start,
                    first = True),
                loop_node
                ])
        return ExprNodes.InlinedGeneratorExpressionNode(
            gen_expr_node.pos, loop = exec_code, result_node = result_ref,
            expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
            has_local_scope = gen_expr_node.has_local_scope)
    def _handle_simple_function_min(self, node, pos_args):
        return self._optimise_min_max(node, pos_args, '<')
    def _handle_simple_function_max(self, node, pos_args):
        return self._optimise_min_max(node, pos_args, '>')
    def _optimise_min_max(self, node, args, operator):
        """Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
        """
        if len(args) <= 1:
            if len(args) == 1 and args[0].is_sequence_constructor:
                # min((a, b, c)) => compare the literal elements directly
                args = args[0].args
            if len(args) <= 1:
                # leave this to Python
                return node
        # build a chain of conditional expressions, each comparing the
        # next argument against the best value found so far
        cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))
        last_result = args[0]
        for arg_node in cascaded_nodes:
            result_ref = UtilNodes.ResultRefNode(last_result)
            last_result = ExprNodes.CondExprNode(
                arg_node.pos,
                true_val = arg_node,
                false_val = result_ref,
                test = ExprNodes.PrimaryCmpNode(
                    arg_node.pos,
                    operand1 = arg_node,
                    operator = operator,
                    operand2 = result_ref,
                    )
                )
            last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
        # evaluate the argument temps in source order
        for ref_node in cascaded_nodes[::-1]:
            last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
        return last_result
    # builtin type creation
    def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
        if not pos_args:
            return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
        # This is a bit special - for iterables (including genexps),
        # Python actually overallocates and resizes a newly created
        # tuple incrementally while reading items, which we can't
        # easily do without explicit node support. Instead, we read
        # the items into a list and then copy them into a tuple of the
        # final size.  This takes up to twice as much memory, but will
        # have to do until we have real support for genexps.
        result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
        if result is not node:
            return ExprNodes.AsTupleNode(node.pos, arg=result)
        return node
    def _handle_simple_function_frozenset(self, node, pos_args):
        """Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
        """
        if len(pos_args) != 1:
            return node
        if pos_args[0].is_sequence_constructor and not pos_args[0].args:
            # frozenset(()) / frozenset([]) => frozenset()
            del pos_args[0]
        elif isinstance(pos_args[0], ExprNodes.ListNode):
            pos_args[0] = pos_args[0].as_tuple()
        return node
    def _handle_simple_function_list(self, node, pos_args):
        if not pos_args:
            return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
        return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
    def _handle_simple_function_set(self, node, pos_args):
        if not pos_args:
            return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
        return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)
    def _transform_list_set_genexpr(self, node, pos_args, target_type):
        """Replace set(genexpr) and list(genexpr) by an inlined comprehension.
        """
        if len(pos_args) > 1:
            return node
        if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop
        yield_statements = _find_yield_statements(loop_node)
        if not yield_statements:
            return node
        result_node = ExprNodes.InlinedGeneratorExpressionNode(
            node.pos, gen_expr_node,
            orig_func='set' if target_type is Builtin.set_type else 'list',
            comprehension_type=target_type)
        # each 'yield x' becomes an append/add of x to the result container
        for yield_expression, yield_stat_node in yield_statements:
            append_node = ExprNodes.ComprehensionAppendNode(
                yield_expression.pos,
                expr=yield_expression,
                target=result_node.target)
            Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
        return result_node
    def _handle_simple_function_dict(self, node, pos_args):
        """Replace dict( (a,b) for ... ) by an inlined { a:b for ... }
        """
        if len(pos_args) == 0:
            return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
        if len(pos_args) > 1:
            return node
        if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop
        yield_statements = _find_yield_statements(loop_node)
        if not yield_statements:
            return node
        # every yielded value must be a literal 2-tuple (key, value)
        for yield_expression, _ in yield_statements:
            if not isinstance(yield_expression, ExprNodes.TupleNode):
                return node
            if len(yield_expression.args) != 2:
                return node
        result_node = ExprNodes.InlinedGeneratorExpressionNode(
            node.pos, gen_expr_node, orig_func='dict',
            comprehension_type=Builtin.dict_type)
        for yield_expression, yield_stat_node in yield_statements:
            append_node = ExprNodes.DictComprehensionAppendNode(
                yield_expression.pos,
                key_expr=yield_expression.args[0],
                value_expr=yield_expression.args[1],
                target=result_node.target)
            Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
        return result_node
    # specific handlers for general call nodes
    def _handle_general_function_dict(self, node, pos_args, kwargs):
        """Replace dict(a=b,c=d,...) by the underlying keyword dict
        construction which is done anyway.
        """
        if len(pos_args) > 0:
            return node
        if not isinstance(kwargs, ExprNodes.DictNode):
            return node
        return kwargs
class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform):
    visit_Node = Visitor.VisitorTransform.recurse_to_children
    def get_constant_value_node(self, name_node):
        """Return the single RHS ever assigned to *name_node*, or None
        when the name may be unbound or is rebound more than once."""
        cf_state = name_node.cf_state
        if cf_state is None or cf_state.cf_is_null:
            return None
        entry = self.current_env().lookup(name_node.name)
        if not entry:
            return None
        assignments = entry.cf_assignments
        if not assignments or len(assignments) != 1:
            # not just a single assignment in all closures
            return None
        return assignments[0].rhs
    def visit_SimpleCallNode(self, node):
        """Replace a call to a locally defined Python function by an
        InlinedDefNodeCallNode when it is provably safe to inline."""
        self.visitchildren(node)
        if not self.current_directives.get('optimize.inline_defnode_calls'):
            return node
        callee_name = node.function
        if not callee_name.is_name:
            return node
        callee = self.get_constant_value_node(callee_name)
        if not isinstance(callee, ExprNodes.PyCFunctionNode):
            return node
        inlined = ExprNodes.InlinedDefNodeCallNode(
            node.pos, function_name=callee_name,
            function=callee, args=node.args)
        if not inlined.can_be_inlined():
            return node
        return self.replace(node, inlined)
class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
Visitor.MethodDispatcherTransform):
"""Optimize some common methods calls and instantiation patterns
for builtin types *after* the type analysis phase.
Running after type analysis, this transform can only perform
function replacements that do not alter the function return type
in a way that was not anticipated by the type analysis.
"""
### cleanup to avoid redundant coercions to/from Python types
    def visit_PyTypeTestNode(self, node):
        """Flatten redundant type checks after tree changes.
        """
        self.visitchildren(node)
        # reanalyse() presumably collapses the test when the argument's
        # type is already known to match - confirm in ExprNodes.
        return node.reanalyse()
    # NOTE: the leading underscore keeps this method out of the visitor
    # dispatch, i.e. it is deliberately disabled.
    def _visit_TypecastNode(self, node):
        # disabled - the user may have had a reason to put a type
        # cast, even if it looks redundant to Cython
        """
        Drop redundant type casts.
        """
        self.visitchildren(node)
        if node.type == node.operand.type:
            return node.operand
        return node
def visit_ExprStatNode(self, node):
"""
Drop dead code and useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
expr = node.expr
if expr is None or expr.is_none or expr.is_literal:
# Expression was removed or is dead code => remove ExprStatNode as well.
return None
if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
# Ignore dead references to local variables etc.
return None
return node
def visit_CoerceToBooleanNode(self, node):
"""Drop redundant conversion nodes after tree changes.
"""
self.visitchildren(node)
arg = node.arg
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
return arg.arg.coerce_to_boolean(self.current_env())
return node
PyNumber_Float_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
])
    def visit_CoerceToPyTypeNode(self, node):
        """Drop redundant conversion nodes after tree changes."""
        self.visitchildren(node)
        arg = node.arg
        if isinstance(arg, ExprNodes.CoerceFromPyTypeNode):
            arg = arg.arg
        if isinstance(arg, ExprNodes.PythonCapiCallNode):
            if arg.function.name == 'float' and len(arg.args) == 1:
                # undo redundant Py->C->Py coercion
                func_arg = arg.args[0]
                if func_arg.type is Builtin.float_type:
                    # float(float_obj) is a no-op apart from the None check
                    return func_arg.as_none_safe_node("float() argument must be a string or a number, not 'NoneType'")
                elif func_arg.type.is_pyobject:
                    # call the C-API conversion directly on the object
                    return ExprNodes.PythonCapiCallNode(
                        node.pos, '__Pyx_PyNumber_Float', self.PyNumber_Float_func_type,
                        args=[func_arg],
                        py_name='float',
                        is_temp=node.is_temp,
                        result_is_used=node.result_is_used,
                    ).coerce_to(node.type, self.current_env())
        return node
    def visit_CoerceFromPyTypeNode(self, node):
        """Drop redundant conversion nodes after tree changes.
        Also, optimise away calls to Python's builtin int() and
        float() if the result is going to be coerced back into a C
        type anyway.
        """
        self.visitchildren(node)
        arg = node.arg
        if not arg.type.is_pyobject:
            # no Python conversion left at all, just do a C coercion instead
            if node.type != arg.type:
                arg = arg.coerce_to(node.type, self.current_env())
            return arg
        if isinstance(arg, ExprNodes.PyTypeTestNode):
            arg = arg.arg
        if arg.is_literal:
            # literal -> C value can be done directly for matching kinds
            if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
                    node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
                    node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
                return arg.coerce_to(node.type, self.current_env())
        elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
            if arg.type is PyrexTypes.py_object_type:
                if node.type.assignable_from(arg.arg.type):
                    # completely redundant C->Py->C coercion
                    return arg.arg.coerce_to(node.type, self.current_env())
            elif arg.type is Builtin.unicode_type:
                if arg.arg.type.is_unicode_char and node.type.is_unicode_char:
                    return arg.arg.coerce_to(node.type, self.current_env())
        elif isinstance(arg, ExprNodes.SimpleCallNode):
            # int(x) / float(x) followed by a C coercion
            if node.type.is_int or node.type.is_float:
                return self._optimise_numeric_cast_call(node, arg)
        elif arg.is_subscript:
            index_node = arg.index
            if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
                index_node = index_node.arg
            if index_node.type.is_int:
                # e.g. bytes_obj[i] coerced to a C char
                return self._optimise_int_indexing(node, arg, index_node)
        return node
PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_type, [
PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
],
exception_value = "((char)-1)",
exception_check = True)
def _optimise_int_indexing(self, coerce_node, arg, index_node):
    """Replace a C coercion of `bytes[int_index]` by a direct call to
    __Pyx_PyBytes_GetItemInt(), avoiding the intermediate Python int.

    coerce_node is the CoerceFromPyTypeNode, arg the subscript node,
    index_node the (unwrapped) integer index expression.
    """
    env = self.current_env()
    # honour the 'boundscheck' compiler directive at the call site
    bound_check_bool = env.directives['boundscheck'] and 1 or 0
    if arg.base.type is Builtin.bytes_type:
        if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
            # bytes[index] -> char
            bound_check_node = ExprNodes.IntNode(
                coerce_node.pos, value=str(bound_check_bool),
                constant_result=bound_check_bool)
            node = ExprNodes.PythonCapiCallNode(
                coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
                self.PyBytes_GetItemInt_func_type,
                args=[
                    arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
                    index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
                    bound_check_node,
                ],
                is_temp=True,
                utility_code=UtilityCode.load_cached(
                    'bytes_index', 'StringTools.c'))
            if coerce_node.type is not PyrexTypes.c_char_type:
                # the helper returns 'char'; cast to the requested C type
                node = node.coerce_to(coerce_node.type, env)
            return node
    return coerce_node
# Maps each C floating point type to the signature of a unary C function
# taking and returning that type (used for the trunc*() calls below).
float_float_func_types = dict(
    (float_type, PyrexTypes.CFuncType(
        float_type, [
            PyrexTypes.CFuncTypeArg("arg", float_type, None)
        ]))
    for float_type in (PyrexTypes.c_float_type, PyrexTypes.c_double_type, PyrexTypes.c_longdouble_type))
def _optimise_numeric_cast_call(self, node, arg):
    """Optimise `<C numeric type>(int(x))` / `<C numeric type>(float(x))`
    into a plain C cast or a C-level trunc*() call.

    node is the CoerceFromPyTypeNode, arg the int()/float() call node.
    Returns the original node when no safe optimisation applies.
    """
    function = arg.function
    args = None
    if isinstance(arg, ExprNodes.PythonCapiCallNode):
        args = arg.args
    elif isinstance(function, ExprNodes.NameNode):
        if function.type.is_builtin_type and isinstance(arg.arg_tuple, ExprNodes.TupleNode):
            args = arg.arg_tuple.args
    if args is None or len(args) != 1:
        return node
    func_arg = args[0]
    if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
        # unwrap a C value that was only boxed for the builtin call
        func_arg = func_arg.arg
    elif func_arg.type.is_pyobject:
        # play it safe: Python conversion might work on all sorts of things
        return node
    if function.name == 'int':
        if func_arg.type.is_int or node.type.is_int:
            if func_arg.type == node.type:
                return func_arg
            elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
                return ExprNodes.TypecastNode(node.pos, operand=func_arg, type=node.type)
        elif func_arg.type.is_float and node.type.is_numeric:
            # int(c_float) -> truncf()/trunc()/truncl() on the C value
            if func_arg.type.math_h_modifier == 'l':
                # Work around missing Cygwin definition.
                truncl = '__Pyx_truncl'
            else:
                truncl = 'trunc' + func_arg.type.math_h_modifier
            return ExprNodes.PythonCapiCallNode(
                node.pos, truncl,
                func_type=self.float_float_func_types[func_arg.type],
                args=[func_arg],
                py_name='int',
                is_temp=node.is_temp,
                result_is_used=node.result_is_used,
            ).coerce_to(node.type, self.current_env())
    elif function.name == 'float':
        if func_arg.type.is_float or node.type.is_float:
            if func_arg.type == node.type:
                return func_arg
            elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
                return ExprNodes.TypecastNode(
                    node.pos, operand=func_arg, type=node.type)
    return node
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
    """Emit a compile error about a builtin called with an unsupported
    number of arguments.

    'expected' may be None (unknown), an int, or a descriptive string
    such as "2 or 3".
    """
    if not expected:  # None or 0
        placeholder = ''
    elif isinstance(expected, basestring) or expected > 1:
        placeholder = '...'
    elif expected == 1:
        placeholder = 'x'
    else:
        placeholder = ''
    expected_str = '' if expected is None else 'expected %s, ' % expected
    error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
        function_name, placeholder, expected_str, len(args)))
### generic fallbacks
def _handle_function(self, node, function_name, function, arg_list, kwargs):
    """Generic fallback for function calls that have no specific handler:
    leave the call node unchanged.
    """
    return node
def _handle_method(self, node, type_name, attr_name, function,
                   arg_list, is_unbound_method, kwargs):
    """
    Try to inject C-API calls for unbound method calls to builtin types.
    While the method declarations in Builtin.py already handle this, we
    can additionally resolve bound and unbound methods here that were
    assigned to variables ahead of time.
    """
    if kwargs:
        return node
    if not function or not function.is_attribute or not function.obj.is_name:
        # cannot track unbound method calls over more than one indirection as
        # the names might have been reassigned in the meantime
        return node
    type_entry = self.current_env().lookup(type_name)
    if not type_entry:
        return node
    # rebuild 'TypeName.method' as a typed attribute access so that
    # analyse_as_type_attribute() can resolve it to a C method if possible
    method = ExprNodes.AttributeNode(
        node.function.pos,
        obj=ExprNodes.NameNode(
            function.pos,
            name=type_name,
            entry=type_entry,
            type=type_entry.type),
        attribute=attr_name,
        is_called=True).analyse_as_type_attribute(self.current_env())
    if method is None:
        # no C-level method available; fall back to caching the builtin method
        return self._optimise_generic_builtin_method_call(
            node, attr_name, function, arg_list, is_unbound_method)
    args = node.args
    if args is None and node.arg_tuple:
        args = node.arg_tuple.args
    call_node = ExprNodes.SimpleCallNode(
        node.pos,
        function=method,
        args=args)
    if not is_unbound_method:
        # bound call: the original object becomes the 'self' argument
        call_node.self = function.obj
    call_node.analyse_c_function_call(self.current_env())
    call_node.analysed = True
    return call_node.coerce_to(node.type, self.current_env())
### builtin types
def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
    """
    Try to inject an unbound method call for a call to a method of a known builtin type.
    This enables caching the underlying C function of the method at runtime.
    """
    arg_count = len(arg_list)
    # CachedBuiltinMethodCallNode only supports bound calls with < 3 args
    if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
        return node
    if not function.obj.type.is_builtin_type:
        return node
    if function.obj.type.name in ('basestring', 'type'):
        # these allow different actual types => unsafe
        return node
    return ExprNodes.CachedBuiltinMethodCallNode(
        node, function.obj, attr_name, arg_list)
# C signature shared by __Pyx_PyUnicode_Unicode() / __Pyx_PyObject_Unicode():
# object in, unicode out.
PyObject_Unicode_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
    ])
def _handle_simple_function_unicode(self, node, function, pos_args):
    """Optimise single argument calls to unicode().
    """
    if len(pos_args) != 1:
        if len(pos_args) == 0:
            # unicode() -> empty unicode string constant
            return ExprNodes.UnicodeNode(node.pos, value=EncodedString(), constant_result=u'')
        return node
    arg = pos_args[0]
    if arg.type is Builtin.unicode_type:
        if not arg.may_be_none():
            # already a non-None unicode object => no-op
            return arg
        cname = "__Pyx_PyUnicode_Unicode"
        utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
    else:
        cname = "__Pyx_PyObject_Unicode"
        utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
    return ExprNodes.PythonCapiCallNode(
        node.pos, cname, self.PyObject_Unicode_func_type,
        args=pos_args,
        is_temp=node.is_temp,
        utility_code=utility_code,
        py_name="unicode")
def visit_FormattedValueNode(self, node):
    """Simplify or avoid plain string formatting of a unicode value.
    This seems misplaced here, but plain unicode formatting is essentially
    a call to the unicode() builtin, which is optimised right above.
    """
    self.visitchildren(node)
    if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
        if not node.conversion_char or node.conversion_char == 's':
            # value is definitely a unicode string and we don't format it any special
            return self._handle_simple_function_unicode(node, None, [node.value])
    return node
# C signature of PyDict_Copy(): dict in, new dict out.
PyDict_Copy_func_type = PyrexTypes.CFuncType(
    Builtin.dict_type, [
        PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
    ])
def _handle_simple_function_dict(self, node, function, pos_args):
    """Replace dict(some_dict) by a direct call to PyDict_Copy(some_dict)."""
    if len(pos_args) != 1:
        return node
    source = pos_args[0]
    if source.type is not Builtin.dict_type:
        return node
    # dict(None) must keep raising a TypeError at runtime
    source = source.as_none_safe_node("'NoneType' is not iterable")
    return ExprNodes.PythonCapiCallNode(
        node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
        args=[source],
        is_temp=node.is_temp,
    )
# C signature of PySequence_List(): arbitrary iterable in, list out.
PySequence_List_func_type = PyrexTypes.CFuncType(
    Builtin.list_type,
    [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_list(self, node, function, pos_args):
    """Turn list(ob) into PySequence_List(ob).

    PySequence_List() handles any iterable (and raises a TypeError for
    None or non-iterables), so no extra checks are required here.
    """
    if len(pos_args) != 1:
        return node
    # Fixed: dropped a local 'arg = pos_args[0]' that was never used.
    return ExprNodes.PythonCapiCallNode(
        node.pos, "PySequence_List", self.PySequence_List_func_type,
        args=pos_args, is_temp=node.is_temp)
# C signature of PyList_AsTuple(): list in, tuple out.
PyList_AsTuple_func_type = PyrexTypes.CFuncType(
    Builtin.tuple_type, [
        PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
    ])
def _handle_simple_function_tuple(self, node, function, pos_args):
    """Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
    """
    if len(pos_args) != 1 or not node.is_temp:
        return node
    arg = pos_args[0]
    if arg.type is Builtin.tuple_type and not arg.may_be_none():
        # tuple(some_tuple) is a no-op for a non-None tuple
        return arg
    if arg.type is Builtin.list_type:
        pos_args[0] = arg.as_none_safe_node(
            "'NoneType' object is not iterable")
        return ExprNodes.PythonCapiCallNode(
            node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
            args=pos_args, is_temp=node.is_temp)
    else:
        # generic iterable: AsTupleNode maps to PySequence_Tuple()
        return ExprNodes.AsTupleNode(node.pos, arg=arg, type=Builtin.tuple_type)
# C signature of PySet_New(): iterable in, new set out.
PySet_New_func_type = PyrexTypes.CFuncType(
    Builtin.set_type, [
        PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
    ])
def _handle_simple_function_set(self, node, function, pos_args):
    """Replace set([...]) by a set literal and set(it) by PySet_New(it)."""
    if len(pos_args) != 1:
        return node
    if pos_args[0].is_sequence_constructor:
        # We can optimise set([x,y,z]) safely into a set literal,
        # but only if we create all items before adding them -
        # adding an item may raise an exception if it is not
        # hashable, but creating the later items may have
        # side-effects.
        args = []
        temps = []
        for arg in pos_args[0].args:
            if not arg.is_simple():
                # evaluate complex items once, up front, via temps
                arg = UtilNodes.LetRefNode(arg)
                temps.append(arg)
            args.append(arg)
        result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
        self.replace(node, result)
        # wrap in reverse order so the first temp is evaluated first
        for temp in temps[::-1]:
            result = UtilNodes.EvalWithTempExprNode(temp, result)
        return result
    else:
        # PySet_New(it) is better than a generic Python call to set(it)
        return self.replace(node, ExprNodes.PythonCapiCallNode(
            node.pos, "PySet_New",
            self.PySet_New_func_type,
            args=pos_args,
            is_temp=node.is_temp,
            py_name="set"))
# C signature of __Pyx_PyFrozenSet_New(): iterable (or NULL) in, frozenset out.
PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
    Builtin.frozenset_type, [
        PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
    ])
def _handle_simple_function_frozenset(self, node, function, pos_args):
    """Replace frozenset([it]) by a call to __Pyx_PyFrozenSet_New()."""
    if not pos_args:
        # frozenset() -> pass NULL to the helper to get the empty frozenset
        pos_args = [ExprNodes.NullNode(node.pos)]
    elif len(pos_args) > 1:
        return node
    elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
        # frozenset(some_frozenset) is a no-op
        return pos_args[0]
    # PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
    return ExprNodes.PythonCapiCallNode(
        node.pos, "__Pyx_PyFrozenSet_New",
        self.PyFrozenSet_New_func_type,
        args=pos_args,
        is_temp=node.is_temp,
        utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
        py_name="frozenset")
# C signature of __Pyx_PyObject_AsDouble(): object in, C double out;
# -1.0 with an exception set signals an error.
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_double_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
    ],
    exception_value = "((double)-1)",
    exception_check = True)
def _handle_simple_function_float(self, node, function, pos_args):
    """Transform float() into either a C type cast or a faster C
    function call.
    """
    # Note: this requires the float() function to be typed as
    # returning a C 'double'
    if len(pos_args) == 0:
        # float() -> 0.0 constant.
        # Fixed: pass node.pos (the source position) as the first argument,
        # not the node itself — all Node constructors take a pos tuple first,
        # as the IntNode/UnicodeNode handlers in this class do.
        return ExprNodes.FloatNode(
            node.pos, value="0.0", constant_result=0.0
        ).coerce_to(Builtin.float_type, self.current_env())
    elif len(pos_args) != 1:
        self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
        return node
    func_arg = pos_args[0]
    if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
        # unwrap a C value that was only boxed for the float() call
        func_arg = func_arg.arg
    if func_arg.type is PyrexTypes.c_double_type:
        return func_arg
    elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
        # any C numeric value converts with a plain cast
        return ExprNodes.TypecastNode(
            node.pos, operand=func_arg, type=node.type)
    # generic Python object: use the C helper instead of calling float()
    return ExprNodes.PythonCapiCallNode(
        node.pos, "__Pyx_PyObject_AsDouble",
        self.PyObject_AsDouble_func_type,
        args = pos_args,
        is_temp = node.is_temp,
        utility_code = load_c_utility('pyobject_as_double'),
        py_name = "float")
# C signature of __Pyx_PyNumber_Int(): object in, Python int out.
PyNumber_Int_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
    ])
# C signature of __Pyx_PyInt_FromDouble(): C double in, Python int out.
PyInt_FromDouble_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_double_type, None)
    ])
def _handle_simple_function_int(self, node, function, pos_args):
    """Transform int() into a faster C function call.
    """
    if len(pos_args) == 0:
        # int() -> constant 0 (as a Python object)
        return ExprNodes.IntNode(node.pos, value="0", constant_result=0,
                                 type=PyrexTypes.py_object_type)
    elif len(pos_args) != 1:
        return node  # int(x, base)
    func_arg = pos_args[0]
    if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
        if func_arg.arg.type.is_float:
            # int(c_float_value): truncate in C and box the result once
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_PyInt_FromDouble", self.PyInt_FromDouble_func_type,
                args=[func_arg.arg], is_temp=True, py_name='int',
                utility_code=UtilityCode.load_cached("PyIntFromDouble", "TypeConversion.c"))
        else:
            return node  # handled in visit_CoerceFromPyTypeNode()
    if func_arg.type.is_pyobject and node.type.is_pyobject:
        # generic object: __Pyx_PyNumber_Int() avoids the builtin lookup
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_PyNumber_Int", self.PyNumber_Int_func_type,
            args=pos_args, is_temp=True, py_name='int')
    return node
def _handle_simple_function_bool(self, node, function, pos_args):
    """Transform bool(x) into a type coercion to a boolean.
    """
    if len(pos_args) == 0:
        # bool() -> constant False
        return ExprNodes.BoolNode(
            node.pos, value=False, constant_result=False
        ).coerce_to(Builtin.bool_type, self.current_env())
    elif len(pos_args) != 1:
        self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
        return node
    else:
        # => !!<bint>(x) to make sure it's exactly 0 or 1
        operand = pos_args[0].coerce_to_boolean(self.current_env())
        operand = ExprNodes.NotNode(node.pos, operand = operand)
        operand = ExprNodes.NotNode(node.pos, operand = operand)
        # coerce back to Python object as that's the result we are expecting
        return operand.coerce_to_pyobject(self.current_env())
### builtin functions
# C signature of strlen(): char* in, size_t out.
Pyx_strlen_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_size_t_type, [
        PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_const_char_ptr_type, None)
    ])
# C signature of __Pyx_Py_UNICODE_strlen(): Py_UNICODE* in, size_t out.
Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_size_t_type, [
        PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_const_py_unicode_ptr_type, None)
    ])
# Common C signature for the object length helpers (PyObject_Size() style):
# object in, Py_ssize_t out, -1 on error.
PyObject_Size_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
    ],
    exception_value="-1")
# Bound .get() of the mapping from known builtin types to their dedicated
# C-API length macro/function; returns None for unknown types.
_map_to_capi_len_function = {
    Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
    Builtin.bytes_type: "PyBytes_GET_SIZE",
    Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
    Builtin.list_type: "PyList_GET_SIZE",
    Builtin.tuple_type: "PyTuple_GET_SIZE",
    Builtin.set_type: "PySet_GET_SIZE",
    Builtin.frozenset_type: "PySet_GET_SIZE",
    Builtin.dict_type: "PyDict_Size",
}.get
# Extension types whose ob_size field holds the item count, so that
# len() can be read via Py_SIZE().  (Idiom fix: set literal instead of
# set([...]) — identical behaviour.)
_ext_types_with_pysize = {"cpython.array.array"}
def _handle_simple_function_len(self, node, function, pos_args):
    """Replace len(char*) by the equivalent call to strlen(),
    len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and
    len(known_builtin_type) by an equivalent C-API call.
    """
    if len(pos_args) != 1:
        self._error_wrong_arg_count('len', node, pos_args, 1)
        return node
    arg = pos_args[0]
    if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
        # unwrap a C value that was only boxed for the len() call
        arg = arg.arg
    if arg.type.is_string:
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "strlen", self.Pyx_strlen_func_type,
            args = [arg],
            is_temp = node.is_temp,
            utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
    elif arg.type.is_pyunicode_ptr:
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
            args = [arg],
            is_temp = node.is_temp)
    elif arg.type.is_memoryviewslice:
        # build the signature on the fly since it depends on the slice type
        func_type = PyrexTypes.CFuncType(
            PyrexTypes.c_size_t_type, [
                PyrexTypes.CFuncTypeArg("memoryviewslice", arg.type, None)
            ], nogil=True)
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_MemoryView_Len", func_type,
            args=[arg], is_temp=node.is_temp)
    elif arg.type.is_pyobject:
        cfunc_name = self._map_to_capi_len_function(arg.type)
        if cfunc_name is None:
            arg_type = arg.type
            if ((arg_type.is_extension_type or arg_type.is_builtin_type)
                    and arg_type.entry.qualified_name in self._ext_types_with_pysize):
                # known ext type that stores its length in ob_size
                cfunc_name = 'Py_SIZE'
            else:
                return node
        arg = arg.as_none_safe_node(
            "object of type 'NoneType' has no len()")
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, cfunc_name, self.PyObject_Size_func_type,
            args=[arg], is_temp=node.is_temp)
    elif arg.type.is_unicode_char:
        # a single character always has length 1
        return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
                                 type=node.type)
    else:
        return node
    if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
        new_node = new_node.coerce_to(node.type, self.current_env())
    return new_node
# C signature of the Py_TYPE() macro: object in, type out.
Pyx_Type_func_type = PyrexTypes.CFuncType(
    Builtin.type_type, [
        PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
    ])
def _handle_simple_function_type(self, node, function, pos_args):
    """Replace type(o) by a macro call to Py_TYPE(o)."""
    if len(pos_args) != 1:
        return node
    # Py_TYPE() returns a borrowed reference, so the call is not a temp.
    type_call = ExprNodes.PythonCapiCallNode(
        node.pos, "Py_TYPE", self.Pyx_Type_func_type,
        args=pos_args, is_temp=False)
    return ExprNodes.CastNode(type_call, PyrexTypes.py_object_type)
# Common C signature for type-check predicates (Py*_Check() etc.):
# object in, C boolean out.
Py_type_check_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_bint_type, [
        PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
    ])
def _handle_simple_function_isinstance(self, node, function, pos_args):
    """Replace isinstance() checks against builtin types by the
    corresponding C-API call.
    """
    if len(pos_args) != 2:
        return node
    arg, types = pos_args
    temps = []
    if isinstance(types, ExprNodes.TupleNode):
        types = types.args
        if len(types) == 1 and not types[0].type is Builtin.type_type:
            return node  # nothing to improve here
        if arg.is_attribute or not arg.is_simple():
            # 'arg' is evaluated multiple times below => hold it in a temp
            arg = UtilNodes.ResultRefNode(arg)
            temps.append(arg)
    elif types.type is Builtin.type_type:
        types = [types]
    else:
        return node
    tests = []
    test_nodes = []
    env = self.current_env()
    for test_type_node in types:
        builtin_type = None
        if test_type_node.is_name:
            if test_type_node.entry:
                entry = env.lookup(test_type_node.entry.name)
                if entry and entry.type and entry.type.is_builtin_type:
                    builtin_type = entry.type
                    if builtin_type is Builtin.type_type:
                        # all types have type "type", but there's only one 'type'
                        if entry.name != 'type' or not (
                                entry.scope and entry.scope.is_builtin_scope):
                            builtin_type = None
        if builtin_type is not None:
            # use the dedicated Py*_Check() predicate for the builtin type
            type_check_function = entry.type.type_check_function(exact=False)
            if type_check_function in tests:
                # same predicate already emitted => skip duplicate test
                continue
            tests.append(type_check_function)
            type_check_args = [arg]
        elif test_type_node.type is Builtin.type_type:
            type_check_function = '__Pyx_TypeCheck'
            type_check_args = [arg, test_type_node]
        else:
            # arbitrary expression: evaluate once, fall back to the C-API call
            if not test_type_node.is_literal:
                test_type_node = UtilNodes.ResultRefNode(test_type_node)
                temps.append(test_type_node)
            type_check_function = 'PyObject_IsInstance'
            type_check_args = [arg, test_type_node]
        test_nodes.append(
            ExprNodes.PythonCapiCallNode(
                test_type_node.pos, type_check_function, self.Py_type_check_func_type,
                args=type_check_args,
                is_temp=True,
            ))
    def join_with_or(a, b, make_binop_node=ExprNodes.binop_node):
        # combine the individual checks with short-circuiting 'or'
        or_node = make_binop_node(node.pos, 'or', a, b)
        or_node.type = PyrexTypes.c_bint_type
        or_node.wrap_operands(env)
        return or_node
    test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
    # wrap in reverse order so the first temp is evaluated first
    for temp in temps[::-1]:
        test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
    return test_node
def _handle_simple_function_ord(self, node, function, pos_args):
    """Unpack ord(Py_UNICODE) and ord('X').
    """
    if len(pos_args) != 1:
        return node
    arg = pos_args[0]
    if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
        if arg.arg.type.is_unicode_char:
            # ord(c_unicode_char) is just a C cast of the char value
            return ExprNodes.TypecastNode(
                arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type
            ).coerce_to(node.type, self.current_env())
    elif isinstance(arg, ExprNodes.UnicodeNode):
        if len(arg.value) == 1:
            # ord(u'X') -> compile time constant
            return ExprNodes.IntNode(
                arg.pos, type=PyrexTypes.c_int_type,
                value=str(ord(arg.value)),
                constant_result=ord(arg.value)
            ).coerce_to(node.type, self.current_env())
    elif isinstance(arg, ExprNodes.StringNode):
        if arg.unicode_value and len(arg.unicode_value) == 1 \
                and ord(arg.unicode_value) <= 255:  # Py2/3 portability
            return ExprNodes.IntNode(
                arg.pos, type=PyrexTypes.c_int_type,
                value=str(ord(arg.unicode_value)),
                constant_result=ord(arg.unicode_value)
            ).coerce_to(node.type, self.current_env())
    return node
### special methods
# C signature of __Pyx_tp_new(): (type, args tuple) -> new instance.
Pyx_tp_new_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
    ])
# C signature of __Pyx_tp_new_kwargs(): (type, args tuple, kwargs dict)
# -> new instance.
Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
        PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
    ])
def _handle_any_slot__new__(self, node, function, args,
                            is_unbound_method, kwargs=None):
    """Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
    """
    obj = function.obj
    if not is_unbound_method or len(args) < 1:
        return node
    type_arg = args[0]
    if not obj.is_name or not type_arg.is_name:
        # play safe
        return node
    if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
        # not a known type, play safe
        return node
    if not type_arg.type_entry or not obj.type_entry:
        if obj.name != type_arg.name:
            return node
        # otherwise, we know it's a type and we know it's the same
        # type for both - that should do
    elif type_arg.type_entry != obj.type_entry:
        # different types - may or may not lead to an error at runtime
        return node
    args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
    args_tuple = args_tuple.analyse_types(
        self.current_env(), skip_children=True)
    if type_arg.type_entry:
        ext_type = type_arg.type_entry.type
        if (ext_type.is_extension_type and ext_type.typeobj_cname and
                ext_type.scope.global_scope() == self.current_env().global_scope()):
            # known type in current module
            tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
            slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
            if slot_func_cname:
                # call the tp_new slot function directly with a precise
                # return type (the extension type itself)
                cython_scope = self.context.cython_scope
                PyTypeObjectPtr = PyrexTypes.CPtrType(
                    cython_scope.lookup('PyTypeObject').type)
                pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
                    ext_type, [
                        PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
                        PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
                        PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
                    ])
                type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
                if not kwargs:
                    kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type)  # hack?
                return ExprNodes.PythonCapiCallNode(
                    node.pos, slot_func_cname,
                    pyx_tp_new_kwargs_func_type,
                    args=[type_arg, args_tuple, kwargs],
                    may_return_none=False,
                    is_temp=True)
    else:
        # arbitrary variable, needs a None check for safety
        type_arg = type_arg.as_none_safe_node(
            "object.__new__(X): X is not a type object (NoneType)")
    # generic fallback: go through the __Pyx_tp_new*() helpers
    utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
    if kwargs:
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
            args=[type_arg, args_tuple, kwargs],
            utility_code=utility_code,
            is_temp=node.is_temp
            )
    else:
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
            args=[type_arg, args_tuple],
            utility_code=utility_code,
            is_temp=node.is_temp
            )
### methods of builtin types
# C signature shared by the list append helpers: (list, item) -> return
# code, -1 on error.
PyObject_Append_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_returncode_type, [
        PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
    ],
    exception_value="-1")
def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
    """Optimistic optimisation as X.append() is almost always
    referring to a list.
    """
    # only applies when the (None) result of append() is discarded
    if len(args) != 2 or node.result_is_used:
        return node
    return ExprNodes.PythonCapiCallNode(
        node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
        args=args,
        may_return_none=False,
        is_temp=node.is_temp,
        result_is_used=False,
        utility_code=load_c_utility('append')
    )
def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
    """Replace list.extend([...]) for short sequence literals values by sequential appends
    to avoid creating an intermediate sequence argument.
    """
    if len(args) != 2:
        return node
    obj, value = args
    if not value.is_sequence_constructor:
        return node
    items = list(value.args)
    if value.mult_factor is not None or len(items) > 8:
        # Appending wins for short sequences but slows down when multiple resize operations are needed.
        # This seems to be a good enough limit that avoids repeated resizing.
        if False and isinstance(value, ExprNodes.ListNode):
            # One would expect that tuples are more efficient here, but benchmarking with
            # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
            # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
            # which is probably tuned more towards lists than tuples (and rightly so).
            tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
            Visitor.recursively_replace_node(node, args[1], tuple_node)
        return node
    wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
    if not items:
        # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
        wrapped_obj.result_is_used = node.result_is_used
        return wrapped_obj
    cloned_obj = obj = wrapped_obj
    if len(items) > 1 and not obj.is_simple():
        # the list object is referenced once per item => hold it in a temp
        cloned_obj = UtilNodes.LetRefNode(obj)
    # Use ListComp_Append() for all but the last item and finish with PyList_Append()
    # to shrink the list storage size at the very end if necessary.
    temps = []
    arg = items[-1]
    if not arg.is_simple():
        arg = UtilNodes.LetRefNode(arg)
        temps.append(arg)
    new_node = ExprNodes.PythonCapiCallNode(
        node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
        args=[cloned_obj, arg],
        is_temp=True,
        utility_code=load_c_utility("ListAppend"))
    # build the remaining appends from back to front, combining the C
    # return codes with '|' so any failure propagates
    for arg in items[-2::-1]:
        if not arg.is_simple():
            arg = UtilNodes.LetRefNode(arg)
            temps.append(arg)
        new_node = ExprNodes.binop_node(
            node.pos, '|',
            ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
                args=[cloned_obj, arg], py_name="extend",
                is_temp=True,
                utility_code=load_c_utility("ListCompAppend")),
            new_node,
            type=PyrexTypes.c_returncode_type,
        )
    new_node.result_is_used = node.result_is_used
    if cloned_obj is not obj:
        temps.append(cloned_obj)
    for temp in temps:
        new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
        new_node.result_is_used = node.result_is_used
    return new_node
# C signature of __Pyx_PyByteArray_Append(): appends a C int byte value;
# -1 on error.
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_returncode_type, [
        PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
    ],
    exception_value="-1")
# C signature of __Pyx_PyByteArray_AppendObject(): appends a Python
# object value; -1 on error.
PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_returncode_type, [
        PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
    ],
    exception_value="-1")
def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
    """Replace bytearray.append(value) by a C helper call, depending on
    whether the value is a C int, a one-char literal or a Python object.
    """
    if len(args) != 2:
        return node
    func_name = "__Pyx_PyByteArray_Append"
    func_type = self.PyByteArray_Append_func_type
    value = unwrap_coerced_node(args[1])
    if value.type.is_int or isinstance(value, ExprNodes.IntNode):
        # integer byte value: append via the C int helper
        value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
        utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
    elif value.is_string_literal:
        if not value.can_coerce_to_char_literal():
            return node
        # one-char literal: append its char value
        value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
        utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
    elif value.type.is_pyobject:
        # generic object: the helper handles conversion and error checking
        func_name = "__Pyx_PyByteArray_AppendObject"
        func_type = self.PyByteArray_AppendObject_func_type
        utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
    else:
        return node
    new_node = ExprNodes.PythonCapiCallNode(
        node.pos, func_name, func_type,
        args=[args[0], value],
        may_return_none=False,
        is_temp=node.is_temp,
        utility_code=utility_code,
    )
    if node.result_is_used:
        new_node = new_node.coerce_to(node.type, self.current_env())
    return new_node
# C signature of __Pyx_Py{Object,List}_Pop(): pops the last item.
PyObject_Pop_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
    ])
# C signature of __Pyx_Py{Object,List}_PopIndex(): pops at an index that
# is passed both as a Python object and as a C value.
PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None),
    ],
    has_varargs=True)  # to fake the additional macro args that lack a proper C type
def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
    """list.pop([n]): delegate to the generic pop handler with the
    list-specific fast path enabled.
    """
    return self._handle_simple_method_object_pop(
        node, function, args, is_unbound_method, is_list=True)
def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
    """Optimistic optimisation as X.pop([n]) is almost always
    referring to a list.
    """
    if not args:
        return node
    obj = args[0]
    if is_list:
        type_name = 'List'
        # list.pop() on None must raise an AttributeError, like CPython does
        obj = obj.as_none_safe_node(
            "'NoneType' object has no attribute '%.30s'",
            error="PyExc_AttributeError",
            format_args=['pop'])
    else:
        type_name = 'Object'
    if len(args) == 1:
        # no-index form: pop the last item
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_Py%s_Pop" % type_name,
            self.PyObject_Pop_func_type,
            args=[obj],
            may_return_none=True,
            is_temp=node.is_temp,
            utility_code=load_c_utility('pop'),
        )
    elif len(args) == 2:
        # indexed form: the helper needs the index both as a Python
        # object (py_index) and as a C Py_ssize_t (index), plus info
        # about the original C type for overflow handling
        index = unwrap_coerced_node(args[1])
        py_index = ExprNodes.NoneNode(index.pos)
        orig_index_type = index.type
        if not index.type.is_int:
            if isinstance(index, ExprNodes.IntNode):
                py_index = index.coerce_to_pyobject(self.current_env())
                index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
            elif is_list:
                if index.type.is_pyobject:
                    # evaluate the index once and reuse it in both forms
                    py_index = index.coerce_to_simple(self.current_env())
                    index = ExprNodes.CloneNode(py_index)
                index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
            else:
                return node
        elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type):
            # C index type wider than Py_ssize_t => cannot pass it safely
            return node
        elif isinstance(index, ExprNodes.IntNode):
            py_index = index.coerce_to_pyobject(self.current_env())
        # real type might still be larger at runtime
        if not orig_index_type.is_int:
            orig_index_type = index.type
        if not orig_index_type.create_to_py_utility_code(self.current_env()):
            return node
        convert_func = orig_index_type.to_py_function
        conversion_type = PyrexTypes.CFuncType(
            PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg("intval", orig_index_type, None)])
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_Py%s_PopIndex" % type_name,
            self.PyObject_PopIndex_func_type,
            args=[obj, py_index, index,
                  ExprNodes.IntNode(index.pos, value=str(orig_index_type.signed and 1 or 0),
                                    constant_result=orig_index_type.signed and 1 or 0,
                                    type=PyrexTypes.c_int_type),
                  ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type,
                                             orig_index_type.empty_declaration_code()),
                  ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)],
            may_return_none=True,
            is_temp=node.is_temp,
            utility_code=load_c_utility("pop_index"),
        )
    return node
# Generic C signature for one-object-argument C-API calls that return a
# status code (-1 on error), e.g. PyList_Sort().
single_param_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_returncode_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
    ],
    exception_value = "-1")
def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
    """Call PyList_Sort() instead of the 0-argument l.sort().
    """
    if len(args) != 1:
        return node
    # Fixed: coerce_to() expects the environment object, so current_env
    # must be *called* — the original passed the bound method itself.
    return self._substitute_method_call(
        node, function, "PyList_Sort", self.single_param_func_type,
        'sort', is_unbound_method, args).coerce_to(node.type, self.current_env())
# C signature of __Pyx_PyDict_GetItemDefault(): (dict, key, default) -> value.
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
    ])
def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
    """Replace dict.get() by a call to PyDict_GetItem().
    """
    arg_count = len(args)
    if arg_count == 2:
        # default argument omitted -> use None, as dict.get() defines it
        args.append(ExprNodes.NoneNode(node.pos))
    elif arg_count != 3:
        self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
        return node
    return self._substitute_method_call(
        node, function,
        "__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
        'get', is_unbound_method, args,
        may_return_none=True,
        utility_code=load_c_utility("dict_getitem_default"))
# C signature of __Pyx_PyDict_SetDefault(); 'is_safe_type' tells the helper
# whether the key type is known not to raise during hashing/comparison.
Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
        ])
def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
    """Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
    """
    if len(args) == 2:
        # missing default value -> None, as dict.setdefault() defines it
        args.append(ExprNodes.NoneNode(node.pos))
    elif len(args) != 3:
        self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
        return node
    key_type = args[1].type
    if key_type is PyrexTypes.py_object_type:
        is_safe_type = -1  # unknown at compile time
    elif key_type.is_builtin_type:
        # NOTE: substring membership test kept verbatim from the original logic
        is_safe_type = int(key_type.name in
                           'str bytes unicode float int long bool')
    else:
        is_safe_type = 0  # definitely not a safe key type
    args.append(ExprNodes.IntNode(
        node.pos, value=str(is_safe_type), constant_result=is_safe_type))
    return self._substitute_method_call(
        node, function,
        "__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
        'setdefault', is_unbound_method, args,
        may_return_none=True,
        utility_code=load_c_utility('dict_setdefault'))
# C signature of __Pyx_PyDict_Pop(dict, key, default) -> object.
PyDict_Pop_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
        ])
def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
    """Replace dict.pop() by a call to _PyDict_Pop().
    """
    arg_count = len(args)
    if arg_count == 2:
        # no default given: pass NULL so a missing key raises KeyError
        args.append(ExprNodes.NullNode(node.pos))
    elif arg_count != 3:
        self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
        return node
    return self._substitute_method_call(
        node, function,
        "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
        'pop', is_unbound_method, args,
        may_return_none=True,
        utility_code=load_c_utility('py_dict_pop'))
# C signatures for the specialised "PyObject op C-constant" helpers.
# 'intval'/'fval' duplicate the constant operand as a raw C value; 'inplace'
# selects the in-place number slot (e.g. nb_inplace_add) where available.
Pyx_PyInt_BinopInt_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("intval", PyrexTypes.c_long_type, None),
        PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
        ])

Pyx_PyFloat_BinopInt_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("fval", PyrexTypes.c_double_type, None),
        PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
        ])
def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method):
    """Optimise ``obj + num_const`` (or reversed) via a specialised C helper."""
    return self._optimise_num_binop(
        'Add', node, function, args, is_unbound_method)
def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method):
    """Optimise ``obj - num_const`` (or reversed) via a specialised C helper."""
    return self._optimise_num_binop(
        'Subtract', node, function, args, is_unbound_method)
def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method):
    """Optimise ``obj == num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Eq', node, function, args, is_unbound_method)
def _handle_simple_method_object___neq__(self, node, function, args, is_unbound_method):
    """Optimise ``obj != num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Ne', node, function, args, is_unbound_method)
def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method):
    """Optimise ``obj & num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'And', node, function, args, is_unbound_method)
def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method):
    """Optimise ``obj | num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Or', node, function, args, is_unbound_method)
def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method):
    """Optimise ``obj ^ num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Xor', node, function, args, is_unbound_method)
def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method):
    """Optimise ``obj >> const`` for constant integer shifts in [1, 63]."""
    if len(args) != 2:
        return node
    shift = args[1]
    if not isinstance(shift, ExprNodes.IntNode):
        return node
    if not shift.has_constant_result() or not (1 <= shift.constant_result <= 63):
        return node
    return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method)
def _handle_simple_method_object___lshift__(self, node, function, args, is_unbound_method):
    """Optimise ``obj << const`` for constant integer shifts in [1, 63]."""
    if len(args) != 2:
        return node
    shift = args[1]
    if not isinstance(shift, ExprNodes.IntNode):
        return node
    if not shift.has_constant_result() or not (1 <= shift.constant_result <= 63):
        return node
    return self._optimise_num_binop('Lshift', node, function, args, is_unbound_method)
def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method):
    """Optimise ``obj % const`` for safe constant divisors."""
    return self._optimise_num_div(
        'Remainder', node, function, args, is_unbound_method)
def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method):
    """Optimise ``obj // const`` for safe constant divisors."""
    return self._optimise_num_div(
        'FloorDivide', node, function, args, is_unbound_method)
def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method):
    """Optimise true division ``obj / const`` for safe constant divisors."""
    return self._optimise_num_div(
        'TrueDivide', node, function, args, is_unbound_method)
def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method):
    """Optimise old-style division ``obj / const`` for safe constant divisors."""
    return self._optimise_num_div(
        'Divide', node, function, args, is_unbound_method)
def _optimise_num_div(self, operator, node, function, args, is_unbound_method):
    """Dispatch division-like operators to the binop optimiser, but only
    when the divisor is a non-zero numeric constant that is exactly
    representable at C level (ints within +/-2**30, floats within +/-2**53).
    """
    if len(args) != 2:
        return node
    divisor = args[1]
    if not divisor.has_constant_result() or divisor.constant_result == 0:
        return node
    if isinstance(divisor, ExprNodes.IntNode):
        bound = 2 ** 30
    elif isinstance(divisor, ExprNodes.FloatNode):
        bound = 2 ** 53  # largest exactly representable double integer range
    else:
        return node
    if not (-bound <= divisor.constant_result <= bound):
        return node
    return self._optimise_num_binop(operator, node, function, args, is_unbound_method)
def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method):
    """Optimise ``float + num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Add', node, function, args, is_unbound_method)
def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method):
    """Optimise ``float - num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Subtract', node, function, args, is_unbound_method)
def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method):
    """Optimise ``float / num_const`` (true division) via a C helper."""
    return self._optimise_num_binop(
        'TrueDivide', node, function, args, is_unbound_method)
def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method):
    """Optimise ``float / num_const`` (old-style division) via a C helper."""
    return self._optimise_num_binop(
        'Divide', node, function, args, is_unbound_method)
def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method):
    """Optimise ``float % num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Remainder', node, function, args, is_unbound_method)
def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method):
    """Optimise ``float == num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Eq', node, function, args, is_unbound_method)
def _handle_simple_method_float___neq__(self, node, function, args, is_unbound_method):
    """Optimise ``float != num_const`` via a specialised C helper."""
    return self._optimise_num_binop(
        'Ne', node, function, args, is_unbound_method)
def _optimise_num_binop(self, operator, node, function, args, is_unbound_method):
    """
    Optimise math operators for (likely) float or small integer operations.

    Rewrites ``pyobject op numeric_constant`` into a call to a specialised
    __Pyx_Py{Int,Float}_<Op><Order> C helper that avoids the generic
    number-protocol dispatch.  Returns the original ``node`` unchanged
    whenever the pattern does not apply.
    """
    if len(args) != 2:
        return node

    if not node.type.is_pyobject:
        return node

    # When adding IntNode/FloatNode to something else, assume other operand is also numeric.
    # Prefer constants on RHS as they allows better size control for some operators.
    num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode)
    if isinstance(args[1], num_nodes):
        if args[0].type is not PyrexTypes.py_object_type:
            return node
        numval = args[1]
        arg_order = 'ObjC'  # object on the left, constant on the right
    elif isinstance(args[0], num_nodes):
        if args[1].type is not PyrexTypes.py_object_type:
            return node
        numval = args[0]
        arg_order = 'CObj'  # constant on the left, object on the right
    else:
        return node

    if not numval.has_constant_result():
        return node

    is_float = isinstance(numval, ExprNodes.FloatNode)
    if is_float:
        # only a subset of operators has a specialised float helper
        if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
            return node
    elif operator == 'Divide':
        # mixed old-/new-style division is not currently optimised for integers
        return node
    elif abs(numval.constant_result) > 2**30:
        # constant must fit safely into a C long
        return node

    # duplicate the constant as a raw C value argument for the helper
    args = list(args)
    args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)(
        numval.pos, value=numval.value, constant_result=numval.constant_result,
        type=PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type))
    # tell the helper whether the in-place number slot may be used
    inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False
    args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace))

    utility_code = TempitaUtilityCode.load_cached(
        "PyFloatBinop" if is_float else "PyIntBinop", "Optimize.c",
        context=dict(op=operator, order=arg_order))

    return self._substitute_method_call(
        node, function, "__Pyx_Py%s_%s%s" % ('Float' if is_float else 'Int', operator, arg_order),
        self.Pyx_PyFloat_BinopInt_func_type if is_float else self.Pyx_PyInt_BinopInt_func_type,
        '__%s__' % operator[:3].lower(), is_unbound_method, args,
        may_return_none=True,
        with_none_check=False,
        utility_code=utility_code)
### unicode type methods

# C signature of the Py_UNICODE_IS*() character-class predicates.
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_bint_type, [
        PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
        ])
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
    """Replace a predicate method call on a single unicode character
    (e.g. ``c.isdigit()`` where ``c`` is a Py_UCS4) by the corresponding
    Py_UNICODE_IS*() C macro call, avoiding the Python object round trip.
    """
    if is_unbound_method or len(args) != 1:
        return node
    ustring = args[0]
    # only applies when the object is a C unicode char coerced to Python
    if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
           not ustring.arg.type.is_unicode_char:
        return node
    uchar = ustring.arg
    method_name = function.attribute
    if method_name == 'istitle':
        # istitle() doesn't directly map to Py_UNICODE_ISTITLE()
        utility_code = UtilityCode.load_cached(
            "py_unicode_istitle", "StringTools.c")
        function_name = '__Pyx_Py_UNICODE_ISTITLE'
    else:
        utility_code = None
        function_name = 'Py_UNICODE_%s' % method_name.upper()
    func_call = self._substitute_method_call(
        node, function,
        function_name, self.PyUnicode_uchar_predicate_func_type,
        method_name, is_unbound_method, [uchar],
        utility_code = utility_code)
    if node.type.is_pyobject:
        # BUG FIX: current_env must be called; the original passed the
        # bound method itself where the environment is expected.
        func_call = func_call.coerce_to_pyobject(self.current_env())
    return func_call
# All single-character unicode predicates share the same injection logic.
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
# C signature of the Py_UNICODE_TO{LOWER,UPPER,TITLE}() conversion macros.
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ucs4_type, [
        PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
        ])
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
    """Replace lower()/upper()/title() on a single unicode character by
    the corresponding Py_UNICODE_TO*() C macro call.
    """
    if is_unbound_method or len(args) != 1:
        return node
    ustring = args[0]
    # only applies when the object is a C unicode char coerced to Python
    if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
           not ustring.arg.type.is_unicode_char:
        return node
    uchar = ustring.arg
    method_name = function.attribute
    function_name = 'Py_UNICODE_TO%s' % method_name.upper()
    func_call = self._substitute_method_call(
        node, function,
        function_name, self.PyUnicode_uchar_conversion_func_type,
        method_name, is_unbound_method, [uchar])
    if node.type.is_pyobject:
        # BUG FIX: current_env must be called; the original passed the
        # bound method itself where the environment is expected.
        func_call = func_call.coerce_to_pyobject(self.current_env())
    return func_call
# The three case-conversion methods share the same injection logic.
_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
_handle_simple_method_unicode_title = _inject_unicode_character_conversion
# C signature of PyUnicode_Splitlines(str, keepends) -> list.
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
    Builtin.list_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
        ])
def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
    """Replace unicode.splitlines(...) by a direct call to the
    corresponding C-API function.
    """
    if len(args) not in (1, 2):
        self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
        return node
    # keepends defaults to False
    self._inject_bint_default_argument(node, args, 1, False)
    return self._substitute_method_call(
        node, function, "PyUnicode_Splitlines",
        self.PyUnicode_Splitlines_func_type,
        'splitlines', is_unbound_method, args)
# C signature of PyUnicode_Split(str, sep, maxsplit) -> list.
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
    Builtin.list_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
        ]
    )
def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
    """Replace unicode.split(...) by a direct call to the
    corresponding C-API function.
    """
    if len(args) not in (1, 2, 3):
        self._error_wrong_arg_count('unicode.split', node, args, "1-3")
        return node
    if len(args) < 2:
        # no separator given: NULL selects whitespace splitting
        args.append(ExprNodes.NullNode(node.pos))
    # maxsplit defaults to -1 (no limit)
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
    return self._substitute_method_call(
        node, function, "PyUnicode_Split",
        self.PyUnicode_Split_func_type,
        'split', is_unbound_method, args)
# C signature of PyUnicode_Join(str, seq) -> unicode.
PyUnicode_Join_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None),
        ])
def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method):
    """
    unicode.join() builds a list first => see if we can do this more efficiently

    When the argument is a generator expression, inline it into a list
    comprehension so that PyUnicode_Join() receives a list directly
    instead of iterating a generator.
    """
    if len(args) != 2:
        self._error_wrong_arg_count('unicode.join', node, args, "2")
        return node
    if isinstance(args[1], ExprNodes.GeneratorExpressionNode):
        gen_expr_node = args[1]
        loop_node = gen_expr_node.loop

        yield_statements = _find_yield_statements(loop_node)
        if yield_statements:
            # rewrite the genexpr as an inlined list comprehension
            inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode(
                node.pos, gen_expr_node, orig_func='list',
                comprehension_type=Builtin.list_type)

            # replace each 'yield expr' by an append to the comprehension target
            for yield_expression, yield_stat_node in yield_statements:
                append_node = ExprNodes.ComprehensionAppendNode(
                    yield_expression.pos,
                    expr=yield_expression,
                    target=inlined_genexpr.target)

                Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)

            args[1] = inlined_genexpr

    return self._substitute_method_call(
        node, function,
        "PyUnicode_Join", self.PyUnicode_Join_func_type,
        'join', is_unbound_method, args)
# C signature of the __Pyx_Py*_Tailmatch() helpers used for
# startswith()/endswith(); 'direction' is -1 for prefix, +1 for suffix.
PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_bint_type, [
        PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None),  # bytes/str/unicode
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
        ],
    exception_value = '-1')
def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
    """Optimise unicode.endswith() into a C-level tail match."""
    direction = +1  # match at the end of the string
    return self._inject_tailmatch(
        node, function, args, is_unbound_method,
        'unicode', 'endswith', unicode_tailmatch_utility_code, direction)
def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
    """Optimise unicode.startswith() into a C-level head match."""
    direction = -1  # match at the start of the string
    return self._inject_tailmatch(
        node, function, args, is_unbound_method,
        'unicode', 'startswith', unicode_tailmatch_utility_code, direction)
def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
                      method_name, utility_code, direction):
    """Replace unicode.startswith(...) and unicode.endswith(...)
    by a direct call to the corresponding C-API function.

    args layout: [self, substring, start?, end?]; missing start/end are
    injected in place, then 'direction' (-1 head, +1 tail) is appended.
    """
    if len(args) not in (2,3,4):
        self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4")
        return node
    # default start=0, end=PY_SSIZE_T_MAX (whole string)
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
    args.append(ExprNodes.IntNode(
        node.pos, value=str(direction), type=PyrexTypes.c_int_type))

    method_call = self._substitute_method_call(
        node, function,
        "__Pyx_Py%s_Tailmatch" % type_name.capitalize(),
        self.PyString_Tailmatch_func_type,
        method_name, is_unbound_method, args,
        utility_code = utility_code)
    # the helper returns a C bint; callers expect a Python bool
    return method_call.coerce_to(Builtin.bool_type, self.current_env())
# C signature of PyUnicode_Find(); returns the index, -1 for "not found",
# and -2 to signal an error (hence the exception_value).
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
        ],
    exception_value = '-2')
def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
    """Optimise unicode.find() into a direct PyUnicode_Find() call."""
    return self._inject_unicode_find(
        node, function, args, is_unbound_method, 'find', +1)
def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
    """Optimise unicode.rfind() into a direct PyUnicode_Find() call."""
    return self._inject_unicode_find(
        node, function, args, is_unbound_method, 'rfind', -1)
def _inject_unicode_find(self, node, function, args, is_unbound_method,
                         method_name, direction):
    """Replace unicode.find(...) and unicode.rfind(...) by a
    direct call to the corresponding C-API function.

    'direction' is +1 for find() (forward) and -1 for rfind() (backward).
    """
    if len(args) not in (2,3,4):
        self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
        return node
    # default start=0, end=PY_SSIZE_T_MAX (whole string)
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
    args.append(ExprNodes.IntNode(
        node.pos, value=str(direction), type=PyrexTypes.c_int_type))

    method_call = self._substitute_method_call(
        node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
        method_name, is_unbound_method, args)
    # the result is a C Py_ssize_t index; callers expect a Python int
    return method_call.coerce_to_pyobject(self.current_env())
# C signature of PyUnicode_Count(str, substring, start, end) -> Py_ssize_t.
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
        ],
    exception_value = '-1')
def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
    """Replace unicode.count(...) by a direct call to the
    corresponding C-API function.
    """
    if len(args) not in (2,3,4):
        self._error_wrong_arg_count('unicode.count', node, args, "2-4")
        return node
    # default start=0, end=PY_SSIZE_T_MAX (whole string)
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")

    method_call = self._substitute_method_call(
        node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
        'count', is_unbound_method, args)
    # the result is a C Py_ssize_t count; callers expect a Python int
    return method_call.coerce_to_pyobject(self.current_env())
# C signature of PyUnicode_Replace(str, substring, replstr, maxcount) -> unicode.
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
        ])
def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
    """Replace unicode.replace(...) by a direct call to the
    corresponding C-API function.
    """
    if len(args) not in (3,4):
        self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
        return node
    # maxcount defaults to -1 (replace all occurrences)
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")

    return self._substitute_method_call(
        node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
        'replace', is_unbound_method, args)
# C signature of PyUnicode_AsEncodedString(obj, encoding, errors) -> bytes.
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
    Builtin.bytes_type, [
        PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        ])

# C signature of the codec-specific PyUnicode_As<Codec>String(obj) helpers.
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
    Builtin.bytes_type, [
        PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
        ])

# Encodings for which CPython provides a dedicated C-level codec function.
_special_encodings = ['UTF8', 'UTF16', 'UTF-16LE', 'UTF-16BE', 'Latin1', 'ASCII',
                      'unicode_escape', 'raw_unicode_escape']

# (canonical name, encoder) pairs used to recognise aliases of the special
# encodings by comparing codec objects rather than spellings.
_special_codecs = [ (name, codecs.getencoder(name))
                    for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
    """Replace unicode.encode(...) by a direct C-API call to the
    corresponding codec.

    Also folds ``u'literal'.encode(enc)`` into a bytes literal at compile
    time when the encoding succeeds.
    """
    if len(args) < 1 or len(args) > 3:
        self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
        return node

    string_node = args[0]

    if len(args) == 1:
        # no arguments: use the default encoding (NULL, NULL)
        null_node = ExprNodes.NullNode(node.pos)
        return self._substitute_method_call(
            node, function, "PyUnicode_AsEncodedString",
            self.PyUnicode_AsEncodedString_func_type,
            'encode', is_unbound_method, [string_node, null_node, null_node])

    parameters = self._unpack_encoding_and_error_mode(node.pos, args)
    if parameters is None:
        return node
    encoding, encoding_node, error_handling, error_handling_node = parameters

    if encoding and isinstance(string_node, ExprNodes.UnicodeNode):
        # constant, so try to do the encoding at compile time
        try:
            value = string_node.value.encode(encoding, error_handling)
        # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
        # only encoding-related failures should trigger the runtime fallback.
        except Exception:
            # well, looks like we can't
            pass
        else:
            value = bytes_literal(value, encoding)
            return ExprNodes.BytesNode(string_node.pos, value=value, type=Builtin.bytes_type)

    if encoding and error_handling == 'strict':
        # try to find a specific encoder function
        codec_name = self._find_special_codec_name(encoding)
        if codec_name is not None and '-' not in codec_name:
            encode_function = "PyUnicode_As%sString" % codec_name
            return self._substitute_method_call(
                node, function, encode_function,
                self.PyUnicode_AsXyzString_func_type,
                'encode', is_unbound_method, [string_node])

    # generic fallback: PyUnicode_AsEncodedString with explicit parameters
    return self._substitute_method_call(
        node, function, "PyUnicode_AsEncodedString",
        self.PyUnicode_AsEncodedString_func_type,
        'encode', is_unbound_method,
        [string_node, encoding_node, error_handling_node])
# Pointer type of the codec-specific PyUnicode_Decode<Codec>() functions,
# passed into the generic decode helpers below.
PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        ]))

# C signature of __Pyx_decode_c_string(): decodes a slice of a raw char*.
_decode_c_string_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
        ])

# C signature of __Pyx_decode_bytes()/__Pyx_decode_bytearray().
_decode_bytes_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
        ])

# Built on first use because it depends on the concrete C++ string type.
_decode_cpp_string_func_type = None  # lazy init
def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
    """Replace char*.decode() by a direct C-API call to the
    corresponding codec, possibly resolving a slice on the char*.

    Handles three input kinds: raw C strings, C++ std::string, and
    Python bytes/bytearray objects, each dispatched to a dedicated
    __Pyx_decode_* helper.
    """
    if not (1 <= len(args) <= 3):
        self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
        return node

    # normalise input nodes
    string_node = args[0]
    start = stop = None
    if isinstance(string_node, ExprNodes.SliceIndexNode):
        # decode a slice directly instead of materialising the sliced bytes
        index_node = string_node
        string_node = index_node.base
        start, stop = index_node.start, index_node.stop
        if not start or start.constant_result == 0:
            start = None
    if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
        string_node = string_node.arg

    string_type = string_node.type
    if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
        # None-check the object with an error message matching CPython's
        if is_unbound_method:
            string_node = string_node.as_none_safe_node(
                "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                format_args=['decode', string_type.name])
        else:
            string_node = string_node.as_none_safe_node(
                "'NoneType' object has no attribute '%.30s'",
                error="PyExc_AttributeError",
                format_args=['decode'])
    elif not string_type.is_string and not string_type.is_cpp_string:
        # nothing to optimise here
        return node

    parameters = self._unpack_encoding_and_error_mode(node.pos, args)
    if parameters is None:
        return node
    encoding, encoding_node, error_handling, error_handling_node = parameters

    # normalise start/stop to C Py_ssize_t values
    if not start:
        start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
    elif not start.type.is_int:
        start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
    if stop and not stop.type.is_int:
        stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())

    # try to find a specific encoder function
    codec_name = None
    if encoding is not None:
        codec_name = self._find_special_codec_name(encoding)
    if codec_name is not None:
        if codec_name in ('UTF16', 'UTF-16LE', 'UTF-16BE'):
            # UTF-16 variants go through private wrappers
            codec_cname = "__Pyx_PyUnicode_Decode%s" % codec_name.replace('-', '')
        else:
            codec_cname = "PyUnicode_Decode%s" % codec_name
        decode_function = ExprNodes.RawCNameExprNode(
            node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type, cname=codec_cname)
        # the helper ignores 'encoding' when a decode function is given
        encoding_node = ExprNodes.NullNode(node.pos)
    else:
        decode_function = ExprNodes.NullNode(node.pos)

    # build the helper function call
    temps = []
    if string_type.is_string:
        # C string
        if not stop:
            # use strlen() to find the string length, just as CPython would
            if not string_node.is_name:
                string_node = UtilNodes.LetRefNode(string_node) # used twice
                temps.append(string_node)
            stop = ExprNodes.PythonCapiCallNode(
                string_node.pos, "strlen", self.Pyx_strlen_func_type,
                args=[string_node],
                is_temp=False,
                utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"),
            ).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
        helper_func_type = self._decode_c_string_func_type
        utility_code_name = 'decode_c_string'
    elif string_type.is_cpp_string:
        # C++ std::string
        if not stop:
            stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
                                     constant_result=ExprNodes.not_a_constant)
        if self._decode_cpp_string_func_type is None:
            # lazy init to reuse the C++ string type
            self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
                Builtin.unicode_type, [
                    PyrexTypes.CFuncTypeArg("string", string_type, None),
                    PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
                    PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
                    PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
                    PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
                    PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
                    ])
        helper_func_type = self._decode_cpp_string_func_type
        utility_code_name = 'decode_cpp_string'
    else:
        # Python bytes/bytearray object
        if not stop:
            stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
                                     constant_result=ExprNodes.not_a_constant)
        helper_func_type = self._decode_bytes_func_type
        if string_type is Builtin.bytes_type:
            utility_code_name = 'decode_bytes'
        else:
            utility_code_name = 'decode_bytearray'

    node = ExprNodes.PythonCapiCallNode(
        node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
        args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
        is_temp=node.is_temp,
        utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
    )

    # wrap any LetRefNode temps introduced above (innermost last)
    for temp in temps[::-1]:
        node = UtilNodes.EvalWithTempExprNode(temp, node)
    return node
# bytearray.decode() uses the same optimisation as bytes.decode()
_handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
def _find_special_codec_name(self, encoding):
    """Return the canonical name of a specialised CPython codec matching
    *encoding* (resolving aliases via the codec registry), or None."""
    try:
        requested = codecs.getencoder(encoding)
    except LookupError:
        return None  # unknown codec name
    for name, codec in self._special_codecs:
        if codec != requested:
            continue
        if '_' in name:
            # e.g. 'unicode_escape' -> 'UnicodeEscape'
            name = ''.join(part.capitalize() for part in name.split('_'))
        return name
    return None
def _unpack_encoding_and_error_mode(self, pos, args):
    """Extract (encoding, encoding_node, error_handling, error_handling_node)
    from an encode()/decode() argument list, or return None if the
    arguments cannot be resolved to C strings."""
    null_node = ExprNodes.NullNode(pos)

    encoding, encoding_node = None, null_node
    if len(args) >= 2:
        encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
        if encoding_node is None:
            return None

    error_handling, error_handling_node = 'strict', null_node
    if len(args) == 3:
        error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
        if error_handling_node is None:
            return None
        if error_handling == 'strict':
            # 'strict' is the C-API default; pass NULL instead
            error_handling_node = null_node

    return encoding, encoding_node, error_handling, error_handling_node
def _unpack_string_and_cstring_node(self, node):
    """Return (string_value, cstring_node) for an encoding/errors argument.

    string_value is the Python-level string when known at compile time
    (else None); cstring_node is a node of C char* type, or None when the
    argument cannot be used as a C string at all.
    """
    if isinstance(node, ExprNodes.CoerceToPyTypeNode):
        node = node.arg
    if isinstance(node, ExprNodes.UnicodeNode):
        encoding = node.value
        node = ExprNodes.BytesNode(
            node.pos, value=encoding.as_utf8_string(), type=PyrexTypes.c_const_char_ptr_type)
    elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)):
        # byte values map 1:1 to Latin-1 code points, so this never fails
        encoding = node.value.decode('ISO-8859-1')
        node = ExprNodes.BytesNode(
            node.pos, value=node.value, type=PyrexTypes.c_const_char_ptr_type)
    elif node.type is Builtin.bytes_type:
        # runtime bytes object: value unknown, coerce to char*
        encoding = None
        node = node.coerce_to(PyrexTypes.c_const_char_ptr_type, self.current_env())
    elif node.type.is_string:
        # already a C string, value unknown
        encoding = None
    else:
        # cannot be used as a C string argument
        encoding = node = None
    return encoding, node
def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method):
    """Optimise str.endswith() into a C-level tail match."""
    direction = +1  # match at the end of the string
    return self._inject_tailmatch(
        node, function, args, is_unbound_method,
        'str', 'endswith', str_tailmatch_utility_code, direction)
def _handle_simple_method_str_startswith(self, node, function, args, is_unbound_method):
    """Optimise str.startswith() into a C-level head match."""
    direction = -1  # match at the start of the string
    return self._inject_tailmatch(
        node, function, args, is_unbound_method,
        'str', 'startswith', str_tailmatch_utility_code, direction)
def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
    """Optimise bytes.endswith() into a C-level tail match."""
    direction = +1  # match at the end of the string
    return self._inject_tailmatch(
        node, function, args, is_unbound_method,
        'bytes', 'endswith', bytes_tailmatch_utility_code, direction)
def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
    """Optimise bytes.startswith() into a C-level head match."""
    direction = -1  # match at the start of the string
    return self._inject_tailmatch(
        node, function, args, is_unbound_method,
        'bytes', 'startswith', bytes_tailmatch_utility_code, direction)
# Intentionally disabled handlers, kept as a throw-away string literal so the
# code is preserved verbatim without being compiled.
''' # disabled for now, enable when we consider it worth it (see StringTools.c)
def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytearray', 'endswith',
        bytes_tailmatch_utility_code, +1)

def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytearray', 'startswith',
        bytes_tailmatch_utility_code, -1)
'''
### helpers

def _substitute_method_call(self, node, function, name, func_type,
                            attr_name, is_unbound_method, args=(),
                            utility_code=None, is_temp=None,
                            may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
                            with_none_check=True):
    """Build a PythonCapiCallNode that replaces a builtin method call.

    'name'/'func_type' describe the C function; 'attr_name' is the original
    Python method name (used in None-check error messages).  When
    'with_none_check' is true, the self argument (args[0]) is wrapped in a
    None check first.
    """
    args = list(args)
    if with_none_check and args:
        args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name)
    if is_temp is None:
        # preserve the temp-ness of the node being replaced
        is_temp = node.is_temp
    return ExprNodes.PythonCapiCallNode(
        node.pos, name, func_type,
        args = args,
        is_temp = is_temp,
        utility_code = utility_code,
        may_return_none = may_return_none,
        result_is_used = node.result_is_used,
        )
    def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
        """Wrap the self/first argument of an optimised method call in a
        None-safety check, unless it is a literal (which cannot be None).
        """
        if self_arg.is_literal:
            return self_arg
        if is_unbound_method:
            self_arg = self_arg.as_none_safe_node(
                "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                format_args=[attr_name, self_arg.type.name])
        else:
            # The C error format uses '%.30s' when the attribute name is at
            # most 30 characters long, plain '%s' otherwise.
            self_arg = self_arg.as_none_safe_node(
                "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
                error="PyExc_AttributeError",
                format_args=[attr_name])
        return self_arg
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
type=type, constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
default_value = bool(default_value)
args.append(ExprNodes.BoolNode(node.pos, value=default_value,
constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
# C helper implementations for the startswith/endswith optimisations above,
# loaded from the shared utility file StringTools.c.
unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c')
class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
    """Calculate the result of constant expressions to store it in
    ``expr_node.constant_result``, and replace trivial cases by their
    constant result.

    General rules:

    - We calculate float constants to make them available to the
      compiler, but we do not aggregate them into a single literal
      node to prevent any loss of precision.

    - We recursively calculate constants from non-literal nodes to
      make them available to the compiler, but we only aggregate
      literal nodes at each step. Non-literal nodes are never merged
      into a single node.
    """

    def __init__(self, reevaluate=False):
        """
        The reevaluate argument specifies whether constant values that were
        previously computed should be recomputed.
        """
        super(ConstantFolding, self).__init__()
        self.reevaluate = reevaluate

    def _calculate_const(self, node):
        # Bottom-up computation of node.constant_result.  Leaves the result
        # as ExprNodes.not_a_constant when any child is non-constant or when
        # evaluating the expression raises a "normal" error.
        if (not self.reevaluate and
                node.constant_result is not ExprNodes.constant_value_not_set):
            return
        # make sure we always set the value
        not_a_constant = ExprNodes.not_a_constant
        node.constant_result = not_a_constant
        # check if all children are constant
        children = self.visitchildren(node)
        for child_result in children.values():
            if type(child_result) is list:
                for child in child_result:
                    if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
                        return
            elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
                return
        # now try to calculate the real constant value
        try:
            node.calculate_constant_result()
            # if node.constant_result is not ExprNodes.not_a_constant:
            #     print node.__class__.__name__, node.constant_result
        except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
            # ignore all 'normal' errors here => no constant result
            pass
        except Exception:
            # this looks like a real error
            import traceback, sys
            traceback.print_exc(file=sys.stdout)

    # Literal node classes ordered from narrowest to widest value type.
    NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode,
                       ExprNodes.IntNode, ExprNodes.FloatNode]

    def _widest_node_class(self, *nodes):
        # Widest literal node class among the given nodes, or None when a
        # node type is not in NODE_TYPE_ORDER (index() raises ValueError).
        try:
            return self.NODE_TYPE_ORDER[
                max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
        except ValueError:
            return None

    def _bool_node(self, node, value):
        # Replacement BoolNode carrying a known constant truth value.
        value = bool(value)
        return ExprNodes.BoolNode(node.pos, value=value, constant_result=value)

    def visit_ExprNode(self, node):
        # Generic fallback: only compute the constant result, keep the node.
        self._calculate_const(node)
        return node

    def visit_UnopNode(self, node):
        # Fold constant unary operations ('!', '+', '-') on literal operands.
        self._calculate_const(node)
        if not node.has_constant_result():
            if node.operator == '!':
                return self._handle_NotNode(node)
            return node
        if not node.operand.is_literal:
            return node
        if node.operator == '!':
            return self._bool_node(node, node.constant_result)
        elif isinstance(node.operand, ExprNodes.BoolNode):
            return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)),
                                     type=PyrexTypes.c_int_type,
                                     constant_result=int(node.constant_result))
        elif node.operator == '+':
            return self._handle_UnaryPlusNode(node)
        elif node.operator == '-':
            return self._handle_UnaryMinusNode(node)
        return node

    # Mapping used to invert a comparison operator under 'not'.
    _negate_operator = {
        'in': 'not_in',
        'not_in': 'in',
        'is': 'is_not',
        'is_not': 'is'
    }.get

    def _handle_NotNode(self, node):
        # 'not (a in b)' etc. => rewrite as the inverted comparison.
        operand = node.operand
        if isinstance(operand, ExprNodes.PrimaryCmpNode):
            operator = self._negate_operator(operand.operator)
            if operator:
                node = copy.copy(operand)
                node.operator = operator
                node = self.visit_PrimaryCmpNode(node)
        return node

    def _handle_UnaryMinusNode(self, node):
        # Fold '-literal' into a negated literal node where that is safe.
        def _negate(value):
            if value.startswith('-'):
                value = value[1:]
            else:
                value = '-' + value
            return value

        node_type = node.operand.type
        if isinstance(node.operand, ExprNodes.FloatNode):
            # this is a safe operation
            return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value),
                                       type=node_type,
                                       constant_result=node.constant_result)
        if node_type.is_int and node_type.signed or \
                isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject:
            return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value),
                                     type=node_type,
                                     longness=node.operand.longness,
                                     constant_result=node.constant_result)
        return node

    def _handle_UnaryPlusNode(self, node):
        # '+x' is a no-op when it does not change the constant value.
        if (node.operand.has_constant_result() and
                node.constant_result == node.operand.constant_result):
            return node.operand
        return node

    def visit_BoolBinopNode(self, node):
        # Short-circuit 'and'/'or' when the first operand's truth value is known.
        self._calculate_const(node)
        if not node.operand1.has_constant_result():
            return node
        if node.operand1.constant_result:
            if node.operator == 'and':
                return node.operand2
            else:
                return node.operand1
        else:
            if node.operator == 'and':
                return node.operand1
            else:
                return node.operand2

    def visit_BinopNode(self, node):
        # Replace a binary operation on two literals by a single literal node
        # of the widest participating type.  Float results are deliberately
        # not aggregated (see class docstring).
        self._calculate_const(node)
        if node.constant_result is ExprNodes.not_a_constant:
            return node
        if isinstance(node.constant_result, float):
            return node
        operand1, operand2 = node.operand1, node.operand2
        if not operand1.is_literal or not operand2.is_literal:
            return node
        # now inject a new constant node with the calculated value
        try:
            type1, type2 = operand1.type, operand2.type
            if type1 is None or type2 is None:
                return node
        except AttributeError:
            return node
        if type1.is_numeric and type2.is_numeric:
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
        else:
            widest_type = PyrexTypes.py_object_type
        target_class = self._widest_node_class(operand1, operand2)
        if target_class is None:
            return node
        elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>':
            # C arithmetic results in at least an int type
            target_class = ExprNodes.IntNode
        elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^':
            # C arithmetic results in at least an int type
            target_class = ExprNodes.IntNode
        if target_class is ExprNodes.IntNode:
            unsigned = getattr(operand1, 'unsigned', '') and \
                       getattr(operand2, 'unsigned', '')
            longness = "LL"[:max(len(getattr(operand1, 'longness', '')),
                                 len(getattr(operand2, 'longness', '')))]
            new_node = ExprNodes.IntNode(pos=node.pos,
                                         unsigned=unsigned, longness=longness,
                                         value=str(int(node.constant_result)),
                                         constant_result=int(node.constant_result))
            # IntNode is smart about the type it chooses, so we just
            # make sure we were not smarter this time
            if widest_type.is_pyobject or new_node.type.is_pyobject:
                new_node.type = PyrexTypes.py_object_type
            else:
                new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type)
        else:
            if target_class is ExprNodes.BoolNode:
                node_value = node.constant_result
            else:
                node_value = str(node.constant_result)
            new_node = target_class(pos=node.pos, type = widest_type,
                                    value = node_value,
                                    constant_result = node.constant_result)
        return new_node

    def visit_AddNode(self, node):
        # Concatenate adjacent string literals at compile time.
        self._calculate_const(node)
        if node.constant_result is ExprNodes.not_a_constant:
            return node
        if node.operand1.is_string_literal and node.operand2.is_string_literal:
            # some people combine string literals with a '+'
            str1, str2 = node.operand1, node.operand2
            if isinstance(str1, ExprNodes.UnicodeNode) and isinstance(str2, ExprNodes.UnicodeNode):
                bytes_value = None
                if str1.bytes_value is not None and str2.bytes_value is not None:
                    if str1.bytes_value.encoding == str2.bytes_value.encoding:
                        bytes_value = bytes_literal(
                            str1.bytes_value + str2.bytes_value,
                            str1.bytes_value.encoding)
                string_value = EncodedString(node.constant_result)
                return ExprNodes.UnicodeNode(
                    str1.pos, value=string_value, constant_result=node.constant_result, bytes_value=bytes_value)
            elif isinstance(str1, ExprNodes.BytesNode) and isinstance(str2, ExprNodes.BytesNode):
                if str1.value.encoding == str2.value.encoding:
                    bytes_value = bytes_literal(node.constant_result, str1.value.encoding)
                    return ExprNodes.BytesNode(str1.pos, value=bytes_value, constant_result=node.constant_result)
            # all other combinations are rather complicated
            # to get right in Py2/3: encodings, unicode escapes, ...
        return self.visit_BinopNode(node)

    def visit_MulNode(self, node):
        # Fold 'sequence * N' and 'string * N' for literal operands.
        self._calculate_const(node)
        if node.operand1.is_sequence_constructor:
            return self._calculate_constant_seq(node, node.operand1, node.operand2)
        if isinstance(node.operand1, ExprNodes.IntNode) and \
                node.operand2.is_sequence_constructor:
            return self._calculate_constant_seq(node, node.operand2, node.operand1)
        if node.operand1.is_string_literal:
            return self._multiply_string(node, node.operand1, node.operand2)
        elif node.operand2.is_string_literal:
            return self._multiply_string(node, node.operand2, node.operand1)
        return self.visit_BinopNode(node)

    def _multiply_string(self, node, string_node, multiplier_node):
        # Replace 'literal * int' by the multiplied string literal, keeping
        # the parallel bytes/unicode representations in sync.
        multiplier = multiplier_node.constant_result
        if not isinstance(multiplier, _py_int_types):
            return node
        if not (node.has_constant_result() and isinstance(node.constant_result, _py_string_types)):
            return node
        if len(node.constant_result) > 256:
            # Too long for static creation, leave it to runtime. (-> arbitrary limit)
            return node
        build_string = encoded_string
        if isinstance(string_node, ExprNodes.BytesNode):
            build_string = bytes_literal
        elif isinstance(string_node, ExprNodes.StringNode):
            if string_node.unicode_value is not None:
                string_node.unicode_value = encoded_string(
                    string_node.unicode_value * multiplier,
                    string_node.unicode_value.encoding)
        elif isinstance(string_node, ExprNodes.UnicodeNode):
            if string_node.bytes_value is not None:
                string_node.bytes_value = bytes_literal(
                    string_node.bytes_value * multiplier,
                    string_node.bytes_value.encoding)
        else:
            assert False, "unknown string node type: %s" % type(string_node)
        string_node.value = build_string(
            string_node.value * multiplier,
            string_node.value.encoding)
        return string_node

    def _calculate_constant_seq(self, node, sequence_node, factor):
        # Fold an integer multiplication factor into the sequence constructor
        # itself (empty it for factor <= 0, combine with an existing factor).
        if factor.constant_result != 1 and sequence_node.args:
            if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0:
                del sequence_node.args[:]
                sequence_node.mult_factor = None
            elif sequence_node.mult_factor is not None:
                if (isinstance(factor.constant_result, _py_int_types) and
                        isinstance(sequence_node.mult_factor.constant_result, _py_int_types)):
                    value = sequence_node.mult_factor.constant_result * factor.constant_result
                    sequence_node.mult_factor = ExprNodes.IntNode(
                        sequence_node.mult_factor.pos,
                        value=str(value), constant_result=value)
                else:
                    # don't know if we can combine the factors, so don't
                    return self.visit_BinopNode(node)
            else:
                sequence_node.mult_factor = factor
        return sequence_node

    def visit_ModNode(self, node):
        # Optimise 'u"...%s..." % (args,)' into an f-string style join node.
        self.visitchildren(node)
        if isinstance(node.operand1, ExprNodes.UnicodeNode) and isinstance(node.operand2, ExprNodes.TupleNode):
            if not node.operand2.mult_factor:
                fstring = self._build_fstring(node.operand1.pos, node.operand1.value, node.operand2.args)
                if fstring is not None:
                    return fstring
        return self.visit_BinopNode(node)

    _parse_string_format_regex = (
        u'(%(?:'               # %...
        u'(?:[0-9]+|[ ])?'     # width (optional) or space prefix fill character (optional)
        u'(?:[.][0-9]+)?'      # precision (optional)
        u')?.)'                # format type (or something different for unsupported formats)
    )

    def _build_fstring(self, pos, ustring, format_args):
        # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
        args = iter(format_args)
        substrings = []
        can_be_optimised = True
        for s in re.split(self._parse_string_format_regex, ustring):
            if not s:
                continue
            if s == u'%%':
                substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
                continue
            if s[0] != u'%':
                if s[-1] == u'%':
                    warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
                    can_be_optimised = False
                substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
                continue
            format_type = s[-1]
            try:
                arg = next(args)
            except StopIteration:
                warning(pos, "Too few arguments for format placeholders", level=1)
                can_be_optimised = False
                break
            if format_type in u'srfdoxX':
                format_spec = s[1:]
                if format_type in u'doxX' and u'.' in format_spec:
                    # Precision is not allowed for integers in format(), but ok in %-formatting.
                    can_be_optimised = False
                elif format_type in u'rs':
                    format_spec = format_spec[:-1]
                substrings.append(ExprNodes.FormattedValueNode(
                    arg.pos, value=arg,
                    conversion_char=format_type if format_type in u'rs' else None,
                    format_spec=ExprNodes.UnicodeNode(
                        pos, value=EncodedString(format_spec), constant_result=format_spec)
                        if format_spec else None,
                ))
            else:
                # keep it simple for now ...
                can_be_optimised = False
        if not can_be_optimised:
            # Print all warnings we can find before finally giving up here.
            return None
        try:
            next(args)
        except StopIteration: pass
        else:
            warning(pos, "Too many arguments for format placeholders", level=1)
            return None
        node = ExprNodes.JoinedStrNode(pos, values=substrings)
        return self.visit_JoinedStrNode(node)

    def visit_FormattedValueNode(self, node):
        # Simplify f-string formatted values with trivial conversions/specs.
        self.visitchildren(node)
        conversion_char = node.conversion_char or 's'
        if isinstance(node.format_spec, ExprNodes.UnicodeNode) and not node.format_spec.value:
            node.format_spec = None
        if node.format_spec is None and isinstance(node.value, ExprNodes.IntNode):
            value = EncodedString(node.value.value)
            if value.isdigit():
                return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
        if node.format_spec is None and conversion_char == 's':
            value = None
            if isinstance(node.value, ExprNodes.UnicodeNode):
                value = node.value.value
            elif isinstance(node.value, ExprNodes.StringNode):
                value = node.value.unicode_value
            if value is not None:
                return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
        return node

    def visit_JoinedStrNode(self, node):
        """
        Clean up after the parser by discarding empty Unicode strings and merging
        substring sequences. Empty or single-value join lists are not uncommon
        because f-string format specs are always parsed into JoinedStrNodes.
        """
        self.visitchildren(node)
        unicode_node = ExprNodes.UnicodeNode
        values = []
        for is_unode_group, substrings in itertools.groupby(node.values, lambda v: isinstance(v, unicode_node)):
            if is_unode_group:
                substrings = list(substrings)
                unode = substrings[0]
                if len(substrings) > 1:
                    value = EncodedString(u''.join(value.value for value in substrings))
                    unode = ExprNodes.UnicodeNode(unode.pos, value=value, constant_result=value)
                # ignore empty Unicode strings
                if unode.value:
                    values.append(unode)
            else:
                values.extend(substrings)
        if not values:
            value = EncodedString('')
            node = ExprNodes.UnicodeNode(node.pos, value=value, constant_result=value)
        elif len(values) == 1:
            node = values[0]
        elif len(values) == 2:
            # reduce to string concatenation
            node = ExprNodes.binop_node(node.pos, '+', *values)
        else:
            node.values = values
        return node

    def visit_MergedDictNode(self, node):
        """Unpack **args in place if we can."""
        self.visitchildren(node)
        args = []
        items = []

        # Merge consecutive dict literals into the first one; anything else
        # breaks the current run.
        def add(arg):
            if arg.is_dict_literal:
                if items:
                    items[0].key_value_pairs.extend(arg.key_value_pairs)
                else:
                    items.append(arg)
            elif isinstance(arg, ExprNodes.MergedDictNode):
                for child_arg in arg.keyword_args:
                    add(child_arg)
            else:
                if items:
                    args.append(items[0])
                    del items[:]
                args.append(arg)

        for arg in node.keyword_args:
            add(arg)
        if items:
            args.append(items[0])
        if len(args) == 1:
            arg = args[0]
            if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode):
                return arg
        node.keyword_args[:] = args
        self._calculate_const(node)
        return node

    def visit_MergedSequenceNode(self, node):
        """Unpack *args in place if we can."""
        self.visitchildren(node)
        is_set = node.type is Builtin.set_type
        args = []
        values = []

        # Merge consecutive literal sequences into the first one; anything
        # else breaks the current run.
        def add(arg):
            if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor):
                if values:
                    values[0].args.extend(arg.args)
                else:
                    values.append(arg)
            elif isinstance(arg, ExprNodes.MergedSequenceNode):
                for child_arg in arg.args:
                    add(child_arg)
            else:
                if values:
                    args.append(values[0])
                    del values[:]
                args.append(arg)

        for arg in node.args:
            add(arg)
        if values:
            args.append(values[0])
        if len(args) == 1:
            arg = args[0]
            if ((is_set and arg.is_set_literal) or
                    (arg.is_sequence_constructor and arg.type is node.type) or
                    isinstance(arg, ExprNodes.MergedSequenceNode)):
                return arg
        node.args[:] = args
        self._calculate_const(node)
        return node

    def visit_SequenceNode(self, node):
        """Unpack *args in place if we can."""
        self.visitchildren(node)
        args = []
        for arg in node.args:
            if not arg.is_starred:
                args.append(arg)
            elif arg.target.is_sequence_constructor and not arg.target.mult_factor:
                args.extend(arg.target.args)
            else:
                args.append(arg)
        node.args[:] = args
        self._calculate_const(node)
        return node

    def visit_PrimaryCmpNode(self, node):
        # calculate constant partial results in the comparison cascade
        self.visitchildren(node, ['operand1'])
        left_node = node.operand1
        cmp_node = node
        while cmp_node is not None:
            self.visitchildren(cmp_node, ['operand2'])
            right_node = cmp_node.operand2
            cmp_node.constant_result = not_a_constant
            if left_node.has_constant_result() and right_node.has_constant_result():
                try:
                    cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
                except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
                    pass  # ignore all 'normal' errors here => no constant result
            left_node = right_node
            cmp_node = cmp_node.cascade
        if not node.cascade:
            if node.has_constant_result():
                return self._bool_node(node, node.constant_result)
            return node
        # collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
        cascades = [[node.operand1]]
        final_false_result = []

        def split_cascades(cmp_node):
            if cmp_node.has_constant_result():
                if not cmp_node.constant_result:
                    # False => short-circuit
                    final_false_result.append(self._bool_node(cmp_node, False))
                    return
                else:
                    # True => discard and start new cascade
                    cascades.append([cmp_node.operand2])
            else:
                # not constant => append to current cascade
                cascades[-1].append(cmp_node)
            if cmp_node.cascade:
                split_cascades(cmp_node.cascade)

        split_cascades(node)
        cmp_nodes = []
        for cascade in cascades:
            if len(cascade) < 2:
                continue
            cmp_node = cascade[1]
            pcmp_node = ExprNodes.PrimaryCmpNode(
                cmp_node.pos,
                operand1=cascade[0],
                operator=cmp_node.operator,
                operand2=cmp_node.operand2,
                constant_result=not_a_constant)
            cmp_nodes.append(pcmp_node)
            last_cmp_node = pcmp_node
            for cmp_node in cascade[2:]:
                last_cmp_node.cascade = cmp_node
                last_cmp_node = cmp_node
            last_cmp_node.cascade = None
        if final_false_result:
            # last cascade was constant False
            cmp_nodes.append(final_false_result[0])
        elif not cmp_nodes:
            # only constants, but no False result
            return self._bool_node(node, True)
        node = cmp_nodes[0]
        if len(cmp_nodes) == 1:
            if node.has_constant_result():
                return self._bool_node(node, node.constant_result)
        else:
            # chain the remaining non-constant comparisons with 'and'
            for cmp_node in cmp_nodes[1:]:
                node = ExprNodes.BoolBinopNode(
                    node.pos,
                    operand1=node,
                    operator='and',
                    operand2=cmp_node,
                    constant_result=not_a_constant)
        return node

    def visit_CondExprNode(self, node):
        # Select the branch of 'x if cond else y' when cond is constant.
        self._calculate_const(node)
        if not node.test.has_constant_result():
            return node
        if node.test.constant_result:
            return node.true_val
        else:
            return node.false_val

    def visit_IfStatNode(self, node):
        self.visitchildren(node)
        # eliminate dead code based on constant condition results
        if_clauses = []
        for if_clause in node.if_clauses:
            condition = if_clause.condition
            if condition.has_constant_result():
                if condition.constant_result:
                    # always true => subsequent clauses can safely be dropped
                    node.else_clause = if_clause.body
                    break
                # else: false => drop clause
            else:
                # unknown result => normal runtime evaluation
                if_clauses.append(if_clause)
        if if_clauses:
            node.if_clauses = if_clauses
            return node
        elif node.else_clause:
            return node.else_clause
        else:
            return Nodes.StatListNode(node.pos, stats=[])

    def visit_SliceIndexNode(self, node):
        self._calculate_const(node)
        # normalise start/stop values
        if node.start is None or node.start.constant_result is None:
            start = node.start = None
        else:
            start = node.start.constant_result
        if node.stop is None or node.stop.constant_result is None:
            stop = node.stop = None
        else:
            stop = node.stop.constant_result
        # cut down sliced constant sequences
        if node.constant_result is not not_a_constant:
            base = node.base
            if base.is_sequence_constructor and base.mult_factor is None:
                base.args = base.args[start:stop]
                return base
            elif base.is_string_literal:
                base = base.as_sliced_node(start, stop)
                if base is not None:
                    return base
        return node

    def visit_ComprehensionNode(self, node):
        # A comprehension whose loop was pruned to nothing becomes an empty literal.
        self.visitchildren(node)
        if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
            # loop was pruned already => transform into literal
            if node.type is Builtin.list_type:
                return ExprNodes.ListNode(
                    node.pos, args=[], constant_result=[])
            elif node.type is Builtin.set_type:
                return ExprNodes.SetNode(
                    node.pos, args=[], constant_result=set())
            elif node.type is Builtin.dict_type:
                return ExprNodes.DictNode(
                    node.pos, key_value_pairs=[], constant_result={})
        return node

    def visit_ForInStatNode(self, node):
        # Drop loops over empty literal sequences; iterate list literals as tuples.
        self.visitchildren(node)
        sequence = node.iterator.sequence
        if isinstance(sequence, ExprNodes.SequenceNode):
            if not sequence.args:
                if node.else_clause:
                    return node.else_clause
                else:
                    # don't break list comprehensions
                    return Nodes.StatListNode(node.pos, stats=[])
            # iterating over a list literal? => tuples are more efficient
            if isinstance(sequence, ExprNodes.ListNode):
                node.iterator.sequence = sequence.as_tuple()
        return node

    def visit_WhileStatNode(self, node):
        # 'while True:' drops its condition; 'while False:' collapses to else.
        self.visitchildren(node)
        if node.condition and node.condition.has_constant_result():
            if node.condition.constant_result:
                node.condition = None
                node.else_clause = None
            else:
                return node.else_clause
        return node

    def visit_ExprStatNode(self, node):
        self.visitchildren(node)
        if not isinstance(node.expr, ExprNodes.ExprNode):
            # ParallelRangeTransform does this ...
            return node
        # drop unused constant expressions
        if node.expr.has_constant_result():
            return None
        return node

    # in the future, other nodes can have their own handler method here
    # that can replace them with a constant result node
    visit_Node = Visitor.VisitorTransform.recurse_to_children
class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
    """
    This visitor handles several commuting optimizations, and is run
    just before the C code generation phase.

    The optimizations currently implemented in this class are:
        - eliminate None assignment and refcounting for first assignment.
        - isinstance -> typecheck for cdef types
        - eliminate checks for None and/or types that became redundant after tree changes
        - eliminate useless string formatting steps
        - replace Python function calls that look like method calls by a faster PyMethodCallNode
    """
    # True while traversing the body of a loop; some optimisations are only
    # worthwhile (or allowed) inside loops.
    in_loop = False

    def visit_SingleAssignmentNode(self, node):
        """Avoid redundant initialisation of local variables before their
        first assignment.
        """
        self.visitchildren(node)
        if node.first:
            lhs = node.lhs
            lhs.lhs_of_first_assignment = True
        return node

    def visit_SimpleCallNode(self, node):
        """
        Replace generic calls to isinstance(x, type) by a more efficient type check.
        Replace likely Python method calls by a specialised PyMethodCallNode.
        """
        self.visitchildren(node)
        function = node.function
        if function.type.is_cfunction and function.is_name:
            if function.name == 'isinstance' and len(node.args) == 2:
                type_arg = node.args[1]
                if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
                    cython_scope = self.context.cython_scope
                    function.entry = cython_scope.lookup('PyObject_TypeCheck')
                    function.type = function.entry.type
                    PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
                    node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
        elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
                "optimize.unpack_method_calls_in_pyinit"
                if not self.in_loop and self.current_env().is_module_scope
                else "optimize.unpack_method_calls")):
            # optimise simple Python methods calls
            if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
                    node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and node.arg_tuple.args)):
                # simple call, now exclude calls to objects that are definitely not methods
                may_be_a_method = True
                if function.type is Builtin.type_type:
                    may_be_a_method = False
                elif function.is_attribute:
                    if function.entry and function.entry.type.is_cfunction:
                        # optimised builtin method
                        may_be_a_method = False
                elif function.is_name:
                    entry = function.entry
                    if entry.is_builtin or entry.type.is_cfunction:
                        may_be_a_method = False
                    elif entry.cf_assignments:
                        # local functions/classes are definitely not methods
                        non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode)
                        may_be_a_method = any(
                            assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
                            for assignment in entry.cf_assignments)
                if may_be_a_method:
                    if (node.self and function.is_attribute and
                            isinstance(function.obj, ExprNodes.CloneNode) and function.obj.arg is node.self):
                        # function self object was moved into a CloneNode => undo
                        function.obj = function.obj.arg
                    node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
                        node, function=function, arg_tuple=node.arg_tuple, type=node.type))
        return node

    def visit_NumPyMethodCallNode(self, node):
        # Exclude from replacement above.
        self.visitchildren(node)
        return node

    def visit_PyTypeTestNode(self, node):
        """Remove tests for alternatively allowed None values from
        type tests when we know that the argument cannot be None
        anyway.
        """
        self.visitchildren(node)
        if not node.notnone:
            if not node.arg.may_be_none():
                node.notnone = True
        return node

    def visit_NoneCheckNode(self, node):
        """Remove None checks from expressions that definitely do not
        carry a None value.
        """
        self.visitchildren(node)
        if not node.arg.may_be_none():
            return node.arg
        return node

    def visit_LoopNode(self, node):
        """Remember when we enter a loop as some expensive optimisations might still be worth it there.
        """
        old_val = self.in_loop
        self.in_loop = True
        self.visitchildren(node)
        self.in_loop = old_val
        return node
class ConsolidateOverflowCheck(Visitor.CythonTransform):
    """
    This class facilitates the sharing of overflow checking among all nodes
    of a nested arithmetic expression. For example, given the expression
    a*b + c, where a, b, and c are all possibly overflowing ints, the entire
    sequence will be evaluated and the overflow bit checked only at the end.
    """
    # The outermost overflow-checked arithmetic node currently being visited,
    # whose overflow bit is shared by its nested arithmetic children.
    overflow_bit_node = None

    def visit_Node(self, node):
        # A non-arithmetic node ends the current arithmetic expression, so
        # hide the shared overflow bit from its children and restore it after.
        if self.overflow_bit_node is not None:
            saved = self.overflow_bit_node
            self.overflow_bit_node = None
            self.visitchildren(node)
            self.overflow_bit_node = saved
        else:
            self.visitchildren(node)
        return node

    def visit_NumBinopNode(self, node):
        # The outermost foldable node owns the overflow bit; nested foldable
        # nodes share it and skip their own per-operation check.
        if node.overflow_check and node.overflow_fold:
            top_level_overflow = self.overflow_bit_node is None
            if top_level_overflow:
                self.overflow_bit_node = node
            else:
                node.overflow_bit_node = self.overflow_bit_node
                node.overflow_check = False
            self.visitchildren(node)
            if top_level_overflow:
                self.overflow_bit_node = None
        else:
            self.visitchildren(node)
        return node
| ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/Cython/Compiler/Optimize.py | Python | mit | 207,837 | [
"VisIt"
] | 503bb869d102d625515decc9a6cf4356006c24f01229b6f33d64f4e90e8540b9 |
# TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from statsmodels.compat.python import lrange, lzip
from statsmodels.compat.pandas import Appender
import numpy as np
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.tools.tools import pinv_extended
from statsmodels.tools.decorators import (cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
from statsmodels.tools.validation import string_like
# need import in module instead of lazily to copy `__doc__`
from statsmodels.regression._prediction import PredictionResults
from . import _prediction as pred
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR', 'PredictionResults',
'RegressionResultsWrapper']
_fit_regularized_doc =\
r"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : str
Either 'elastic_net' or 'sqrt_lasso'.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is a
ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for ``params``.
profile_scale : bool
If True the penalized fit is computed using the profile
(concentrated) log-likelihood for the Gaussian model.
Otherwise the fit uses the residual sum of squares.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
**kwargs
Additional keyword arguments that contain information used when
constructing a model using the formula interface.
Returns
-------
statsmodels.base.elastic_net.RegularizedResults
The regularized results.
Notes
-----
The elastic net uses a combination of L1 and L2 penalties.
The implementation closely follows the glmnet package in R.
The function that is minimized is:
.. math::
0.5*RSS/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
For WLS and GLS, the RSS is calculated using the whitened endog and
exog data.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
The square root lasso approach is a variation of the Lasso
that is largely self-tuning (the optimal tuning parameter
does not depend on the standard deviation of the regression
errors). If the errors are Gaussian, the tuning parameter
can be taken to be
alpha = 1.1 * np.sqrt(n) * norm.ppf(1 - 0.05 / (2 * p))
where n is the sample size and p is the number of predictors.
The square root lasso uses the following keyword arguments:
zero_tol : float
Coefficients below this threshold are treated as zero.
The cvxopt module is required to estimate model using the square root
lasso.
References
----------
.. [*] Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
.. [*] A Belloni, V Chernozhukov, L Wang (2011). Square-root Lasso:
pivotal recovery of sparse signals via conic programming.
Biometrika 98(4), 791-806. https://arxiv.org/pdf/1009.5689.pdf
"""
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.inv(sigma)).T
return sigma, cholsigmainv
class RegressionModel(base.LikelihoodModel):
    """
    Base class for linear regression models. Should not be directly called.

    Intended for subclassing; subclasses must provide `whiten`.
    """

    def __init__(self, endog, exog, **kwargs):
        super(RegressionModel, self).__init__(endog, exog, **kwargs)
        # Register data-derived attributes so they are rebuilt together
        # with the data arrays (e.g. on unpickling).
        self._data_attr.extend(['pinv_wexog', 'weights'])

    def initialize(self):
        """Initialize model components."""
        # `whiten` is supplied by the subclass (identity for OLS).
        self.wexog = self.whiten(self.exog)
        self.wendog = self.whiten(self.endog)
        # overwrite nobs from class Model:
        self.nobs = float(self.wexog.shape[0])

        # Degrees of freedom and rank are computed lazily by the
        # properties below, or set during `fit`.
        self._df_model = None
        self._df_resid = None
        self.rank = None

    @property
    def df_model(self):
        """
        The model degree of freedom.

        The dof is defined as the rank of the regressor matrix minus 1 if a
        constant is included.
        """
        if self._df_model is None:
            if self.rank is None:
                self.rank = np.linalg.matrix_rank(self.exog)
            self._df_model = float(self.rank - self.k_constant)
        return self._df_model

    @df_model.setter
    def df_model(self, value):
        # Allow an explicit override of the lazily computed value.
        self._df_model = value

    @property
    def df_resid(self):
        """
        The residual degree of freedom.

        The dof is defined as the number of observations minus the rank of
        the regressor matrix.
        """
        if self._df_resid is None:
            if self.rank is None:
                self.rank = np.linalg.matrix_rank(self.exog)
            self._df_resid = self.nobs - self.rank
        return self._df_resid

    @df_resid.setter
    def df_resid(self, value):
        # Allow an explicit override of the lazily computed value.
        self._df_resid = value

    def whiten(self, x):
        """
        Whiten method that must be overwritten by individual models.

        Parameters
        ----------
        x : array_like
            Data to be whitened.
        """
        raise NotImplementedError("Subclasses must implement.")

    def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
            use_t=None, **kwargs):
        """
        Full fit of the model.

        The results include an estimate of covariance matrix, (whitened)
        residuals and an estimate of scale.

        Parameters
        ----------
        method : str, optional
            Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
            to solve the least squares problem. "qr" uses the QR
            factorization.
        cov_type : str, optional
            See `regression.linear_model.RegressionResults` for a description
            of the available covariance estimators.
        cov_kwds : list or None, optional
            See `linear_model.RegressionResults.get_robustcov_results` for a
            description required keywords for alternative covariance
            estimators.
        use_t : bool, optional
            Flag indicating to use the Student's t distribution when computing
            p-values. Default behavior depends on cov_type. See
            `linear_model.RegressionResults.get_robustcov_results` for
            implementation details.
        **kwargs
            Additional keyword arguments that contain information used when
            constructing a model using the formula interface.

        Returns
        -------
        RegressionResults
            The model estimation results.

        See Also
        --------
        RegressionResults
            The results container.
        RegressionResults.get_robustcov_results
            A method to change the covariance estimator used when fitting the
            model.

        Notes
        -----
        The fit method uses the pseudoinverse of the design/exogenous
        variables to solve the least squares minimization.
        """
        if method == "pinv":
            # Reuse cached decomposition products from a previous fit when
            # all of them are present.
            if not (hasattr(self, 'pinv_wexog') and
                    hasattr(self, 'normalized_cov_params') and
                    hasattr(self, 'rank')):

                self.pinv_wexog, singular_values = pinv_extended(self.wexog)
                self.normalized_cov_params = np.dot(
                    self.pinv_wexog, np.transpose(self.pinv_wexog))

                # Cache these singular values for use later.
                self.wexog_singular_values = singular_values
                self.rank = np.linalg.matrix_rank(np.diag(singular_values))

            beta = np.dot(self.pinv_wexog, self.wendog)

        elif method == "qr":
            if not (hasattr(self, 'exog_Q') and
                    hasattr(self, 'exog_R') and
                    hasattr(self, 'normalized_cov_params') and
                    hasattr(self, 'rank')):
                Q, R = np.linalg.qr(self.wexog)
                self.exog_Q, self.exog_R = Q, R
                self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))

                # Cache singular values from R.
                self.wexog_singular_values = np.linalg.svd(R, 0, 0)
                self.rank = np.linalg.matrix_rank(R)
            else:
                Q, R = self.exog_Q, self.exog_R
            # used in ANOVA
            self.effects = effects = np.dot(Q.T, self.wendog)
            beta = np.linalg.solve(R, effects)
        else:
            raise ValueError('method has to be "pinv" or "qr"')

        # Only fill in the degrees of freedom when the user has not set
        # them explicitly via the property setters.
        if self._df_model is None:
            self._df_model = float(self.rank - self.k_constant)
        if self._df_resid is None:
            self.df_resid = self.nobs - self.rank

        if isinstance(self, OLS):
            lfit = OLSResults(
                self, beta,
                normalized_cov_params=self.normalized_cov_params,
                cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
        else:
            lfit = RegressionResults(
                self, beta,
                normalized_cov_params=self.normalized_cov_params,
                cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
                **kwargs)
        return RegressionResultsWrapper(lfit)

    def predict(self, params, exog=None):
        """
        Return linear predicted values from a design matrix.

        Parameters
        ----------
        params : array_like
            Parameters of a linear model.
        exog : array_like, optional
            Design / exogenous data. Model exog is used if None.

        Returns
        -------
        array_like
            An array of fitted values.

        Notes
        -----
        If the model has not yet been fit, params is not optional.
        """
        # JP: this does not look correct for GLMAR
        # SS: it needs its own predict method
        if exog is None:
            exog = self.exog
        return np.dot(exog, params)

    def get_distribution(self, params, scale, exog=None, dist_class=None):
        """
        Construct a random number generator for the predictive distribution.

        Parameters
        ----------
        params : array_like
            The model parameters (regression coefficients).
        scale : scalar
            The variance parameter.
        exog : array_like
            The predictor variable matrix.
        dist_class : class
            A random number generator class. Must take 'loc' and 'scale'
            as arguments and return a random number generator implementing
            an ``rvs`` method for simulating random values. Defaults to normal.

        Returns
        -------
        gen
            Frozen random number generator object with mean and variance
            determined by the fitted linear model. Use the ``rvs`` method
            to generate random values.

        Notes
        -----
        Due to the behavior of ``scipy.stats.distributions objects``,
        the returned random number generator must be called with
        ``gen.rvs(n)`` where ``n`` is the number of observations in
        the data set used to fit the model. If any other value is
        used for ``n``, misleading results will be produced.
        """
        fit = self.predict(params, exog)
        if dist_class is None:
            # Import lazily to avoid a hard scipy dependency at module load.
            from scipy.stats.distributions import norm
            dist_class = norm
        gen = dist_class(loc=fit, scale=np.sqrt(scale))
        return gen
class GLS(RegressionModel):
    __doc__ = r"""
    Generalized Least Squares

    %(params)s
    sigma : scalar or array
        The array or scalar `sigma` is the weighting matrix of the covariance.
        The default is None for no scaling. If `sigma` is a scalar, it is
        assumed that `sigma` is an n x n diagonal matrix with the given
        scalar, `sigma` as the value of each diagonal element. If `sigma`
        is an n-length vector, then `sigma` is assumed to be a diagonal
        matrix with the given `sigma` on the diagonal. This should be the
        same as WLS.
    %(extra_params)s

    Attributes
    ----------
    pinv_wexog : ndarray
        `pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
    cholsimgainv : ndarray
        The transpose of the Cholesky decomposition of the pseudoinverse.
    df_model : float
        p - 1, where p is the number of regressors including the intercept.
        of freedom.
    df_resid : float
        Number of observations n less the number of parameters p.
    llf : float
        The value of the likelihood function of the fitted model.
    nobs : float
        The number of observations n.
    normalized_cov_params : ndarray
        p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
    results : RegressionResults instance
        A property that returns the RegressionResults class if fit.
    sigma : ndarray
        `sigma` is the n x n covariance structure of the error terms.
    wexog : ndarray
        Design matrix whitened by `cholsigmainv`
    wendog : ndarray
        Response variable whitened by `cholsigmainv`

    See Also
    --------
    WLS : Fit a linear model using Weighted Least Squares.
    OLS : Fit a linear model using Ordinary Least Squares.

    Notes
    -----
    If sigma is a function of the data making one of the regressors
    a constant, then the current postestimation statistics will not be correct.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load(as_pandas=False)
    >>> data.exog = sm.add_constant(data.exog)
    >>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
    >>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
    >>> rho = res_fit.params

    `rho` is a consistent estimator of the correlation of the residuals from
    an OLS fit of the longley data. It is assumed that this is the true rho
    of the AR process data.

    >>> from scipy.linalg import toeplitz
    >>> order = toeplitz(np.arange(16))
    >>> sigma = rho**order

    `sigma` is an n x n matrix of the autocorrelation structure of the
    data.

    >>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
    >>> gls_results = gls_model.fit()
    >>> print(gls_results.summary())
    """ % {'params': base._model_params_doc,
           'extra_params': base._missing_param_doc + base._extra_param_doc}

    def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
                 **kwargs):
        # TODO: add options igls, for iterative fgls if sigma is None
        # TODO: default if sigma is none should be two-step GLS
        # Normalize sigma and precompute the whitening factor before the
        # base class calls `initialize` (which uses `whiten`).
        sigma, cholsigmainv = _get_sigma(sigma, len(endog))

        super(GLS, self).__init__(endog, exog, missing=missing,
                                  hasconst=hasconst, sigma=sigma,
                                  cholsigmainv=cholsigmainv, **kwargs)

        # store attribute names for data arrays
        self._data_attr.extend(['sigma', 'cholsigmainv'])

    def whiten(self, x):
        """
        GLS whiten method.

        Parameters
        ----------
        x : array_like
            Data to be whitened.

        Returns
        -------
        ndarray
            The value np.dot(cholsigmainv,X).

        See Also
        --------
        GLS : Fit a linear model using Generalized Least Squares.
        """
        x = np.asarray(x)
        if self.sigma is None or self.sigma.shape == ():
            # No covariance structure supplied: identity whitening.
            return x
        elif self.sigma.ndim == 1:
            # Diagonal sigma: elementwise scaling by 1/sqrt(sigma).
            if x.ndim == 1:
                return x * self.cholsigmainv
            else:
                return x * self.cholsigmainv[:, None]
        else:
            return np.dot(self.cholsigmainv, x)

    def loglike(self, params):
        r"""
        Compute the value of the Gaussian log-likelihood function at params.

        Given the whitened design matrix, the log-likelihood is evaluated
        at the parameter vector `params` for the dependent variable `endog`.

        Parameters
        ----------
        params : array_like
            The model parameters.

        Returns
        -------
        float
            The value of the log-likelihood function for a GLS Model.

        Notes
        -----
        The log-likelihood function for the normal distribution is

        .. math:: -\frac{n}{2}\log\left(\left(Y-\hat{Y}\right)^{\prime}
                  \left(Y-\hat{Y}\right)\right)
                  -\frac{n}{2}\left(1+\log\left(\frac{2\pi}{n}\right)\right)
                  -\frac{1}{2}\log\left(\left|\Sigma\right|\right)

        Y and Y-hat are whitened.
        """
        # TODO: combine this with OLS/WLS loglike and add _det_sigma argument
        nobs2 = self.nobs / 2.0
        SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
        llf = -np.log(SSR) * nobs2      # concentrated likelihood
        llf -= (1+np.log(np.pi/nobs2))*nobs2  # with likelihood constant
        if np.any(self.sigma):
            # FIXME: robust-enough check? unneeded if _det_sigma gets defined
            if self.sigma.ndim == 2:
                det = np.linalg.slogdet(self.sigma)
                llf -= .5*det[1]
            else:
                # Diagonal sigma: log-determinant is the sum of log variances.
                llf -= 0.5*np.sum(np.log(self.sigma))
            # with error covariance matrix
        return llf

    def hessian_factor(self, params, scale=None, observed=True):
        """
        Compute weights for calculating Hessian.

        Parameters
        ----------
        params : ndarray
            The parameter at which Hessian is evaluated.
        scale : None or float
            If scale is None, then the default scale will be calculated.
            Default scale is defined by `self.scaletype` and set in fit.
            If scale is not None, then it is used as a fixed scale.
        observed : bool
            If True, then the observed Hessian is returned. If false then the
            expected information matrix is returned.

        Returns
        -------
        ndarray
            A 1d weight vector used in the calculation of the Hessian.
            The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
        """
        if self.sigma is None or self.sigma.shape == ():
            return np.ones(self.exog.shape[0])
        elif self.sigma.ndim == 1:
            return self.cholsigmainv
        else:
            # NOTE(review): for a full 2d sigma only the diagonal of the
            # Cholesky factor is used here — confirm this approximation is
            # intended for non-diagonal covariance structures.
            return np.diag(self.cholsigmainv)

    @Appender(_fit_regularized_doc)
    def fit_regularized(self, method="elastic_net", alpha=0.,
                        L1_wt=1., start_params=None, profile_scale=False,
                        refit=False, **kwargs):
        if not np.isscalar(alpha):
            alpha = np.asarray(alpha)
        # Need to adjust since RSS/n term in elastic net uses nominal
        # n in denominator
        if self.sigma is not None:
            alpha = alpha * np.sum(1 / np.diag(self.sigma)) / len(self.endog)

        # Fit the penalized problem on the whitened data with plain OLS.
        rslt = OLS(self.wendog, self.wexog).fit_regularized(
            method=method, alpha=alpha,
            L1_wt=L1_wt,
            start_params=start_params,
            profile_scale=profile_scale,
            refit=refit, **kwargs)

        from statsmodels.base.elastic_net import (
            RegularizedResults, RegularizedResultsWrapper)
        rrslt = RegularizedResults(self, rslt.params)
        return RegularizedResultsWrapper(rrslt)
class WLS(RegressionModel):
    __doc__ = """
    Weighted Least Squares

    The weights are presumed to be (proportional to) the inverse of
    the variance of the observations. That is, if the variables are
    to be transformed by 1/sqrt(W) you must supply weights = 1/W.

    %(params)s
    weights : array_like, optional
        A 1d array of weights. If you supply 1/W then the variables are
        pre- multiplied by 1/sqrt(W). If no weights are supplied the
        default value is 1 and WLS results are the same as OLS.
    %(extra_params)s

    Attributes
    ----------
    weights : ndarray
        The stored weights supplied as an argument.

    See Also
    --------
    GLS : Fit a linear model using Generalized Least Squares.
    OLS : Fit a linear model using Ordinary Least Squares.

    Notes
    -----
    If the weights are a function of the data, then the post estimation
    statistics such as fvalue and mse_model might not be correct, as the
    package does not yet support no-constant regression.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> Y = [1,3,4,5,2,3,4]
    >>> X = range(1,8)
    >>> X = sm.add_constant(X)
    >>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
    >>> results = wls_model.fit()
    >>> results.params
    array([ 2.91666667,  0.0952381 ])
    >>> results.tvalues
    array([ 2.0652652 ,  0.35684428])
    >>> print(results.t_test([1, 0]))
    <T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
    >>> print(results.f_test([0, 1]))
    <F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
    """ % {'params': base._model_params_doc,
           'extra_params': base._missing_param_doc + base._extra_param_doc}

    def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
                 **kwargs):
        weights = np.array(weights)
        if weights.shape == ():
            # Scalar weight: broadcast it to one weight per observation.
            if (missing == 'drop' and 'missing_idx' in kwargs and
                    kwargs['missing_idx'] is not None):
                # patsy may have truncated endog
                weights = np.repeat(weights, len(kwargs['missing_idx']))
            else:
                weights = np.repeat(weights, len(endog))
        # handle case that endog might be of len == 1
        if len(weights) == 1:
            weights = np.array([weights.squeeze()])
        else:
            weights = weights.squeeze()
        super(WLS, self).__init__(endog, exog, missing=missing,
                                  weights=weights, hasconst=hasconst, **kwargs)
        nobs = self.exog.shape[0]
        weights = self.weights
        if weights.size != nobs and weights.shape[0] != nobs:
            raise ValueError('Weights must be scalar or same length as design')

    def whiten(self, x):
        """
        Whitener for WLS model, multiplies each column by sqrt(self.weights).

        Parameters
        ----------
        x : array_like
            Data to be whitened.

        Returns
        -------
        array_like
            The whitened values sqrt(weights)*X.
        """
        x = np.asarray(x)
        if x.ndim == 1:
            return x * np.sqrt(self.weights)
        elif x.ndim == 2:
            return np.sqrt(self.weights)[:, None] * x
        # NOTE(review): arrays with ndim > 2 fall through and implicitly
        # return None — confirm callers only ever pass 1d or 2d data.

    def loglike(self, params):
        r"""
        Compute the value of the gaussian log-likelihood function at params.

        Given the whitened design matrix, the log-likelihood is evaluated
        at the parameter vector `params` for the dependent variable `Y`.

        Parameters
        ----------
        params : array_like
            The parameter estimates.

        Returns
        -------
        float
            The value of the log-likelihood function for a WLS Model.

        Notes
        -----
        .. math:: -\frac{n}{2}\log SSR
                  -\frac{n}{2}\left(1+\log\left(\frac{2\pi}{n}\right)\right)
                  -\frac{1}{2}\log\left(\left|W\right|\right)

        where :math:`W` is a diagonal weight matrix matrix and
        :math:`SSR=\left(Y-\hat{Y}\right)^\prime W \left(Y-\hat{Y}\right)` is
        the sum of the squared weighted residuals.
        """
        nobs2 = self.nobs / 2.0
        SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
        llf = -np.log(SSR) * nobs2      # concentrated likelihood
        llf -= (1+np.log(np.pi/nobs2))*nobs2  # with constant
        # log-determinant of the diagonal weight matrix W
        llf += 0.5 * np.sum(np.log(self.weights))
        return llf

    def hessian_factor(self, params, scale=None, observed=True):
        """
        Compute the weights for calculating the Hessian.

        Parameters
        ----------
        params : ndarray
            The parameter at which Hessian is evaluated.
        scale : None or float
            If scale is None, then the default scale will be calculated.
            Default scale is defined by `self.scaletype` and set in fit.
            If scale is not None, then it is used as a fixed scale.
        observed : bool
            If True, then the observed Hessian is returned. If false then the
            expected information matrix is returned.

        Returns
        -------
        ndarray
            A 1d weight vector used in the calculation of the Hessian.
            The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
        """
        return self.weights

    @Appender(_fit_regularized_doc)
    def fit_regularized(self, method="elastic_net", alpha=0.,
                        L1_wt=1., start_params=None, profile_scale=False,
                        refit=False, **kwargs):
        # Docstring attached below
        if not np.isscalar(alpha):
            alpha = np.asarray(alpha)
        # Need to adjust since RSS/n in elastic net uses nominal n in
        # denominator
        alpha = alpha * np.sum(self.weights) / len(self.weights)

        # Fit the penalized problem on the whitened data with plain OLS.
        rslt = OLS(self.wendog, self.wexog).fit_regularized(
            method=method, alpha=alpha,
            L1_wt=L1_wt,
            start_params=start_params,
            profile_scale=profile_scale,
            refit=refit, **kwargs)

        from statsmodels.base.elastic_net import (
            RegularizedResults, RegularizedResultsWrapper)
        rrslt = RegularizedResults(self, rslt.params)
        return RegularizedResultsWrapper(rrslt)
class OLS(WLS):
    __doc__ = """
    Ordinary Least Squares

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    weights : scalar
        Has an attribute weights = array(1.0) due to inheritance from WLS.

    See Also
    --------
    WLS : Fit a linear model using Weighted Least Squares.
    GLS : Fit a linear model using Generalized Least Squares.

    Notes
    -----
    No constant is added by the model unless you are using formulas.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> duncan_prestige = sm.datasets.get_rdataset("Duncan", "carData")
    >>> Y = duncan_prestige.data['income']
    >>> X = duncan_prestige.data['education']
    >>> X = sm.add_constant(X)
    >>> model = sm.OLS(Y,X)
    >>> results = model.fit()
    >>> results.params
    const        10.603498
    education     0.594859
    dtype: float64

    >>> results.tvalues
    const        2.039813
    education    6.892802
    dtype: float64

    >>> print(results.t_test([1, 0]))
    Test for Constraints
    ==============================================================================
                     coef    std err          t      P>|t|      [0.025      0.975]
    ------------------------------------------------------------------------------
    c0            10.6035      5.198      2.040      0.048       0.120      21.087
    ==============================================================================

    >>> print(results.f_test(np.identity(2)))
    <F test: F=array([[159.63031026]]), p=1.2607168903696672e-20, df_denom=43, df_num=2>
    """ % {'params': base._model_params_doc,
           'extra_params': base._missing_param_doc + base._extra_param_doc}

    def __init__(self, endog, exog=None, missing='none', hasconst=None,
                 **kwargs):
        super(OLS, self).__init__(endog, exog, missing=missing,
                                  hasconst=hasconst, **kwargs)
        # OLS has no user-facing weights; drop the key inherited from WLS
        # so it is not echoed back by the formula machinery.
        if "weights" in self._init_keys:
            self._init_keys.remove("weights")

    def loglike(self, params, scale=None):
        """
        The likelihood function for the OLS model.

        Parameters
        ----------
        params : array_like
            The coefficients with which to estimate the log-likelihood.
        scale : float or None
            If None, return the profile (concentrated) log likelihood
            (profiled over the scale parameter), else return the
            log-likelihood using the given scale value.

        Returns
        -------
        float
            The likelihood function evaluated at params.
        """
        nobs2 = self.nobs / 2.0
        nobs = float(self.nobs)
        resid = self.endog - np.dot(self.exog, params)
        if hasattr(self, 'offset'):
            resid -= self.offset
        ssr = np.sum(resid**2)
        if scale is None:
            # profile log likelihood
            llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
        else:
            # log-likelihood
            llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
        return llf

    def whiten(self, x):
        """
        OLS model whitener does nothing.

        Parameters
        ----------
        x : array_like
            Data to be whitened.

        Returns
        -------
        array_like
            The input array unmodified.

        See Also
        --------
        OLS : Fit a linear model using Ordinary Least Squares.
        """
        return x

    def score(self, params, scale=None):
        """
        Evaluate the score function at a given point.

        The score corresponds to the profile (concentrated)
        log-likelihood in which the scale parameter has been profiled
        out.

        Parameters
        ----------
        params : array_like
            The parameter vector at which the score function is
            computed.
        scale : float or None
            If None, return the profile (concentrated) log likelihood
            (profiled over the scale parameter), else return the
            log-likelihood using the given scale value.

        Returns
        -------
        ndarray
            The score vector.
        """
        if not hasattr(self, "_wexog_xprod"):
            # Lazily cache X'X, X'y and y'y used by score and hessian.
            self._setup_score_hess()

        xtxb = np.dot(self._wexog_xprod, params)
        sdr = -self._wexog_x_wendog + xtxb

        if scale is None:
            ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
                                                  params)
            ssr += np.dot(params, xtxb)
            return -self.nobs * sdr / ssr
        else:
            return -sdr / scale

    def _setup_score_hess(self):
        # Precompute the cross products used by `score` and `hessian`;
        # the offset, if present, is folded into the response.
        y = self.wendog
        if hasattr(self, 'offset'):
            y = y - self.offset
        self._wendog_xprod = np.sum(y * y)
        self._wexog_xprod = np.dot(self.wexog.T, self.wexog)
        self._wexog_x_wendog = np.dot(self.wexog.T, y)

    def hessian(self, params, scale=None):
        """
        Evaluate the Hessian function at a given point.

        Parameters
        ----------
        params : array_like
            The parameter vector at which the Hessian is computed.
        scale : float or None
            If None, return the profile (concentrated) log likelihood
            (profiled over the scale parameter), else return the
            log-likelihood using the given scale value.

        Returns
        -------
        ndarray
            The Hessian matrix.
        """
        if not hasattr(self, "_wexog_xprod"):
            self._setup_score_hess()

        xtxb = np.dot(self._wexog_xprod, params)

        if scale is None:
            ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
                                                  params)
            ssr += np.dot(params, xtxb)
            ssrp = -2*self._wexog_x_wendog + 2*xtxb
            hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
            return -self.nobs * hm / 2
        else:
            return -self._wexog_xprod / scale

    def hessian_factor(self, params, scale=None, observed=True):
        """
        Calculate the weights for the Hessian.

        Parameters
        ----------
        params : ndarray
            The parameter at which Hessian is evaluated.
        scale : None or float
            If scale is None, then the default scale will be calculated.
            Default scale is defined by `self.scaletype` and set in fit.
            If scale is not None, then it is used as a fixed scale.
        observed : bool
            If True, then the observed Hessian is returned. If false then the
            expected information matrix is returned.

        Returns
        -------
        ndarray
            A 1d weight vector used in the calculation of the Hessian.
            The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`.
        """
        return np.ones(self.exog.shape[0])

    @Appender(_fit_regularized_doc)
    def fit_regularized(self, method="elastic_net", alpha=0.,
                        L1_wt=1., start_params=None, profile_scale=False,
                        refit=False, **kwargs):
        # In the future we could add support for other penalties, e.g. SCAD.
        if method not in ("elastic_net", "sqrt_lasso"):
            msg = "Unknown method '%s' for fit_regularized" % method
            raise ValueError(msg)

        # Set default parameters.
        defaults = {"maxiter": 50, "cnvrg_tol": 1e-10,
                    "zero_tol": 1e-8}
        defaults.update(kwargs)

        if method == "sqrt_lasso":
            from statsmodels.base.elastic_net import (
                RegularizedResults, RegularizedResultsWrapper
            )
            params = self._sqrt_lasso(alpha, refit, defaults["zero_tol"])
            results = RegularizedResults(self, params)
            return RegularizedResultsWrapper(results)

        from statsmodels.base.elastic_net import fit_elasticnet

        # Pure ridge has a closed-form solution; no iterative solver needed.
        if L1_wt == 0:
            return self._fit_ridge(alpha)

        # If a scale parameter is passed in, the non-profile
        # likelihood (residual sum of squares divided by -2) is used,
        # otherwise the profile likelihood is used.
        if profile_scale:
            loglike_kwds = {}
            score_kwds = {}
            hess_kwds = {}
        else:
            loglike_kwds = {"scale": 1}
            score_kwds = {"scale": 1}
            hess_kwds = {"scale": 1}

        return fit_elasticnet(self, method=method,
                              alpha=alpha,
                              L1_wt=L1_wt,
                              start_params=start_params,
                              loglike_kwds=loglike_kwds,
                              score_kwds=score_kwds,
                              hess_kwds=hess_kwds,
                              refit=refit,
                              check_step=False,
                              **defaults)

    def _sqrt_lasso(self, alpha, refit, zero_tol):
        # Solve the square-root lasso as a second-order cone program,
        # splitting the coefficients into positive and negative parts
        # (b = b+ - b-) so the L1 penalty becomes linear.

        try:
            import cvxopt
        except ImportError:
            msg = 'sqrt_lasso fitting requires the cvxopt module'
            raise ValueError(msg)

        n = len(self.endog)
        p = self.exog.shape[1]

        h0 = cvxopt.matrix(0., (2*p+1, 1))
        h1 = cvxopt.matrix(0., (n+1, 1))
        h1[1:, 0] = cvxopt.matrix(self.endog, (n, 1))

        # Linear constraints: b+ >= 0 and b- >= 0.
        G0 = cvxopt.spmatrix([], [], [], (2*p+1, 2*p+1))
        for i in range(1, 2*p+1):
            G0[i, i] = -1

        # Cone constraint encoding ||y - X(b+ - b-)|| <= t.
        G1 = cvxopt.matrix(0., (n+1, 2*p+1))
        G1[0, 0] = -1
        G1[1:, 1:p+1] = self.exog
        G1[1:, p+1:] = -self.exog

        # Objective: t / sqrt(n) + (alpha / n) * sum(b+ + b-).
        c = cvxopt.matrix(alpha / n, (2*p + 1, 1))
        c[0] = 1 / np.sqrt(n)

        from cvxopt import solvers
        solvers.options["show_progress"] = False

        rslt = solvers.socp(c, Gl=G0, hl=h0, Gq=[G1], hq=[h1])
        x = np.asarray(rslt['x']).flat
        bp = x[1:p+1]
        bn = x[p+1:]
        params = bp - bn

        if not refit:
            return params

        # Refit by unpenalized OLS on the selected (nonzero) variables.
        ii = np.flatnonzero(np.abs(params) > zero_tol)
        rfr = OLS(self.endog, self.exog[:, ii]).fit()
        params *= 0
        params[ii] = rfr.params

        return params

    def _fit_ridge(self, alpha):
        """
        Fit a linear model using ridge regression.

        Parameters
        ----------
        alpha : scalar or array_like
            The penalty weight. If a scalar, the same penalty weight
            applies to all variables in the model. If a vector, it
            must have the same length as `params`, and contains a
            penalty weight for each coefficient.

        Notes
        -----
        Equivalent to fit_regularized with L1_wt = 0 (but implemented
        more efficiently).
        """
        # Solve via the SVD of X: with X = U S V', the ridge solution is
        # V diag(s / (s^2 + n*alpha)) U' y.
        u, s, vt = np.linalg.svd(self.exog, 0)
        v = vt.T
        q = np.dot(u.T, self.endog) * s
        s2 = s * s
        if np.isscalar(alpha):
            sd = s2 + alpha * self.nobs
            params = q / sd
            params = np.dot(v, params)
        else:
            # Per-coefficient penalties: solve in the rotated basis.
            alpha = np.asarray(alpha)
            vtav = self.nobs * np.dot(vt, alpha[:, None] * v)
            d = np.diag(vtav) + s2
            np.fill_diagonal(vtav, d)
            r = np.linalg.solve(vtav, q)
            params = np.dot(v, r)

        from statsmodels.base.elastic_net import RegularizedResults
        return RegularizedResults(self, params)
class GLSAR(GLS):
    __doc__ = """
    Generalized Least Squares with AR covariance structure

    %(params)s
    rho : int
        The order of the autoregressive covariance.
    %(extra_params)s

    Notes
    -----
    GLSAR is considered to be experimental.
    The linear autoregressive process of order p--AR(p)--is defined as:
    TODO

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> X = range(1,8)
    >>> X = sm.add_constant(X)
    >>> Y = [1,3,4,5,8,10,9]
    >>> model = sm.GLSAR(Y, X, rho=2)
    >>> for i in range(6):
    ...     results = model.fit()
    ...     print("AR coefficients: {0}".format(model.rho))
    ...     rho, sigma = sm.regression.yule_walker(results.resid,
    ...                                            order=model.order)
    ...     model = sm.GLSAR(Y, X, rho)
    ...
    AR coefficients: [ 0.  0.]
    AR coefficients: [-0.52571491 -0.84496178]
    AR coefficients: [-0.6104153  -0.86656458]
    AR coefficients: [-0.60439494 -0.857867  ]
    AR coefficients: [-0.6048218  -0.85846157]
    AR coefficients: [-0.60479146 -0.85841922]
    >>> results.params
    array([-0.66661205,  1.60850853])
    >>> results.tvalues
    array([ -2.10304127,  21.8047269 ])
    >>> print(results.t_test([1, 0]))
    <T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
    >>> print(results.f_test(np.identity(2)))
    <F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>

    Or, equivalently

    >>> model2 = sm.GLSAR(Y, X, rho=2)
    >>> res = model2.iterative_fit(maxiter=6)
    >>> model2.rho
    array([-0.60479146, -0.85841922])
    """ % {'params': base._model_params_doc,
           'extra_params': base._missing_param_doc + base._extra_param_doc}
    # TODO: Complete docstring

    def __init__(self, endog, exog=None, rho=1, missing='none', hasconst=None,
                 **kwargs):
        # this looks strange, interpreting rho as order if it is int
        if isinstance(rho, (int, np.integer)):
            # An integer `rho` gives the AR order; coefficients start at 0.
            self.order = int(rho)
            self.rho = np.zeros(self.order, np.float64)
        else:
            self.rho = np.squeeze(np.asarray(rho))
            if len(self.rho.shape) not in [0, 1]:
                raise ValueError("AR parameters must be a scalar or a vector")
            if self.rho.shape == ():
                self.rho.shape = (1,)
            self.order = self.rho.shape[0]
        if exog is None:
            # JP this looks wrong, should be a regression on constant
            # results for rho estimate now identical to yule-walker on y
            # super(AR, self).__init__(endog, add_constant(endog))
            super(GLSAR, self).__init__(endog, np.ones((endog.shape[0], 1)),
                                        missing=missing, hasconst=None,
                                        **kwargs)
        else:
            super(GLSAR, self).__init__(endog, exog, missing=missing,
                                        **kwargs)

    def iterative_fit(self, maxiter=3, rtol=1e-4, **kwargs):
        """
        Perform an iterative two-stage procedure to estimate a GLS model.

        The model is assumed to have AR(p) errors, AR(p) parameters and
        regression coefficients are estimated iteratively.

        Parameters
        ----------
        maxiter : int, optional
            The number of iterations.
        rtol : float, optional
            Relative tolerance between estimated coefficients to stop the
            estimation. Stops if max(abs(last - current) / abs(last)) < rtol.
        **kwargs
            Additional keyword arguments passed to `fit`.

        Returns
        -------
        RegressionResults
            The results computed using an iterative fit.
        """
        # TODO: update this after going through example.
        converged = False
        i = -1  # need to initialize for maxiter < 1 (skip loop)
        history = {'params': [], 'rho': [self.rho]}
        for i in range(maxiter - 1):
            # Discard the cached decomposition: rho changed last iteration,
            # so the whitened data must be rebuilt.
            if hasattr(self, 'pinv_wexog'):
                del self.pinv_wexog
            self.initialize()
            results = self.fit()
            history['params'].append(results.params)
            if i == 0:
                last = results.params
            else:
                # Relative change in coefficients as convergence criterion.
                diff = np.max(np.abs(last - results.params) / np.abs(last))
                if diff < rtol:
                    converged = True
                    break
                last = results.params
            # Re-estimate the AR coefficients from the current residuals.
            self.rho, _ = yule_walker(results.resid,
                                      order=self.order, df=None)
            history['rho'].append(self.rho)

        # why not another call to self.initialize
        # Use kwarg to insert history
        if not converged and maxiter > 0:
            # maxiter <= 0 just does OLS
            if hasattr(self, 'pinv_wexog'):
                del self.pinv_wexog
            self.initialize()

        # if converged then this is a duplicate fit, because we did not
        # update rho
        results = self.fit(history=history, **kwargs)
        results.iter = i + 1
        # add last fit to history, not if duplicate fit
        if not converged:
            results.history['params'].append(results.params)
            results.iter += 1

        results.converged = converged

        return results

    def whiten(self, x):
        """
        Whiten a series of columns according to an AR(p) covariance structure.

        Whitening using this method drops the initial p observations.

        Parameters
        ----------
        x : array_like
            The data to be whitened.

        Returns
        -------
        ndarray
            The whitened data.
        """
        # TODO: notation for AR process
        x = np.asarray(x, np.float64)
        _x = x.copy()

        # the following loops over the first axis, works for 1d and nd
        # Subtract rho[i] * x_{t-i-1} from each observation, then drop the
        # first `order` rows for which the full AR history is unavailable.
        for i in range(self.order):
            _x[(i + 1):] = _x[(i + 1):] - self.rho[i] * x[0:-(i + 1)]
        return _x[self.order:]
def yule_walker(x, order=1, method="adjusted", df=None, inv=False,
                demean=True):
    """
    Estimate AR(p) parameters from a sequence using the Yule-Walker equations.

    Adjusted or maximum-likelihood estimator (mle)

    Parameters
    ----------
    x : array_like
        A 1d array.
    order : int, optional
        The order of the autoregressive process. Default is 1.
    method : str, optional
        Method can be 'adjusted' or 'mle' and this determines
        denominator in estimate of autocorrelation function (ACF) at
        lag k. If 'mle', the denominator is n=X.shape[0], if 'adjusted'
        the denominator is n-k. The default is adjusted.
    df : int, optional
        Specifies the degrees of freedom. If `df` is supplied, then it
        is assumed the X has `df` degrees of freedom rather than `n`.
        Default is None.
    inv : bool
        If inv is True the inverse of R is also returned. Default is
        False.
    demean : bool
        True, the mean is subtracted from `X` before estimation.

    Returns
    -------
    rho : ndarray
        AR(p) coefficients computed using the Yule-Walker method.
    sigma : float
        The estimate of the residual standard deviation.

    See Also
    --------
    burg : Burg's AR estimator.

    Notes
    -----
    See https://en.wikipedia.org/wiki/Autoregressive_moving_average_model for
    further details.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from statsmodels.datasets.sunspots import load
    >>> data = load(as_pandas=False)
    >>> rho, sigma = sm.regression.yule_walker(data.endog, order=4,
    ...                                        method="mle")
    >>> rho
    array([ 1.28310031, -0.45240924, -0.20770299,  0.04794365])
    >>> sigma
    16.808022730464351
    """
    # TODO: define R better, look back at notes and technical notes on YW.
    # First link here is useful
    # http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
    method = string_like(
        method, "method", options=("adjusted", "unbiased", "mle")
    )
    if method == "unbiased":
        warnings.warn(
            # FIX: message previously read "deprecated in factor of adjusted".
            "unbiased is deprecated in favor of adjusted to reflect that the "
            "term is adjusting the sample size used in the autocovariance "
            "calculation rather than estimating an unbiased autocovariance. "
            "After release 0.13, using 'unbiased' will raise.",
            FutureWarning,
        )
        method = "adjusted"
    if method not in ("adjusted", "mle"):
        raise ValueError("ACF estimation method must be 'adjusted' or 'MLE'")
    x = np.array(x, dtype=np.float64)
    if demean:
        # np.array above already copied, so in-place demeaning is safe.
        x -= x.mean()
    # Effective sample size: an explicit df overrides the series length.
    n = df or x.shape[0]

    # this handles df_resid ie., n - p
    adj_needed = method == "adjusted"

    if x.ndim > 1 and x.shape[1] != 1:
        raise ValueError("expecting a vector to estimate AR parameters")
    # r[k] is the lag-k sample autocovariance; denominator depends on method.
    r = np.zeros(order+1, np.float64)
    r[0] = (x ** 2).sum() / n
    for k in range(1, order+1):
        r[k] = (x[0:-k] * x[k:]).sum() / (n - k * adj_needed)
    # Solve the Toeplitz system R rho = r[1:] for the AR coefficients.
    R = toeplitz(r[:-1])

    rho = np.linalg.solve(R, r[1:])
    sigmasq = r[0] - (r[1:]*rho).sum()
    if inv:
        return rho, np.sqrt(sigmasq), np.linalg.inv(R)
    else:
        return rho, np.sqrt(sigmasq)
def burg(endog, order=1, demean=True):
    """
    Compute Burg's AR(p) parameter estimator.

    Parameters
    ----------
    endog : array_like
        The endogenous variable.
    order : int, optional
        Order of the AR. Default is 1.
    demean : bool, optional
        Flag indicating to subtract the mean from endog before estimation.

    Returns
    -------
    rho : ndarray
        The AR(p) coefficients computed using Burg's algorithm.
    sigma2 : float
        The estimate of the residual variance.

    See Also
    --------
    yule_walker : Estimate AR parameters using the Yule-Walker method.

    Notes
    -----
    AR model estimated includes a constant that is estimated using the sample
    mean (see [1]_). This value is not reported.

    References
    ----------
    .. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
        and forecasting. Springer.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from statsmodels.datasets.sunspots import load
    >>> data = load(as_pandas=True)
    >>> rho, sigma2 = sm.regression.linear_model.burg(data.endog, order=4)
    >>> rho
    array([ 1.30934186, -0.48086633, -0.20185982,  0.05501941])
    >>> sigma2
    271.2467306963966
    """
    # Avoid circular imports
    from statsmodels.tsa.stattools import levinson_durbin_pacf, pacf_burg

    endog = np.squeeze(np.asarray(endog))
    if endog.ndim != 1:
        raise ValueError('endog must be 1-d or squeezable to 1-d.')
    order = int(order)
    if order < 1:
        # FIX: the message used to say "larger than 1", but the check (and
        # the default order=1) accepts order == 1.
        raise ValueError('order must be an integer larger than 0')
    if demean:
        endog = endog - endog.mean()
    # Burg recursion yields partial autocorrelations; Levinson-Durbin
    # converts them into AR coefficients.
    pacf, sigma = pacf_burg(endog, order, demean=demean)
    ar, _ = levinson_durbin_pacf(pacf)
    # sigma holds residual variances for orders 0..order; report the last.
    return ar, sigma[-1]
class RegressionResults(base.LikelihoodModelResults):
r"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Parameters
----------
model : RegressionModel
The regression model instance.
params : ndarray
The estimated parameters.
normalized_cov_params : ndarray
The normalized covariance parameters.
scale : float
The estimated scale of the residuals.
cov_type : str
The covariance estimator used in the results.
cov_kwds : dict
Additional keywords used in the covariance specification.
use_t : bool
Flag indicating to use the Student's t in inference.
**kwargs
Additional keyword arguments used to initialize the results.
Attributes
----------
pinv_wexog
See model class docstring for implementation details.
cov_type
Parameter covariance estimator used for standard errors and t-stats.
df_model
Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present.
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
history
Estimation history for iterative estimators.
model
A pointer to the model instance that called fit() or results.
params
The linear coefficients that minimize the least squares
criterion. This is usually called Beta for the classical
linear model.
"""
_cache = {} # needs to be a class attribute for scale setter?
    def __init__(self, model, params, normalized_cov_params=None, scale=1.,
                 cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
        """
        Initialize the results instance and configure the covariance
        estimator; see the class docstring for parameter descriptions.
        """
        super(RegressionResults, self).__init__(
            model, params, normalized_cov_params, scale)

        # Keep wresid since needed by predict
        self._data_in_cache.remove("wresid")
        # Instance-level cache shadows the shared class attribute `_cache`.
        self._cache = {}
        # Singular values of the whitened design, if the model computed them;
        # used later for eigenvals/condition_number without refactorizing.
        if hasattr(model, 'wexog_singular_values'):
            self._wexog_singular_values = model.wexog_singular_values
        else:
            self._wexog_singular_values = None

        self.df_model = model.df_model
        self.df_resid = model.df_resid

        if cov_type == 'nonrobust':
            self.cov_type = 'nonrobust'
            self.cov_kwds = {
                'description': 'Standard Errors assume that the ' +
                'covariance matrix of the errors is correctly ' +
                'specified.'}
            if use_t is None:
                use_t = True  # TODO: class default
            self.use_t = use_t
        else:
            if cov_kwds is None:
                cov_kwds = {}
            if 'use_t' in cov_kwds:
                # TODO: we want to get rid of 'use_t' in cov_kwds
                use_t_2 = cov_kwds.pop('use_t')
                if use_t is None:
                    use_t = use_t_2
                # TODO: warn or not?
            # Robust covariances: delegate storing cov_type/cov_kwds/use_t;
            # use_self=True mutates this instance rather than copying it.
            self.get_robustcov_results(cov_type=cov_type, use_self=True,
                                       use_t=use_t, **cov_kwds)
        # Attach any extra keyword arguments as plain attributes.
        for key in kwargs:
            setattr(self, key, kwargs[key])
def conf_int(self, alpha=.05, cols=None):
"""
Compute the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. The default
`alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
Columns to included in returned confidence intervals.
Returns
-------
array_like
The confidence intervals.
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
"""Number of observations n."""
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
"""The predicted values for the original (unwhitened) design."""
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
"""
The residuals of the transformed/whitened regressand and regressor(s).
"""
return self.model.wendog - self.model.predict(
self.params, self.model.wexog)
@cache_readonly
def resid(self):
"""The residuals of the model."""
return self.model.endog - self.model.predict(
self.params, self.model.exog)
# TODO: fix writable example
@cache_writable()
def scale(self):
"""
A scale factor for the covariance matrix.
The Default value is ssr/(n-p). Note that the square root of `scale`
is often called the standard error of the regression.
"""
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
"""Sum of squared (whitened) residuals."""
wresid = self.wresid
return np.dot(wresid, wresid)
    @cache_readonly
    def centered_tss(self):
        """The total (weighted) sum of squares centered about the mean."""
        model = self.model
        weights = getattr(model, 'weights', None)
        sigma = getattr(model, 'sigma', None)
        if weights is not None:
            # WLS: center with the weighted mean and weight the squares.
            mean = np.average(model.endog, weights=weights)
            return np.sum(weights * (model.endog - mean)**2)
        elif sigma is not None:
            # GLS: estimate the mean by regressing the whitened endog on a
            # whitened column of ones, then sum squared whitened deviations.
            # Exactly matches WLS when sigma is diagonal
            iota = np.ones_like(model.endog)
            iota = model.whiten(iota)
            mean = model.wendog.dot(iota) / iota.dot(iota)
            err = model.endog - mean
            err = model.whiten(err)
            return np.sum(err**2)
        else:
            # OLS/plain case: ordinary centered sum of squares.
            centered_endog = model.wendog - model.wendog.mean()
            return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
"""
Uncentered sum of squares.
The sum of the squared values of the (whitened) endogenous response
variable.
"""
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
"""
The explained sum of squares.
If a constant is present, the centered total sum of squares minus the
sum of squared residuals. If there is no constant, the uncentered total
sum of squares is used.
"""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
"""
R-squared of the model.
This is defined here as 1 - `ssr`/`centered_tss` if the constant is
included in the model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted.
"""
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
"""
Adjusted R-squared.
This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`)
if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if
no constant is included.
"""
return 1 - (np.divide(self.nobs - self.k_constant, self.df_resid)
* (1 - self.rsquared))
@cache_readonly
def mse_model(self):
"""
Mean squared error the model.
The explained sum of squares divided by the model degrees of freedom.
"""
if np.all(self.df_model == 0.0):
return np.full_like(self.ess, np.nan)
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
"""
Mean squared error of the residuals.
The sum of squared residuals divided by the residual degrees of
freedom.
"""
if np.all(self.df_resid == 0.0):
return np.full_like(self.ssr, np.nan)
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
"""
Total mean squared error.
The uncentered total sum of squares divided by the number of
observations.
"""
if np.all(self.df_resid + self.df_model == 0.0):
return np.full_like(self.centered_tss, np.nan)
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
    @cache_readonly
    def fvalue(self):
        """
        F-statistic of the fully specified model.

        Calculated as the mean squared error of the model divided by the mean
        squared error of the residuals if the nonrobust covariance is used.
        Otherwise computed using a Wald-like quadratic form that tests whether
        all coefficients (excluding the constant) are zero.
        """
        if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
            # with heteroscedasticity or correlation robustness
            k_params = self.normalized_cov_params.shape[0]
            # Restriction matrix: identity selects every coefficient.
            mat = np.eye(k_params)
            const_idx = self.model.data.const_idx
            # TODO: What if model includes implicit constant, e.g. all
            # dummies but no constant regressor?
            # TODO: Restats as LM test by projecting orthogonalizing
            # to constant?
            if self.model.data.k_constant == 1:
                # if constant is implicit, return nan see #2444
                if const_idx is None:
                    return np.nan
                # Drop the constant's row so only the slopes are tested.
                idx = lrange(k_params)
                idx.pop(const_idx)
                mat = mat[idx]  # remove constant
                if mat.size == 0:  # see #3642
                    return np.nan
            # Joint Wald test that the remaining coefficients are zero.
            ft = self.f_test(mat)
            # using backdoor to set another attribute that we already have
            self._cache['f_pvalue'] = float(ft.pvalue)
            return float(ft.fvalue)
        else:
            # for standard homoscedastic case
            return self.mse_model/self.mse_resid
    @cache_readonly
    def f_pvalue(self):
        """The p-value of the F-statistic."""
        # Special case for df_model 0
        # A model with no regressors has no F test; return NaN instead of
        # evaluating the F distribution with zero numerator df.
        if self.df_model == 0:
            return np.full_like(self.fvalue, np.nan)
        return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
"""The standard errors of the parameter estimates."""
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
r"""
Akaike's information criteria.
For a model with a constant :math:`-2llf + 2(df\_model + 1)`. For a
model without a constant :math:`-2llf + 2(df\_model)`.
"""
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
r"""
Bayes' information criteria.
For a model with a constant :math:`-2llf + \log(n)(df\_model+1)`.
For a model without a constant :math:`-2llf + \log(n)(df\_model)`.
"""
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T,
self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
# TODO: make these properties reset bse
def _HCCM(self, scale):
H = np.dot(self.model.pinv_wexog,
scale[:, None] * self.model.pinv_wexog.T)
return H
def _abat_diagonal(self, a, b):
# equivalent to np.diag(a @ b @ a.T)
return np.einsum('ij,ik,kj->i', a, a, b)
@cache_readonly
def cov_HC0(self):
"""
Heteroscedasticity robust covariance matrix. See HC0_se.
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
Heteroscedasticity robust covariance matrix. See HC1_se.
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
Heteroscedasticity robust covariance matrix. See HC2_se.
"""
wexog = self.model.wexog
h = self._abat_diagonal(wexog, self.normalized_cov_params)
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
Heteroscedasticity robust covariance matrix. See HC3_se.
"""
wexog = self.model.wexog
h = self._abat_diagonal(wexog, self.normalized_cov_params)
self.het_scale = (self.wresid / (1 - h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
    @cache_readonly
    def HC0_se(self):
        """
        White's (1980) heteroskedasticity robust standard errors.

        Notes
        -----
        Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
        where e_i = resid[i].

        When HC0_se or cov_HC0 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is just
        resid**2.
        """
        return np.sqrt(np.diag(self.cov_HC0))
    @cache_readonly
    def HC1_se(self):
        """
        MacKinnon and White's (1985) heteroskedasticity robust standard errors.

        Notes
        -----
        Defined as sqrt(diag(n/(n-p) * HC_0)).

        When HC1_se or cov_HC1 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
        n/(n-p)*resid**2.
        """
        return np.sqrt(np.diag(self.cov_HC1))
    @cache_readonly
    def HC2_se(self):
        """
        MacKinnon and White's (1985) heteroskedasticity robust standard errors.

        Notes
        -----
        Defined as
        sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)) X (X.T X)^(-1)))
        where h_ii = x_i(X.T X)^(-1)x_i.T

        When HC2_se or cov_HC2 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
        resid^(2)/(1-h_ii).
        """
        return np.sqrt(np.diag(self.cov_HC2))
    @cache_readonly
    def HC3_se(self):
        """
        MacKinnon and White's (1985) heteroskedasticity robust standard errors.

        Notes
        -----
        Defined as
        sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)^(2)) X (X.T X)^(-1)))
        where h_ii = x_i(X.T X)^(-1)x_i.T.

        When HC3_se or cov_HC3 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
        resid^(2)/(1-h_ii)^(2).
        """
        return np.sqrt(np.diag(self.cov_HC3))
    @cache_readonly
    def resid_pearson(self):
        """
        Residuals, normalized to have unit variance.

        Returns
        -------
        array_like
            The array `wresid` normalized by the sqrt of the scale to have
            unit variance.
        """
        if not hasattr(self, 'resid'):
            # NOTE(review): `resid` is a cached property here, so hasattr
            # normally triggers its computation and succeeds; this guards
            # results classes without residuals — confirm against callers.
            raise ValueError('Method requires residuals.')
        eps = np.finfo(self.wresid.dtype).eps
        if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
            # do not divide if scale is zero close to numerical precision
            warnings.warn(
                "All residuals are 0, cannot compute normed residuals.",
                RuntimeWarning
            )
            return self.wresid
        else:
            return self.wresid / np.sqrt(self.scale)
    def _is_nested(self, restricted):
        """
        Check whether `restricted` appears to be nested in this model.

        Parameters
        ----------
        restricted : Result instance
            The restricted model is assumed to be nested in the current
            model. The result instance of the restricted model is required to
            have two attributes, residual sum of squares, `ssr`, residual
            degrees of freedom, `df_resid`.

        Returns
        -------
        nested : bool
            True if nested, otherwise false

        Notes
        -----
        A model nests another model if the regressors in the smaller
        model are spanned by the regressors in the larger model and
        the regressand is identical.
        """
        # Same number of observations is a necessary condition.
        if self.model.nobs != restricted.model.nobs:
            return False

        # The full design must have strictly greater rank.
        full_rank = self.model.rank
        restricted_rank = restricted.model.rank
        if full_rank <= restricted_rank:
            return False

        restricted_exog = restricted.model.wexog
        full_wresid = self.wresid

        # If nested, the full model's residuals are orthogonal to the
        # restricted regressors, so the mean scores should be ~0.
        scores = restricted_exog * full_wresid[:, None]
        score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
        # TODO: Could be improved, and may fail depending on scale of
        # regressors
        return np.allclose(score_l2, 0)
    def compare_lm_test(self, restricted, demean=True, use_lr=False):
        """
        Use Lagrange Multiplier test to test a set of linear restrictions.

        Parameters
        ----------
        restricted : Result instance
            The restricted model is assumed to be nested in the
            current model. The result instance of the restricted model
            is required to have two attributes, residual sum of
            squares, `ssr`, residual degrees of freedom, `df_resid`.
        demean : bool
            Flag indicating whether the demean the scores based on the
            residuals from the restricted model. If True, the covariance of
            the scores are used and the LM test is identical to the large
            sample version of the LR test.
        use_lr : bool
            A flag indicating whether to estimate the covariance of the model
            scores using the unrestricted model. Setting the to True improves
            the power of the test.

        Returns
        -------
        lm_value : float
            The test statistic which has a chi2 distributed.
        p_value : float
            The p-value of the test statistic.
        df_diff : int
            The degrees of freedom of the restriction, i.e. difference in df
            between models.

        Notes
        -----
        The LM test examines whether the scores from the restricted model are
        0. If the null is true, and the restrictions are valid, then the
        parameters of the restricted model should be close to the minimum of
        the sum of squared errors, and so the scores should be close to zero,
        on average.
        """
        import statsmodels.stats.sandwich_covariance as sw
        from numpy.linalg import inv

        if not self._is_nested(restricted):
            raise ValueError("Restricted model is not nested by full model.")

        # Scores: full-model regressors weighted by restricted residuals.
        wresid = restricted.wresid
        wexog = self.model.wexog
        scores = wexog * wresid[:, None]

        n = self.nobs
        df_full = self.df_resid
        df_restr = restricted.df_resid
        df_diff = (df_restr - df_full)

        # Mean score; kept before any optional demeaning/replacement below.
        s = scores.mean(axis=0)
        if use_lr:
            # Estimate the score covariance from the unrestricted residuals.
            scores = wexog * self.wresid[:, None]
            demean = False

        if demean:
            scores = scores - scores.mean(0)[None, :]

        # Form matters here. If homoskedastics can be sigma^2 (X'X)^-1
        # If Heteroskedastic then the form below is fine
        # If HAC then need to use HAC
        # If Cluster, should use cluster
        # The covariance estimator for the scores must match the one used
        # for the parameter covariance of this results instance.
        cov_type = getattr(self, 'cov_type', 'nonrobust')
        if cov_type == 'nonrobust':
            sigma2 = np.mean(wresid**2)
            xpx = np.dot(wexog.T, wexog) / n
            s_inv = inv(sigma2 * xpx)
        elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
            s_inv = inv(np.dot(scores.T, scores) / n)
        elif cov_type == 'HAC':
            maxlags = self.cov_kwds['maxlags']
            s_inv = inv(sw.S_hac_simple(scores, maxlags) / n)
        elif cov_type == 'cluster':
            # cluster robust standard errors
            groups = self.cov_kwds['groups']
            # TODO: Might need demean option in S_crosssection by group?
            s_inv = inv(sw.S_crosssection(scores, groups))
        else:
            raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
                             'currently connected')

        # Quadratic form n * s' S^{-1} s, chi2 with df_diff df under the null.
        lm_value = n * (s @ s_inv @ s.T)
        p_value = stats.chi2.sf(lm_value, df_diff)
        return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
"""
Use F test to test whether restricted model is correct.
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
Returns
-------
f_value : float
The test statistic which has an F distribution.
p_value : float
The p-value of the test statistic.
df_diff : int
The degrees of freedom of the restriction, i.e. difference in
df between models.
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two
models. This is not a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results under
the assumption of homoscedasticity and no autocorrelation
(sphericity).
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
    def compare_lr_test(self, restricted, large_sample=False):
        """
        Likelihood ratio test to test whether restricted model is correct.

        Parameters
        ----------
        restricted : Result instance
            The restricted model is assumed to be nested in the current model.
            The result instance of the restricted model is required to have two
            attributes, residual sum of squares, `ssr`, residual degrees of
            freedom, `df_resid`.
        large_sample : bool
            Flag indicating whether to use a heteroskedasticity robust version
            of the LR test, which is a modified LM test.

        Returns
        -------
        lr_stat : float
            The likelihood ratio which is chisquare distributed with df_diff
            degrees of freedom.
        p_value : float
            The p-value of the test statistic.
        df_diff : int
            The degrees of freedom of the restriction, i.e. difference in df
            between models.

        Notes
        -----
        The exact likelihood ratio is valid for homoskedastic data,
        and is defined as

        .. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
           {\\mathcal{L}_{alternative}}\\right)

        where :math:`\\mathcal{L}` is the likelihood of the
        model. With :math:`D` distributed as chisquare with df equal
        to difference in number of parameters or equivalently
        difference in residual degrees of freedom.

        The large sample version of the likelihood ratio is defined as

        .. math:: D=n s^{\\prime}S^{-1}s

        where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`

        .. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}

        is the average score of the model evaluated using the
        residuals from null model and the regressors from the
        alternative model and :math:`S` is the covariance of the
        scores, :math:`s_{i}`. The covariance of the scores is
        estimated using the same estimator as in the alternative
        model.

        This test compares the loglikelihood of the two models. This
        may not be a valid test, if there is unspecified
        heteroscedasticity or correlation. This method will issue a
        warning if this is detected but still return the results
        without taking unspecified heteroscedasticity or correlation
        into account.
        """
        # TODO: put into separate function, needs tests

        # See mailing list discussion October 17,
        # The robust (large-sample) variant is the LM test with the score
        # covariance estimated from the unrestricted model.
        if large_sample:
            return self.compare_lm_test(restricted, use_lr=True)

        has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
        has_robust2 = (
            getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust')

        if has_robust1 or has_robust2:
            warnings.warn('Likelihood Ratio test is likely invalid with ' +
                          'robust covariance, proceeding anyway',
                          InvalidTestWarning)

        llf_full = self.llf
        llf_restr = restricted.llf
        df_full = self.df_resid
        df_restr = restricted.df_resid

        lrdf = (df_restr - df_full)
        lrstat = -2*(llf_restr - llf_full)
        lr_pvalue = stats.chi2.sf(lrstat, lrdf)

        return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs):
"""
Create new results instance with robust covariance as default.
Parameters
----------
cov_type : str
The type of robust sandwich estimator to use. See Notes below.
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`True` if the cov_type is nonrobust, and `False` in all other
cases.
**kwargs
Required or optional arguments for robust covariance calculation.
See Notes below.
Returns
-------
RegressionResults
This method creates a new results instance with the
requested robust covariance as the default covariance of
the parameters. Inferential statistics like p-values and
hypothesis tests will be based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' uses a predefined scale
``scale``: float, optional
Argument to set the scale. Default is 1.
- 'HC0', 'HC1', 'HC2', 'HC3': heteroscedasticity robust covariance
- no keyword arguments
- 'HAC': heteroskedasticity-autocorrelation robust covariance
``maxlag`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
kernels currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
``use_correction``: bool, optional
If true, use small sample correction
- 'cluster': clustered covariance estimator
``groups`` : array_like[int], required :
Integer-valued index of clusters or groups.
``use_correction``: bool, optional
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
``df_correction``: bool, optional
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is also
adjusted. When `use_t` is also True, then pvalues are
computed using the Student's t distribution using the
corrected values. These may differ substantially from
p-values based on the normal is the number of groups is
small.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum': Driscoll and Kraay, heteroscedasticity and
autocorrelation robust covariance for panel data
# TODO: more options needed here
``time`` : array_like, required
index of time periods
``maxlag`` : integer, required
number of lags to use
``kernel`` : {callable, str}, optional
The available kernels are ['bartlett', 'uniform']. The default is
Bartlett.
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the the sandwich covariance is calculated without small
sample correction. If `use_correction = 'cluster'` (default),
then the same small sample correction as in the case of
`covtype='cluster'` is used.
``df_correction`` : bool, optional
The adjustment to df_resid, see cov_type 'cluster' above
- 'hac-panel': heteroscedasticity and autocorrelation robust standard
errors in panel data. The data needs to be sorted in this case, the
time series for each panel unit or cluster need to be stacked. The
membership to a time series of an individual or group can be either
specified by group indicators or by increasing time periods. One of
``groups`` or ``time`` is required. # TODO: we need more options here
``groups`` : array_like[int]
indicator for groups
``time`` : array_like[int]
index of time periods
``maxlag`` : int, required
number of lags to use
``kernel`` : {callable, str}, optional
Available kernels are ['bartlett', 'uniform'], default
is Bartlett
``use_correction`` : {False, 'hac', 'cluster'}, optional
If False the sandwich covariance is calculated without
small sample correction.
``df_correction`` : bool, optional
Adjustment to df_resid, see cov_type 'cluster' above
**Reminder**: ``use_correction`` in "hac-groupsum" and "hac-panel" is
not bool, needs to be in {False, 'hac', 'cluster'}.
.. todo:: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.base.covtype import normalize_cov_type, descriptions
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwargs:
kwargs['weights_func'] = kwargs.pop('kernel')
if 'weights_func' in kwargs and not callable(kwargs['weights_func']):
kwargs['weights_func'] = sw.kernel_dict[kwargs['weights_func']]
# TODO: make separate function that returns a robust cov plus info
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t': use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwargs.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user did not explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwargs, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwargs
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = descriptions['fixed_scale']
res.cov_kwds['scale'] = scale = kwargs.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwargs:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type.lower() == 'hac':
# TODO: check if required, default in cov_hac_simple
maxlags = kwargs['maxlags']
res.cov_kwds['maxlags'] = maxlags
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwargs.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags,
correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(
self, nlags=maxlags, weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
# cluster robust standard errors, one- or two-way
groups = kwargs['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwargs.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(
self, groups, use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:, 0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(
self, groups, use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
# cluster robust standard errors
res.cov_kwds['time'] = time = kwargs.get('time', None)
res.cov_kwds['groups'] = groups = kwargs.get('groups', None)
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
time = np.asarray(time)
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwargs['time']
# TODO: nlags is currently required
# nlags = kwargs.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwargs['maxlags']
use_correction = kwargs.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwargs.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(
self, maxlags, time, weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
@Appender(pred.get_prediction.__doc__)
def get_prediction(self, exog=None, transform=True, weights=None,
                   row_labels=None, **kwargs):
    # Thin delegation to the shared prediction helper; the full parameter
    # documentation is attached from pred.get_prediction via @Appender.
    prediction = pred.get_prediction(
        self,
        exog=exog,
        transform=transform,
        weights=weights,
        row_labels=row_labels,
        **kwargs)
    return prediction
def summary(self, yname=None, xname=None, title=None, alpha=.05):
    """
    Summarize the Regression Results.

    Parameters
    ----------
    yname : str, optional
        Name of endogenous (response) variable. The Default is `y`.
    xname : list[str], optional
        Names for the exogenous variables. Default is `var_##` for ## in
        the number of regressors. Must match the number of parameters
        in the model.
    title : str, optional
        Title for the top table. If not None, then this replaces the
        default title.
    alpha : float
        The significance level for the confidence intervals.

    Returns
    -------
    Summary
        Instance holding the summary tables and text, which can be printed
        or converted to various output formats.

    See Also
    --------
    statsmodels.iolib.summary.Summary : A class that holds summary results.
    """
    from statsmodels.stats.stattools import (
        jarque_bera, omni_normtest, durbin_watson)

    # Residual-based normality diagnostics shown in the bottom table.
    jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
    omni, omnipv = omni_normtest(self.wresid)

    eigvals = self.eigenvals
    condno = self.condition_number

    # TODO: Avoid adding attributes in non-__init__
    self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
                      omni=omni, omnipv=omnipv, condno=condno,
                      mineigval=eigvals[-1])

    # TODO not used yet
    # diagn_left_header = ['Models stats']
    # diagn_right_header = ['Residual stats']

    # TODO: requiring list/iterable is a bit annoying
    # need more control over formatting
    # TODO: default do not work if it's not identically spelled

    # Left column of the top table: model metadata (None means the
    # Summary machinery fills the value from the results instance).
    top_left = [('Dep. Variable:', None),
                ('Model:', None),
                ('Method:', ['Least Squares']),
                ('Date:', None),
                ('Time:', None),
                ('No. Observations:', None),
                ('Df Residuals:', None),
                ('Df Model:', None),
                ]

    if hasattr(self, 'cov_type'):
        top_left.append(('Covariance Type:', [self.cov_type]))

    # Label R-squared as "(uncentered)" when the model has no constant.
    rsquared_type = '' if self.k_constant else ' (uncentered)'
    top_right = [('R-squared' + rsquared_type + ':',
                  ["%#8.3f" % self.rsquared]),
                 ('Adj. R-squared' + rsquared_type + ':',
                  ["%#8.3f" % self.rsquared_adj]),
                 ('F-statistic:', ["%#8.4g" % self.fvalue]),
                 ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
                 ('Log-Likelihood:', None),
                 ('AIC:', ["%#8.4g" % self.aic]),
                 ('BIC:', ["%#8.4g" % self.bic])
                 ]

    diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
                  ('Prob(Omnibus):', ["%#6.3f" % omnipv]),
                  ('Skew:', ["%#6.3f" % skew]),
                  ('Kurtosis:', ["%#6.3f" % kurtosis])
                  ]

    diagn_right = [('Durbin-Watson:',
                    ["%#8.3f" % durbin_watson(self.wresid)]
                    ),
                   ('Jarque-Bera (JB):', ["%#8.3f" % jb]),
                   ('Prob(JB):', ["%#8.3g" % jbpv]),
                   ('Cond. No.', ["%#8.3g" % condno])
                   ]

    if title is None:
        title = self.model.__class__.__name__ + ' ' + "Regression Results"

    # create summary table instance
    from statsmodels.iolib.summary import Summary
    smry = Summary()
    smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                         yname=yname, xname=xname, title=title)
    smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                          use_t=self.use_t)
    smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
                         yname=yname, xname=xname,
                         title="")

    # add warnings/notes, added to text format only
    etext = []
    if not self.k_constant:
        etext.append(
            "R² is computed without centering (uncentered) since the "
            "model does not contain a constant."
        )
    if hasattr(self, 'cov_type'):
        etext.append(self.cov_kwds['description'])
    if self.model.exog.shape[0] < self.model.exog.shape[1]:
        wstr = "The input rank is higher than the number of observations."
        etext.append(wstr)
    # Warn on a (near-)singular design; otherwise on a large condition
    # number, which suggests multicollinearity.
    if eigvals[-1] < 1e-10:
        wstr = "The smallest eigenvalue is %6.3g. This might indicate "
        wstr += "that there are\n"
        wstr += "strong multicollinearity problems or that the design "
        wstr += "matrix is singular."
        wstr = wstr % eigvals[-1]
        etext.append(wstr)
    elif condno > 1000:  # TODO: what is recommended?
        wstr = "The condition number is large, %6.3g. This might "
        wstr += "indicate that there are\n"
        wstr += "strong multicollinearity or other numerical "
        wstr += "problems."
        wstr = wstr % condno
        etext.append(wstr)

    if etext:
        etext = ["[{0}] {1}".format(i + 1, text)
                 for i, text in enumerate(etext)]
        etext.insert(0, "Notes:")
        smry.add_extra_txt(etext)
    return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
             float_format="%.4f"):
    """
    Experimental summary function to summarize the regression results.

    Parameters
    ----------
    yname : str
        The name of the dependent variable (optional).
    xname : list[str], optional
        Names for the exogenous variables. Default is `var_##` for ## in
        the number of regressors. Must match the number of parameters
        in the model.
    title : str, optional
        Title for the top table. If not None, then this replaces the
        default title.
    alpha : float
        The significance level for the confidence intervals.
    float_format : str
        The format for floats in parameters summary.

    Returns
    -------
    Summary
        Instance holding the summary tables and text, which can be printed
        or converted to various output formats.

    See Also
    --------
    statsmodels.iolib.summary2.Summary
        A class that holds summary results.
    """
    # Diagnostics
    from statsmodels.stats.stattools import (jarque_bera,
                                             omni_normtest,
                                             durbin_watson)

    jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
    omni, omnipv = omni_normtest(self.wresid)
    dw = durbin_watson(self.wresid)
    eigvals = self.eigenvals
    condno = self.condition_number
    # Sort so that eigvals[-1] is the smallest eigenvalue below.
    eigvals = np.sort(eigvals)  # in increasing order
    diagnostic = dict([
        ('Omnibus:', "%.3f" % omni),
        ('Prob(Omnibus):', "%.3f" % omnipv),
        ('Skew:', "%.3f" % skew),
        ('Kurtosis:', "%.3f" % kurtosis),
        ('Durbin-Watson:', "%.3f" % dw),
        ('Jarque-Bera (JB):', "%.3f" % jb),
        ('Prob(JB):', "%.3f" % jbpv),
        ('Condition No.:', "%.0f" % condno)
    ])

    # Summary
    from statsmodels.iolib import summary2
    smry = summary2.Summary()
    smry.add_base(results=self, alpha=alpha, float_format=float_format,
                  xname=xname, yname=yname, title=title)
    smry.add_dict(diagnostic)

    # Warnings about singularity / multicollinearity, text-only.
    if eigvals[-1] < 1e-10:
        warn = "The smallest eigenvalue is %6.3g. This might indicate that\
 there are strong multicollinearity problems or that the design\
 matrix is singular." % eigvals[-1]
        smry.add_text(warn)
    if condno > 1000:
        warn = "* The condition number is large (%.g). This might indicate \
strong multicollinearity or other numerical problems." % condno
        smry.add_text(warn)

    return smry
class OLSResults(RegressionResults):
    """
    Results class for an OLS model.

    Parameters
    ----------
    model : RegressionModel
        The regression model instance.
    params : ndarray
        The estimated parameters.
    normalized_cov_params : ndarray
        The normalized covariance parameters.
    scale : float
        The estimated scale of the residuals.
    cov_type : str
        The covariance estimator used in the results.
    cov_kwds : dict
        Additional keywords used in the covariance specification.
    use_t : bool
        Flag indicating to use the Student's t in inference.
    **kwargs
        Additional keyword arguments used to initialize the results.

    See Also
    --------
    RegressionResults
        Results store for WLS and GLS models.

    Notes
    -----
    Most of the methods and attributes are inherited from RegressionResults.
    The special methods that are only available for OLS are:

    - get_influence
    - outlier_test
    - el_test
    - conf_int_el
    """

    def get_influence(self):
        """
        Calculate influence and outlier measures.

        Returns
        -------
        OLSInfluence
            The instance containing methods to calculate the main influence and
            outlier measures for the OLS regression.

        See Also
        --------
        statsmodels.stats.outliers_influence.OLSInfluence
            A class that exposes methods to examine observation influence.
        """
        from statsmodels.stats.outliers_influence import OLSInfluence
        return OLSInfluence(self)

    def outlier_test(self, method='bonf', alpha=.05, labels=None,
                     order=False, cutoff=None):
        """
        Test observations for outliers according to method.

        Parameters
        ----------
        method : str
            The method to use in the outlier test. Must be one of:

            - `bonferroni` : one-step correction
            - `sidak` : one-step correction
            - `holm-sidak` :
            - `holm` :
            - `simes-hochberg` :
            - `hommel` :
            - `fdr_bh` : Benjamini/Hochberg
            - `fdr_by` : Benjamini/Yekutieli

            See `statsmodels.stats.multitest.multipletests` for details.
        alpha : float
            The familywise error rate (FWER).
        labels : None or array_like
            If `labels` is not None, then it will be used as index to the
            returned pandas DataFrame. See also Returns below.
        order : bool
            Whether or not to order the results by the absolute value of the
            studentized residuals. If labels are provided they will also be
            sorted.
        cutoff : None or float in [0, 1]
            If cutoff is not None, then the return only includes observations
            with multiple testing corrected p-values strictly below the cutoff.
            The returned array or dataframe can be empty if no observation
            passes the cutoff.

        Returns
        -------
        array_like
            Returns either an ndarray or a DataFrame if labels is not None.
            Will attempt to get labels from model_results if available. The
            columns are the Studentized residuals, the unadjusted p-value,
            and the corrected p-value according to method.

        Notes
        -----
        The unadjusted p-value is stats.t.sf(abs(resid), df) where
        df = df_resid - 1.
        """
        from statsmodels.stats.outliers_influence import outlier_test
        return outlier_test(self, method, alpha, labels=labels,
                            order=order, cutoff=cutoff)

    def el_test(self, b0_vals, param_nums, return_weights=0, ret_params=0,
                method='nm', stochastic_exog=1):
        """
        Test single or joint hypotheses using Empirical Likelihood.

        Parameters
        ----------
        b0_vals : 1darray
            The hypothesized value of the parameter to be tested.
        param_nums : 1darray
            The parameter number to be tested.
        return_weights : bool
            If true, returns the weights that optimize the likelihood
            ratio at b0_vals. The default is False.
        ret_params : bool
            If true, returns the parameter vector that maximizes the likelihood
            ratio at b0_vals. Also returns the weights. The default is False.
        method : str
            Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
            optimization method that optimizes over nuisance parameters.
            The default is 'nm'.
        stochastic_exog : bool
            When True, the exogenous variables are assumed to be stochastic.
            When the regressors are nonstochastic, moment conditions are
            placed on the exogenous variables. Confidence intervals for
            stochastic regressors are at least as large as non-stochastic
            regressors. The default is True.

        Returns
        -------
        tuple
            The p-value and -2 times the log-likelihood ratio for the
            hypothesized values.

        Examples
        --------
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.stackloss.load(as_pandas=False)
        >>> endog = data.endog
        >>> exog = sm.add_constant(data.exog)
        >>> model = sm.OLS(endog, exog)
        >>> fitted = model.fit()
        >>> fitted.params
        >>> array([-39.91967442,  0.7156402 ,  1.29528612, -0.15212252])
        >>> fitted.rsquared
        >>> 0.91357690446068196
        >>> # Test that the slope on the first variable is 0
        >>> fitted.el_test([0], [1])
        >>> (27.248146353888796, 1.7894660442330235e-07)
        """
        params = np.copy(self.params)
        opt_fun_inst = _ELRegOpts()  # to store weights
        # All parameters hypothesized: no nuisance parameters to optimize
        # over, evaluate the likelihood ratio directly.
        if len(param_nums) == len(params):
            llr = opt_fun_inst._opt_nuis_regress(
                [],
                param_nums=param_nums,
                endog=self.model.endog,
                exog=self.model.exog,
                nobs=self.model.nobs,
                nvar=self.model.exog.shape[1],
                params=params,
                b0_vals=b0_vals,
                stochastic_exog=stochastic_exog)
            pval = 1 - stats.chi2.cdf(llr, len(param_nums))
            if return_weights:
                return llr, pval, opt_fun_inst.new_weights
            else:
                return llr, pval
        # Otherwise minimize the likelihood ratio over the remaining
        # (nuisance) parameters, starting from the fitted values.
        x0 = np.delete(params, param_nums)
        args = (param_nums, self.model.endog, self.model.exog,
                self.model.nobs, self.model.exog.shape[1], params,
                b0_vals, stochastic_exog)
        if method == 'nm':
            llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0,
                                maxfun=10000, maxiter=10000, full_output=1,
                                disp=0, args=args)[1]
        if method == 'powell':
            llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
                                       full_output=1, disp=0,
                                       args=args)[1]

        pval = 1 - stats.chi2.cdf(llr, len(param_nums))
        if ret_params:
            return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
        elif return_weights:
            return llr, pval, opt_fun_inst.new_weights
        else:
            return llr, pval

    def conf_int_el(self, param_num, sig=.05, upper_bound=None,
                    lower_bound=None, method='nm', stochastic_exog=True):
        """
        Compute the confidence interval using Empirical Likelihood.

        Parameters
        ----------
        param_num : float
            The parameter for which the confidence interval is desired.
        sig : float
            The significance level.  Default is 0.05.
        upper_bound : float
            The maximum value the upper limit can be.  Default is the
            99.9% confidence value under OLS assumptions.
        lower_bound : float
            The minimum value the lower limit can be.  Default is the 99.9%
            confidence value under OLS assumptions.
        method : str
            Can either be 'nm' for Nelder-Mead or 'powell' for Powell.  The
            optimization method that optimizes over nuisance parameters.
            The default is 'nm'.
        stochastic_exog : bool
            When True, the exogenous variables are assumed to be stochastic.
            When the regressors are nonstochastic, moment conditions are
            placed on the exogenous variables.  Confidence intervals for
            stochastic regressors are at least as large as non-stochastic
            regressors.  The default is True.

        Returns
        -------
        lowerl : float
            The lower bound of the confidence interval.
        upperl : float
            The upper bound of the confidence interval.

        See Also
        --------
        el_test : Test parameters using Empirical Likelihood.

        Notes
        -----
        This function uses brentq to find the value of beta where
        test_beta([beta], param_num)[1] is equal to the critical value.

        The function returns the results of each iteration of brentq at each
        value of beta.

        The current function value of the last printed optimization should be
        the critical value at the desired significance level. For alpha=.05,
        the value is 3.841459.

        To ensure optimization terminated successfully, it is suggested to do
        el_test([lower_limit], [param_num]).

        If the optimization does not terminate successfully, consider switching
        optimization algorithms.

        If optimization is still not successful, try changing the values of
        start_int_params. If the current function value repeatedly jumps
        from a number between 0 and the critical value and a very large number
        (>50), the starting parameters of the interior minimization need
        to be changed.
        """
        # Critical value of the chi2(1) distribution at the requested level.
        r0 = stats.chi2.ppf(1 - sig, 1)
        if upper_bound is None:
            upper_bound = self.conf_int(.01)[param_num][1]
        if lower_bound is None:
            lower_bound = self.conf_int(.01)[param_num][0]

        # Root function: zero where the EL test statistic hits the critical
        # value, i.e. at a confidence-interval endpoint.
        def f(b0):
            return self.el_test(np.array([b0]), np.array([param_num]),
                                method=method,
                                stochastic_exog=stochastic_exog)[0] - r0

        lowerl = optimize.brenth(f, lower_bound,
                                 self.params[param_num])
        upperl = optimize.brenth(f, self.params[param_num],
                                 upper_bound)
        # ^ Seems to be faster than brentq in most cases
        return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
    """Wrapper that re-attaches data labels (rows/columns) to the ndarray
    outputs of RegressionResults when the model was built from pandas
    inputs."""

    # Map result attribute -> which axis labels apply to it.
    _attrs = dict(
        chisq='columns',
        sresid='rows',
        weights='rows',
        wresid='rows',
        bcov_unscaled='cov',
        bcov_scaled='cov',
        HC0_se='columns',
        HC1_se='columns',
        HC2_se='columns',
        HC3_se='columns',
        norm_resid='rows',
    )

    _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
                                   _attrs)

    # No extra wrapped methods beyond the base class.
    _methods = {}
    _wrap_methods = wrap.union_dicts(
        base.LikelihoodResultsWrapper._wrap_methods,
        _methods)
# Register the wrapper so that fitted models return wrapped
# RegressionResults instances with pandas metadata preserved.
wrap.populate_wrapper(RegressionResultsWrapper,
                      RegressionResults)
| jseabold/statsmodels | statsmodels/regression/linear_model.py | Python | bsd-3-clause | 113,147 | [
"Gaussian"
] | ae953fa26eee8502e4ffa5b65a282f95a88224dc19464ed471a46a1a360a3db2 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
    """Flat test surface: ones everywhere, matching x's shape and dtype."""
    return np.ones_like(x)
constant.title = 'Constant'
def xramp(x, y):
    """Planar ramp increasing along x; independent of y."""
    return x
xramp.title = 'X Ramp'
def yramp(x, y):
    """Planar ramp increasing along y; independent of x."""
    return y
yramp.title = 'Y Ramp'
def exponential(x, y):
    """Franke-style test surface on the unit square: three Gaussian bumps
    minus one Gaussian dip, evaluated on inputs scaled to [0, 9].

    Bug fix: ``y1`` was previously computed as ``x + 1.0`` (copy/paste from
    the x-series), but the pattern of the other terms (y2, y3, y7 all come
    from y) and Franke's reference function use ``9*y + 1``.
    NOTE(review): image-comparison baselines generated from the old formula
    may need regeneration after this fix.
    """
    x = x*9
    y = y*9
    x1 = x+1.0
    x2 = x-2.0
    x4 = x-4.0
    x7 = x-7.0
    y1 = y+1.0  # was x+1.0: the second term is exp(-(9x+1)^2/49 - (9y+1)/10)
    y2 = y-2.0
    y3 = y-3.0
    y7 = y-7.0
    f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
         0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
         0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
         0.2 * np.exp(-x4*x4 - y7*y7))
    return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
    """Sharp tanh step rising across the diagonal y = x."""
    slope = 9.0 * (y - x) + 1.0
    return np.tanh(slope) / 9.0
cliff.title = 'Cliff'
def saddle(x, y):
    """Saddle-shaped surface: cosine ridge in y over a quadratic well in x."""
    numerator = 1.25 + np.cos(5.4 * y)
    denominator = 6.0 + 6.0 * (3 * x - 1.0) ** 2
    return numerator / denominator
saddle.title = 'Saddle'
def gentle(x, y):
    """Gentle Gaussian peak centered at (0.5, 0.5)."""
    r_squared = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-5.0625 * r_squared) / 3.0
gentle.title = 'Gentle Peak'
def steep(x, y):
    """Steep Gaussian peak centered at (0.5, 0.5); narrower than gentle()."""
    r_squared = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-20.25 * r_squared) / 3.0
steep.title = 'Steep Peak'
def sphere(x, y):
    """Upper hemisphere (radius 8/9) centered at (0.5, 0.5), offset by -0.5;
    zero outside the rim."""
    rim = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
    # clip keeps sqrt defined everywhere; where() selects inside/outside.
    height = np.sqrt(np.clip(rim, 0, 100)) - 0.5
    return np.where(rim >= 0, height, 0.0)
sphere.title = 'Sphere'
def trig(x, y):
    """Product of cosines/sines plus a cross-term ripple."""
    wave = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y)
    ripple = np.sin(10.0 * x * y)
    return wave + ripple
trig.title = 'Cosines and Sines'
def gauss(x, y):
    """A Gaussian peak combined with Gaussian ridges."""
    u = 5.0 - 10.0 * x
    v = 5.0 - 10.0 * y
    ridge_x = np.exp(-u * u / 2)
    ridge_y = np.exp(-v * v / 2)
    return ridge_x + 0.75 * ridge_y * (1 + ridge_x)
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
    """Four-lobed cloverleaf built from logistic functions of x and y."""
    ex = np.exp((10.0 - 20.0 * x) / 3.0)
    ey = np.exp((10.0 - 20.0 * y) / 3.0)
    logistic_x = 1.0 / (1.0 + ex)
    logistic_y = 1.0 / (1.0 + ey)
    scale = ((20.0 / 3.0) ** 3 * ex * ey) ** 2
    shape = (logistic_x * logistic_y) ** 5
    return scale * shape * (ex - 2.0 * logistic_x) * (ey - 2.0 * logistic_y)
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
    """Exponentially damped cosine, radially symmetric about (0.5, 0.5)."""
    radius = np.hypot(80 * x - 40.0, 90 * y - 45.)
    return np.exp(-0.04 * radius) * np.cos(0.15 * radius)
cosine_peak.title = 'Cosine Peak'
# All 2-D test surfaces exercised by make_all_2d_testfuncs() below.
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
    """Evaluates and plots a linear interpolant of a test function sampled
    at random points in a rectangle.

    NOTE: the ``xrange``/``yrange`` parameter names shadow the
    ``six.moves.xrange`` imported at module level (inside this class only).
    """
    name = 'Linear'

    def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
        # Sampling domain, output grid resolution, and sample count.
        self.xrange = xrange
        self.yrange = yrange
        self.nrange = nrange
        self.npoints = npoints

        # Fixed seed so the scattered sample (and thus image baselines)
        # is reproducible.
        rng = np.random.RandomState(1234567890)
        self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
        self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
        self.tri = Triangulation(self.x, self.y)

    def replace_data(self, dataset):
        # Swap in externally supplied sample points and re-triangulate.
        self.x = dataset.x
        self.y = dataset.y
        self.tri = Triangulation(self.x, self.y)

    def interpolator(self, func):
        # Sample func at the scattered points and build a linear
        # extrapolator over the full bounding box.
        z = func(self.x, self.y)
        return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)

    def plot(self, func, interp=True, plotter='imshow'):
        """Plot func (interp=False) or its interpolant (interp=True) on an
        nrange x nrange grid, as an image or contour plot, overlaying the
        triangulation edges."""
        if interp:
            lpi = self.interpolator(func)
            # Grid-slicing syntax of the interpolator: imaginary step means
            # "number of samples", as with np.mgrid.
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
        else:
            y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            z = func(x, y)

        # Extrapolation can produce infs; flatten them for display.
        z = np.where(np.isinf(z), 0.0, z)

        extent = (self.xrange[0], self.xrange[1],
                  self.yrange[0], self.yrange[1])
        fig = plt.figure()
        plt.hot()  # Some like it hot
        if plotter == 'imshow':
            plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
        elif plotter == 'contour':
            Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            plt.contour(np.ravel(X), np.ravel(Y), z, 20)
        # Overlay the triangulation edges as faint line segments.
        x = self.x
        y = self.y
        lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
                                                      for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
        ax = plt.gca()
        ax.add_collection(lc)

        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            plt.title('%s: %s' % (func.title, title))
        else:
            plt.title(title)
class NNTester(LinearTester):
    """LinearTester variant that uses natural-neighbor interpolation."""
    name = 'Natural Neighbors'

    def interpolator(self, func):
        """Return an nn_extrapolator for func sampled at the scattered points."""
        values = func(self.x, self.y)
        bounding_box = self.xrange + self.yrange
        return self.tri.nn_extrapolator(values, bbox=bounding_box)
def make_all_2d_testfuncs(allfuncs=allfuncs):
    """Generate one image-comparison test per test surface and register it
    in this module's namespace as ``test_<funcname>``."""
    def make_test(func):
        # Six baseline images per function: reference/NN/linear, each as
        # image and as contour plot.
        filenames = [
            '%s-%s' % (func.__name__, x) for x in
            ['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]

        # We only generate PNGs to save disk space -- we just assume
        # that any backend differences are caught by other tests.
        @image_comparison(filenames, extensions=['png'],
                          freetype_version=('2.4.5', '2.4.9'),
                          remove_text=True)
        def reference_test():
            nnt.plot(func, interp=False, plotter='imshow')
            nnt.plot(func, interp=True, plotter='imshow')
            lpt.plot(func, interp=True, plotter='imshow')
            nnt.plot(func, interp=False, plotter='contour')
            nnt.plot(func, interp=True, plotter='contour')
            lpt.plot(func, interp=True, plotter='contour')

        # Rename so the test runner reports a distinct name per surface.
        tester = reference_test
        tester.__name__ = str('test_%s' % func.__name__)
        return tester

    # Shared testers reused by all generated tests (same seeded sample).
    nnt = NNTester(npoints=1000)
    lpt = LinearTester(npoints=1000)
    for func in allfuncs:
        globals()['test_%s' % func.__name__] = make_test(func)

# Register all generated tests at import time.
make_all_2d_testfuncs()
# 1d and 0d grid tests
# Reference linear interpolant over the square [0, 10] x [0, 10] with
# corner values (0,0)->1, (10,0)->10, (10,10)->5, (0,10)->2.
ref_interpolator = Triangulation([0,10,10,0],
                                 [0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
    """Sample a 1-D slice (two x values at fixed y=1) of the reference
    interpolant and compare against known linear values."""
    slab = ref_interpolator[3:6:2j, 1:1:1j]
    expected = [[1.6], [1.9]]
    assert np.allclose(slab, expected, rtol=0)
def test_0d_grid():
    """Evaluate the reference interpolant at a single point via a
    degenerate 1x1 grid."""
    point = ref_interpolator[3:3:1j, 1:1:1j]
    assert np.allclose(point, [[1.6]], rtol=0)
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
    """Plot 1-D x-slices of the reference interpolant at several fixed y
    values and compare against the stored baseline image."""
    x_range = slice(0.25, 9.75, 20j)
    x = np.mgrid[x_range]
    ax = plt.gca()
    for y in (2, 4, 6, 8):
        plt.plot(x, ref_interpolator[x_range, y:y:1j])
    ax.set_xticks([])
    ax.set_yticks([])
| yavalvas/yav_com | build/matplotlib/lib/matplotlib/tests/test_delaunay.py | Python | mit | 6,833 | [
"Gaussian"
] | 0943ec6af17666830a8aaeae52d2aed77e0262de50430f7ef3feec41c8004ed7 |
import numpy.random as rand

# 100 draws from the standard normal (Gaussian) distribution:
# mean 0, standard deviation 1.
a = rand.randn(100)

# Boolean mask selecting the elements we want to transform.
index = a > 0.2

# Transform only the selected elements...
b = a[index] ** 2 - 2

# ...then scatter the transformed values back into the original array.
a[index] = b
| ebressert/ScipyNumpy_book_examples | python_examples/numpy_22_ex2.py | Python | mit | 469 | [
"Gaussian"
] | f0fecd404aea88133f84b5606fbf1a7eafa165b6afa6c99e9911b6ae1b53d554 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
Assignment Problem
==================
Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers
to perform any of *n* jobs. The assignment problem is to assign jobs to
workers in a way that minimizes the total cost. Since each worker can perform
only one job and each job can be assigned to only one worker the assignments
represent an independent set of the matrix *C*.
One way to generate the optimal set is to create all permutations of
the indexes necessary to traverse the matrix so that no row and column
are used more than once. For instance, given this matrix (expressed in
Python)::
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
You could use this code to generate the traversal indexes::
def permute(a, results):
if len(a) == 1:
results.insert(len(results), a)
else:
for i in range(0, len(a)):
element = a[i]
a_copy = [a[j] for j in range(0, len(a)) if j != i]
subresults = []
permute(a_copy, subresults)
for subresult in subresults:
result = [element] + subresult
results.insert(len(results), result)
results = []
permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix
After the call to permute(), the results matrix would look like this::
[[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]]
You could then use that index matrix to loop over the original cost matrix
and calculate the smallest cost of the combinations::
n = len(matrix)
minval = sys.maxsize
for row in range(n):
cost = 0
for col in range(n):
cost += matrix[row][col]
minval = min(cost, minval)
print minval
While this approach works fine for small matrices, it does not scale. It
executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n*
matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600
traversals. Even if you could manage to perform each traversal in just one
millisecond, it would still take more than 133 hours to perform the entire
traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At
an optimistic millisecond per operation, that's more than 77 million years.
The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This
package provides an implementation of that algorithm.
This version is based on
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html.
This version was written for Python by Brian Clapper from the (Ada) algorithm
at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was
clearly adapted from the same web site.)
Usage
=====
Construct a Munkres object::
from munkres import Munkres
m = Munkres()
Then use it to compute the lowest cost assignment from a cost matrix. Here's
a sample program::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
m = Munkres()
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total cost: %d' % total
Running that program produces::
Lowest cost through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 0) -> 5
(1, 1) -> 3
(2, 2) -> 4
    total cost: 12
The instantiated Munkres object can be used multiple times on different
matrices.
Non-square Cost Matrices
========================
The Munkres algorithm assumes that the cost matrix is square. However, it's
possible to use a rectangular matrix if you first pad it with 0 values to make
it square. This module automatically pads rectangular cost matrices to make
them square.
Notes:
- The module operates on a *copy* of the caller's matrix, so any padding will
not be seen by the caller.
- The cost matrix must be rectangular or square. An irregular matrix will
*not* work.
Calculating Profit, Rather than Cost
====================================
The cost matrix is just that: A cost matrix. The Munkres algorithm finds
the combination of elements (one from each row and column) that results in
the smallest cost. It's also possible to use the algorithm to maximize
profit. To do that, however, you have to convert your profit matrix to a
cost matrix. The simplest way to do that is to subtract all elements from a
large value. For example::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row += [sys.maxsize - col]
cost_matrix += [cost_row]
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
Running that program produces::
Highest profit through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 1) -> 9
(1, 0) -> 10
(2, 2) -> 4
total profit=23
The ``munkres`` module provides a convenience method for creating a cost
matrix from a profit matrix. Since it doesn't know whether the matrix contains
floating point numbers, decimals, or integers, you have to provide the
conversion function; but the convenience method takes care of the actual
creation of the cost matrix::
import munkres
cost_matrix = munkres.make_cost_matrix(matrix,
lambda cost: sys.maxsize - cost)
So, the above profit-calculation program can be recast as::
from munkres import Munkres, print_matrix, make_cost_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxsize - cost)
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
References
==========
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. http://en.wikipedia.org/wiki/Hungarian_algorithm
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2008 Brian M. Clapper
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name "clapper.org" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import copy
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__version__ = "1.0.6"
__author__ = "Brian Clapper, bmc@clapper.org"
__url__ = "http://software.clapper.org/munkres/"
__copyright__ = "(c) 2008 Brian M. Clapper"
__license__ = "BSD-style license"
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
    """
    Calculate the Munkres (Hungarian) solution to the classical assignment
    problem: choose one element from each row and column of a square cost
    matrix so that the total cost is minimized.

    See the module documentation for usage.
    """

    def __init__(self):
        """Create a new instance"""
        self.C = None              # working (padded) copy of the cost matrix
        self.row_covered = []      # per-row cover flags used by steps 3-6
        self.col_covered = []      # per-column cover flags
        self.n = 0                 # dimension of the padded square matrix
        self.Z0_r = 0              # row of the uncovered primed zero (step 4)
        self.Z0_c = 0              # column of that primed zero
        self.marked = None         # 0 = unmarked, 1 = starred, 2 = primed
        self.path = None           # alternating star/prime path (step 5)

    def make_cost_matrix(profit_matrix, inversion_function):
        """
        **DEPRECATED**

        Please use the module function ``make_cost_matrix()``.
        """
        import munkres
        return munkres.make_cost_matrix(profit_matrix, inversion_function)

    make_cost_matrix = staticmethod(make_cost_matrix)

    def pad_matrix(self, matrix, pad_value=0):
        """
        Pad a possibly non-square matrix to make it square.

        :Parameters:
            matrix : list of lists
                matrix to pad
            pad_value : int
                value to use to pad the matrix

        :rtype: list of lists
        :return: a new, possibly padded, matrix
        """
        max_columns = 0
        total_rows = len(matrix)
        for row in matrix:
            max_columns = max(max_columns, len(row))
        total_rows = max(max_columns, total_rows)
        new_matrix = []
        for row in matrix:
            row_len = len(row)
            new_row = row[:]
            if total_rows > row_len:
                # Row too short. Pad it.
                # Bug fix: honor pad_value (previously a literal 0 was used,
                # silently ignoring the parameter).
                new_row += [pad_value] * (total_rows - row_len)
            new_matrix += [new_row]
        while len(new_matrix) < total_rows:
            # Bug fix: pad the extra rows with pad_value as well.
            new_matrix += [[pad_value] * total_rows]
        return new_matrix

    def compute(self, cost_matrix):
        """
        Compute the indexes for the lowest-cost pairings between rows and
        columns in the database. Returns a list of (row, column) tuples
        that can be used to traverse the matrix.

        :Parameters:
            cost_matrix : list of lists
                The cost matrix. If this cost matrix is not square, it
                will be padded with zeros, via a call to ``pad_matrix()``.
                (This method does *not* modify the caller's matrix. It
                operates on a copy of the matrix.)

                **WARNING**: This code handles square and rectangular
                matrices. It does *not* handle irregular matrices.

        :rtype: list
        :return: A list of ``(row, column)`` tuples that describe the lowest
                 cost path through the matrix
        """
        self.C = self.pad_matrix(cost_matrix)
        self.n = len(self.C)
        # Remember the caller's dimensions so padding rows/columns are
        # excluded from the returned result.
        self.original_length = len(cost_matrix)
        self.original_width = len(cost_matrix[0])
        self.row_covered = [False for i in range(self.n)]
        self.col_covered = [False for i in range(self.n)]
        self.Z0_r = 0
        self.Z0_c = 0
        self.path = self.__make_matrix(self.n * 2, 0)
        self.marked = self.__make_matrix(self.n, 0)

        done = False
        step = 1

        # Classic Munkres state machine: each step returns the next step
        # number; step 7 (absent from the table) terminates via KeyError.
        steps = { 1 : self.__step1,
                  2 : self.__step2,
                  3 : self.__step3,
                  4 : self.__step4,
                  5 : self.__step5,
                  6 : self.__step6 }

        while not done:
            try:
                func = steps[step]
                step = func()
            except KeyError:
                done = True

        # Look for the starred columns
        results = []
        for i in range(self.original_length):
            for j in range(self.original_width):
                if self.marked[i][j] == 1:
                    results += [(i, j)]
        return results

    def __copy_matrix(self, matrix):
        """Return an exact copy of the supplied matrix"""
        return copy.deepcopy(matrix)

    def __make_matrix(self, n, val):
        """Create an *n*x*n* matrix, populating it with the specific value."""
        matrix = []
        for i in range(n):
            matrix += [[val for j in range(n)]]
        return matrix

    def __step1(self):
        """
        For each row of the matrix, find the smallest element and
        subtract it from every element in its row. Go to Step 2.
        """
        C = self.C
        n = self.n
        for i in range(n):
            minval = min(self.C[i])
            # Find the minimum value for this row and subtract that minimum
            # from every element in the row.
            for j in range(n):
                self.C[i][j] -= minval
        return 2

    def __step2(self):
        """
        Find a zero (Z) in the resulting matrix. If there is no starred
        zero in its row or column, star Z. Repeat for each element in the
        matrix. Go to Step 3.
        """
        n = self.n
        for i in range(n):
            for j in range(n):
                if (self.C[i][j] == 0) and \
                        (not self.col_covered[j]) and \
                        (not self.row_covered[i]):
                    self.marked[i][j] = 1
                    self.col_covered[j] = True
                    self.row_covered[i] = True
        self.__clear_covers()
        return 3

    def __step3(self):
        """
        Cover each column containing a starred zero. If K columns are
        covered, the starred zeros describe a complete set of unique
        assignments. In this case, Go to DONE, otherwise, Go to Step 4.
        """
        n = self.n
        count = 0
        for i in range(n):
            for j in range(n):
                if self.marked[i][j] == 1:
                    self.col_covered[j] = True
                    count += 1
        if count >= n:
            step = 7  # done
        else:
            step = 4
        return step

    def __step4(self):
        """
        Find a noncovered zero and prime it. If there is no starred zero
        in the row containing this primed zero, Go to Step 5. Otherwise,
        cover this row and uncover the column containing the starred
        zero. Continue in this manner until there are no uncovered zeros
        left. Save the smallest uncovered value and Go to Step 6.
        """
        step = 0
        done = False
        row = -1
        col = -1
        star_col = -1
        while not done:
            (row, col) = self.__find_a_zero()
            if row < 0:
                # No uncovered zero remains: adjust the matrix in step 6.
                done = True
                step = 6
            else:
                self.marked[row][col] = 2  # prime it
                star_col = self.__find_star_in_row(row)
                if star_col >= 0:
                    col = star_col
                    self.row_covered[row] = True
                    self.col_covered[col] = False
                else:
                    # Uncovered primed zero with no star in its row:
                    # start the augmenting path in step 5.
                    done = True
                    self.Z0_r = row
                    self.Z0_c = col
                    step = 5
        return step

    def __step5(self):
        """
        Construct a series of alternating primed and starred zeros as
        follows. Let Z0 represent the uncovered primed zero found in Step 4.
        Let Z1 denote the starred zero in the column of Z0 (if any).
        Let Z2 denote the primed zero in the row of Z1 (there will always
        be one). Continue until the series terminates at a primed zero
        that has no starred zero in its column. Unstar each starred zero
        of the series, star each primed zero of the series, erase all
        primes and uncover every line in the matrix. Return to Step 3
        """
        count = 0
        path = self.path
        path[count][0] = self.Z0_r
        path[count][1] = self.Z0_c
        done = False
        while not done:
            row = self.__find_star_in_col(path[count][1])
            if row >= 0:
                count += 1
                path[count][0] = row
                path[count][1] = path[count-1][1]
            else:
                done = True
            if not done:
                col = self.__find_prime_in_row(path[count][0])
                count += 1
                path[count][0] = path[count-1][0]
                path[count][1] = col
        self.__convert_path(path, count)
        self.__clear_covers()
        self.__erase_primes()
        return 3

    def __step6(self):
        """
        Add the value found in Step 4 to every element of each covered
        row, and subtract it from every element of each uncovered column.
        Return to Step 4 without altering any stars, primes, or covered
        lines.
        """
        minval = self.__find_smallest()
        for i in range(self.n):
            for j in range(self.n):
                if self.row_covered[i]:
                    self.C[i][j] += minval
                if not self.col_covered[j]:
                    self.C[i][j] -= minval
        return 4

    def __find_smallest(self):
        """Find the smallest uncovered value in the matrix."""
        minval = sys.maxsize
        for i in range(self.n):
            for j in range(self.n):
                if (not self.row_covered[i]) and (not self.col_covered[j]):
                    if minval > self.C[i][j]:
                        minval = self.C[i][j]
        return minval

    def __find_a_zero(self):
        """Find the first uncovered element with value 0"""
        row = -1
        col = -1
        i = 0
        n = self.n
        done = False
        while not done:
            j = 0
            while True:
                if (self.C[i][j] == 0) and \
                        (not self.row_covered[i]) and \
                        (not self.col_covered[j]):
                    row = i
                    col = j
                    done = True
                j += 1
                if j >= n:
                    break
            i += 1
            if i >= n:
                done = True
        return (row, col)

    def __find_star_in_row(self, row):
        """
        Find the first starred element in the specified row. Returns
        the column index, or -1 if no starred element was found.
        """
        col = -1
        for j in range(self.n):
            if self.marked[row][j] == 1:
                col = j
                break
        return col

    def __find_star_in_col(self, col):
        """
        Find the first starred element in the specified column. Returns
        the row index, or -1 if no starred element was found.
        """
        row = -1
        for i in range(self.n):
            if self.marked[i][col] == 1:
                row = i
                break
        return row

    def __find_prime_in_row(self, row):
        """
        Find the first primed element in the specified row. Returns
        the column index, or -1 if no primed element was found.
        """
        col = -1
        for j in range(self.n):
            if self.marked[row][j] == 2:
                col = j
                break
        return col

    def __convert_path(self, path, count):
        # Along the alternating path, unstar starred zeros and star
        # primed zeros (toggles 1 <-> non-1).
        for i in range(count+1):
            if self.marked[path[i][0]][path[i][1]] == 1:
                self.marked[path[i][0]][path[i][1]] = 0
            else:
                self.marked[path[i][0]][path[i][1]] = 1

    def __clear_covers(self):
        """Clear all covered matrix cells"""
        for i in range(self.n):
            self.row_covered[i] = False
            self.col_covered[i] = False

    def __erase_primes(self):
        """Erase all prime markings"""
        for i in range(self.n):
            for j in range(self.n):
                if self.marked[i][j] == 2:
                    self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(profit_matrix, inversion_function):
    """
    Create a cost matrix from a profit matrix by calling
    'inversion_function' to invert each value. The inversion
    function must take one numeric argument (of any type) and return
    another numeric argument which is presumed to be the cost inverse
    of the original profit.

    This is a static method. Call it like this:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)

    For example:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)

    :Parameters:
        profit_matrix : list of lists
            The matrix to convert from a profit to a cost matrix

        inversion_function : function
            The function to use to invert each entry in the profit matrix

    :rtype: list of lists
    :return: The converted matrix
    """
    # One inverted row per input row; a nested comprehension keeps the
    # row-major structure of the input.
    return [[inversion_function(value) for value in row]
            for row in profit_matrix]
def print_matrix(matrix, msg=None):
    """
    Convenience function: Displays the contents of a matrix of integers.

    :Parameters:
        matrix : list of lists
            Matrix to print

        msg : str
            Optional message to print before displaying the matrix
    """
    if msg is not None:
        print(msg)

    # Calculate the appropriate format width. Using len(str(val)) instead of
    # int(math.log10(val)) + 1 fixes a crash on zero and negative entries
    # (log10 is undefined for val <= 0) while producing the same width for
    # positive integers.
    width = 0
    for row in matrix:
        for val in row:
            width = max(width, len(str(val)))

    # Make the format string
    format = '%%%dd' % width

    # Print the matrix
    for row in matrix:
        sep = '['
        for val in row:
            sys.stdout.write(sep + format % val)
            sep = ', '
        sys.stdout.write(']\n')
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Self-test fixtures: (cost matrix, expected minimal total cost),
    # covering both square and rectangular inputs.
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850),  # expected cost

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452),  # expected cost

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15)]

    solver = Munkres()
    for cost_matrix, expected_total in matrices:
        print_matrix(cost_matrix, msg='cost matrix')
        total_cost = 0
        for r, c in solver.compute(cost_matrix):
            x = cost_matrix[r][c]
            total_cost += x
            print('(%d, %d) -> %d' % (r, c, x))
        print('lowest cost=%d' % total_cost)
        assert expected_total == total_cost
| rembo10/headphones | lib/munkres.py | Python | gpl-3.0 | 24,733 | [
"Brian"
] | 802e1c3ddf28362083f27ca22e846524a52aca9aedbd8349b3de615dac183299 |
from nanopore.analyses.abstractAnalysis import AbstractAnalysis
from nanopore.analyses.utils import AlignedPair, getFastaDictionary, getFastqDictionary, samIterator
import os
import pysam
import xml.etree.cElementTree as ET
from jobTree.src.bioio import reverseComplement, prettyXml, system
from itertools import product
class SubstitutionMatrix():
    """Nucleotide substitution matrix over A/C/G/T plus one wildcard bin.

    Any base outside ACGT (e.g. N) is folded into index 4, so alignments
    against wildcard characters are still counted.
    """
    def __init__(self):
        # Flat 5x5 count table: row = reference base, column = read base.
        self.matrix = [0.0] * 25

    def addAlignedPair(self, refBase, readBase):
        """Record a single aligned (reference base, read base) observation."""
        self.matrix[self._index(refBase) * 5 + self._index(readBase)] += 1

    def getCount(self, refBase, readBase):
        """Return the accumulated count for refBase aligned to readBase."""
        return self.matrix[self._index(refBase) * 5 + self._index(readBase)]

    def getFreqs(self, refBase, bases):
        """
        Get list of relative frequencies for a refBase against all bases
        (passed as a string), in the order given. All zeros if no counts.
        """
        counts = [self.getCount(refBase, b) for b in bases]
        total = sum(counts)
        if total == 0:
            return [0.0] * len(counts)
        return [c / total for c in counts]

    def getXML(self):
        """Build an ElementTree node summarising matches, mismatches and
        identity, overall and broken down per reference base."""
        def _identity(matches, mismatches):
            if matches + mismatches == 0:
                return "NaN"
            return matches / (mismatches + matches)

        matches = sum(self.getCount(base, base) for base in "ACTG")
        mismatches = sum(self.getCount(refBase, readBase)
                         for refBase in "ACTG"
                         for readBase in "ACTG" if readBase != refBase)
        node = ET.Element("substitutions", { "matches":str(matches), "mismatches":str(mismatches), "identity":str(_identity(matches, mismatches)) })
        for refBase in "ACGTN":
            baseMatches = self.getCount(refBase, refBase)
            baseMismatches = sum(self.getCount(refBase, readBase)
                                 for readBase in "ACTG" if readBase != refBase)
            baseNode = ET.SubElement(node, refBase, { "matches":str(baseMatches), "mismatches":str(baseMismatches), "identity":str(_identity(baseMatches, baseMismatches)) })
            for readBase in "ACGTN":
                ET.SubElement(baseNode, readBase, { "count":str(self.getCount(refBase, readBase)) })
        return node

    @staticmethod
    def _index(base):
        """Map a base character to its table index; non-ACGT maps to 4."""
        base = base.upper()
        if base not in "ACGT":
            return 4
        return { 'A':0, 'C':1, 'G':2, 'T':3 }[base]
class Substitutions(AbstractAnalysis):
    """Calculates stats on substitutions.

    Walks every aligned (reference base, read base) pair in the SAM file,
    accumulates the pairs in a SubstitutionMatrix, then writes an XML
    summary, a TSV of relative substitution frequencies, and a plot
    rendered by an external R script.
    """
    def run(self, kmer=5):
        # NOTE(review): `kmer` is accepted but never used in this method --
        # confirm whether the AbstractAnalysis interface requires it.
        AbstractAnalysis.run(self) #Call base method to do some logging
        refSequences = getFastaDictionary(self.referenceFastaFile) #Hash of names to sequences
        readSequences = getFastqDictionary(self.readFastqFile) #Hash of names to sequences
        sM = SubstitutionMatrix() #The thing to store the counts in
        sam = pysam.Samfile(self.samFile, "r" )
        for aR in samIterator(sam): #Iterate on the sam lines
            # Pair each reference base with its aligned read base; the
            # matrix records matches and mismatches alike.
            for aP in AlignedPair.iterator(aR, refSequences[sam.getrname(aR.rname)], readSequences[aR.qname]): #Walk through the matches mismatches:
                sM.addAlignedPair(aP.getRefBase(), aP.getReadBase())
        sam.close()
        #Write out the substitution info
        open(os.path.join(self.outputDir, "substitutions.xml"), 'w').write(prettyXml(sM.getXML()))
        bases = "ACGT"
        outf = open(os.path.join(self.outputDir, "subst.tsv"), "w")
        outf.write("A\tC\tG\tT\n")
        for x in bases:
            # One row of relative frequencies per reference base.
            freqs = sM.getFreqs(x, bases)
            # NOTE(review): the trailing "\n" argument to format() is unused
            # (the format string has only two placeholders) -- harmless.
            outf.write("{}\t{}\n".format(x, "\t".join(map(str,freqs)), "\n"))
        outf.close()
        # Derive a label from the output directory name, then shell out to R
        # for the plot (assumes the R script path is resolvable from the
        # current working directory -- TODO confirm).
        analysis = self.outputDir.split("/")[-2].split("_")[-1] + "_Substitution_Levels"
        system("Rscript nanopore/analyses/substitution_plot.R {} {} {}".format(os.path.join(self.outputDir, "subst.tsv"), os.path.join(self.outputDir, "substitution_plot.pdf"), analysis))
        self.finish()
"pysam"
] | 642a33605294a9e0cf6ff5535a14edc4dbd8730adb36c9f77b0bbe5cba81dec6 |
#!/usr/bin/env python
#
# $File: statNeTemporal.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
# One population of 2000 individuals with 50 single-locus chromosomes.
# A virtual subpopulation (0,0) covering the first 500 individuals is the
# sample used for the temporal Ne estimates below.
pop = sim.Population([2000], loci=[1]*50)
pop.setVirtualSplitter(sim.RangeSplitter([0, 500]))
pop.evolve(
    initOps=[
        sim.InitSex(),
        sim.InitGenotype(freq=[0.3, 0.7]),
        # Record baseline allele frequencies for the temporal method.
        sim.Stat(effectiveSize=range(50), subPops=[(0,0)],
            vars='Ne_temporal_base'),
    ],
    preOps=[
        # Every 20 generations, compute the Waples (1989) and tempoFS
        # temporal Ne estimators. Each result is unpacked into three
        # numbers by the PyEval below (presumably estimate plus interval
        # bounds -- confirm against the simuPOP Stat documentation).
        sim.Stat(effectiveSize=range(50), subPops=[(0,0)],
            vars=['Ne_waples89_P1', 'Ne_tempoFS_P1'], step=20),
        sim.PyEval(r'"Waples Ne: %.1f (%.1f - %.1f), TempoFS: '
            r'%.1f (%.1f - %.1f), at generation %d\n" % '
            'tuple(Ne_waples89_P1 + Ne_tempoFS_P1 + [gen])', step=20)
    ],
    matingScheme=sim.RandomMating(),
    gen = 101
)
| BoPeng/simuPOP | docs/statNeTemporal.py | Python | gpl-2.0 | 1,789 | [
"VisIt"
] | ce140e351d22f27f44d20b8a4cfb191cadbdc288a9fe6688a1fa905fe68a8cd1 |
#!/usr/bin/env python2.6
"""
Oct 5 2012
cp from parseLiteratureBlast.py
Parser Blast output file into a table of clones that have hits matches certain keywords
Filtering criteria includes:
1/ Match score (E.g minPositives = 90%)
2/ Keywords (E.g: autologous keywords)
3/ Min sequence length (to avoid short sequences which are easier to be false negatives)
4/ Min number of samples
nknguyen at soe ucsc edu
Sep 1
"""
import os, sys, re
from Bio.Blast import NCBIXML
import immunoseq.lib.immunoseqLib as iseqlib
def reformatTitle(title):
    """Normalize a BLAST hit title: underscores and semicolons -> spaces."""
    for separator in ("_", ";"):
        title = title.replace(separator, " ")
    return title
def checkKeywords(title, keywords):
    """Return True if any keyword matches the lower-cased title.

    Keywords are treated as regular expressions (re.search).
    """
    lowered = title.lower()
    return any(re.search(keyword, lowered) for keyword in keywords)
def getCloneInfo(clonestr):
    """Parse a clone fasta header into (V genes, J genes, D genes, sizes).

    Header layout (semicolon-separated), e.g.:
    >adapt11D,adapt15D;111621|TRBV19|TRBJ2-1,TRBJ2-7|TRBD1-1;5,45504,1427;size=45504

    Gene names are returned with their TRBV/TRBJ/TRBD prefixes stripped and
    joined by commas; sample2size maps each sample name to its count.
    """
    fields = clonestr.lstrip('>').split(';')
    sampleNames = fields[0].split(',')
    # Total size is parsed for validation parity but not returned.
    size = int(fields[-1].lstrip('size='))
    counts = fields[-2].split(',')
    sample2size = {}
    for idx, name in enumerate(sampleNames):
        sample2size[name] = int(counts[idx])
    geneFields = fields[1].split(',,')[0].split('|')
    vs = ','.join(v.lstrip("TRBV") for v in geneFields[1].split(','))
    js = ','.join(j.lstrip("TRBJ") for j in geneFields[2].split(','))
    ds = ''
    if len(geneFields) > 3:
        ds = ','.join(d.lstrip("TRBD") for d in geneFields[3].split(','))
    return vs, js, ds, sample2size
def parsePaperInfo(paperStr):
    """Extract disease abbreviations from the quoted paper title in a
    BLAST hit description; fall back to the raw title if none match.

    The title is assumed to be the first double-quoted segment of the
    string. Matching is case-insensitive and checked in fixed list order.
    """
    diseases = ["Rheumatic Heart Disease", "Autoimmune", "Ankylosing Spondylitis", "Rheumatoid Arthritis", "Reactive Arthritis", "Multiple Sclerosis", "Psoriatic Arthritis", "Spondyloarthropathy", 'Lupus', "Diabetes", "Vitiligo"]
    disease2short = {"Rheumatic Heart Disease": "RHD", "Ankylosing Spondylitis":"AS", "Rheumatoid Arthritis":"RA", "Reactive Arthritis":"ReA", "Multiple Sclerosis":"MS", "Psoriatic Arthritis":"PA", "Spondyloarthropathy":"AS, SpA", 'Lupus':'SLE', 'Vitiligo': 'V'}
    quotedTitle = paperStr.split('"')[1]
    lowered = quotedTitle.lower()
    # Diseases without a short form (e.g. Diabetes) keep their full name.
    matchDiseases = [disease2short.get(d, d)
                     for d in diseases if d.lower() in lowered]
    if matchDiseases:
        return ', '.join(matchDiseases)
    return quotedTitle
def checkNumSamples(title):
    """Count distinct patients among the samples in a clone header.

    Several sample names are hard-coded aliases of the same patient; any
    unmapped sample name is treated as its own patient.
    """
    sampleNames = title.split(';')[0].lstrip('>').split(',')
    sample2patient={'SBC8': 'B', 'asBD':'B', 'asBR':'B', 'adaptBDdraw2':'B', 'asBDdraw2':'B',
                    'SBC7': '1', 'as1D':'1', 'as1R':'1',
                    'adapt11D': '11', 'as11R': '11'}
    seen = []
    for name in sampleNames:
        patient = sample2patient.get(name, name)
        if patient not in seen:
            seen.append(patient)
    return len(seen)
def checkNumPatients(title, group2sample2host, minPatientCount, minControlCount, maxPatientCount, maxControlCount):
    """Classify the samples of a clone header into patients and controls.

    A sample counts toward its group only when its clone count falls inside
    the group's [min, max] range; otherwise it increments the matching
    out-of-range tally. Duplicated sample names are counted once.

    Returns (numPatients, numControls, numPatientOutofrange,
    numControlOutofrange).
    """
    fields = title.split(';')
    sampleNames = fields[0].lstrip('>').split(',')
    counts = [int(c) for c in fields[-2].split(',')]
    patientMap = group2sample2host['patient']
    controlMap = group2sample2host['control']
    patients = []
    controls = []
    numPatientOutofrange = 0
    numControlOutofrange = 0
    for idx, name in enumerate(sampleNames):
        count = counts[idx]
        if name in patientMap and name not in patients:
            if minPatientCount <= count <= maxPatientCount:
                patients.append(name)
            else:
                numPatientOutofrange += 1
        elif name in controlMap and name not in controls:
            if minControlCount <= count <= maxControlCount:
                controls.append(name)
            else:
                numControlOutofrange += 1
    return len(patients), len(controls), numPatientOutofrange, numControlOutofrange
#def readNcbiXml(infile, minPositives, minNumsams, minLen, minNumpatients, minNumcontrols, minPatientCount, minControlCount, group2sample2host):
def readNcbiXml(options, group2sample2host):
    """Parse BLAST XML output and collect clones that pass the filters.

    Filters, in order: minimum number of distinct samples, patient/control
    composition and per-sample count ranges (all read from `options`), and
    minimum query length. For each surviving clone, every HSP with a
    positives fraction >= options.minPos is recorded.

    Returns (clones, clone2hits) where clone2hits maps a clone header to a
    list of (title, identities, query, match, sbjct) tuples. Note that a
    clone can appear in `clones` with no entry in clone2hits if none of its
    HSPs pass the score cutoff.
    """
    #infile, minPositives, minNumsams, minLen, minNumpatients, minNumcontrols, minPatientCount, minControlCount, group2sample2host):
    #clones, clone2hits = readNcbiXml(options.infile, options.minPos, options.minNumSamples, options.minLen, options.minNumPatients, options.minNumControls, options.minPatientCount, options.minControlCount, group2sample2host)
    #Read in blast-output file:
    rh = open(options.infile)
    records = NCBIXML.parse( rh)
    clone2hits = {} #key = cloneName, val = [ (list of hit papers, identity) ]
    clones = []
    for record in records:
        # The query header encodes samples, genes and counts; the helper
        # functions below parse it directly.
        clone = record.query
        numsams = checkNumSamples(clone)
        if numsams < options.minNumSamples:
            continue
        numpatients, numcontrols, numPoutofrange, numCoutofrange = checkNumPatients(clone, group2sample2host, options.minPatientCount, options.minControlCount, options.maxPatientCount, options.maxControlCount)
        if numpatients < options.minNumPatients or numcontrols < options.minNumControls or numPoutofrange > options.maxPatientOutofrange or numCoutofrange > options.maxControlOutofrange:
            #print numpatients
            #print numcontrols
            continue
        # Short queries are more likely to be false positives; skip them.
        if record.query_length < options.minLen:
            continue
        clones.append(clone)
        for aln in record.alignments:
            for hit in aln.hsps:
                # Keep only HSPs whose positives fraction meets the cutoff.
                if float(hit.positives)/len(hit.query) < options.minPos:
                    #if float(hit.identities)/len(hit.query) < minPositives:
                    continue
                if clone in clone2hits:
                    clone2hits[ clone ].append( (reformatTitle(aln.title), hit.identities, hit.query, hit.match, hit.sbjct) )
                else:
                    clone2hits[ clone ] = [ (reformatTitle(aln.title), hit.identities, hit.query, hit.match, hit.sbjct) ]
    return clones, clone2hits
def getPc(count, total):
    """Percentage of count over total; 0 when total is zero."""
    return 0 if total == 0 else 100.0 * count / total
def getDefaultKeywords():
    """Return the default keyword groups used to classify BLAST hit titles.

    Groups: 'autoimmune' (disease terms), 'b27' (HLA-B27), and 'pathogen'
    (organisms/antigens associated with reactive arthritis).
    """
    return {
        'autoimmune': ['arthritis', 'ankylosing', 'spondy', 'autoreactive', 'autoantigen', 'reactive arthritis', 'rheumatoid arthritis', 'multiple sclerosis', 'self', 'cross-reactive', 'mimicry', 'synovial', 'crohn', 'psoriasis', 'inflammatory bowel disease', 'ibd', 'ulcerative colitis', 'uveitis'],
        'b27': ['b27'],
        'pathogen': ['chlamydia', 'salmonella', 'yersinia', 'shigella', 'campylobacter', 'vipr1', 'ebv', 'epstein-barr', 'lmp2'],
    }
def printTab(clones, clone2hits, group2keywords, options, outbasename):
    """Write a plain-text report of clones whose hits match any keyword
    group, followed by a tab-separated summary section.

    A clone is tallied in exactly one category with priority
    autoimmune > b27 > pathogen (see the if/elif chain below).
    Output goes to "<outbasename>.txt".
    """
    outfile = "%s.txt" % outbasename
    fh = open(outfile, 'w')
    # Header: filtering parameters and keyword groups used.
    fh.write("#MinPositives: %f; MinNumberOfSamples: %d; MinLen: %d\n" %(options.minPos, options.minNumSamples, options.minLen))
    fh.write("#Keywords:\n")
    for g, k in group2keywords.iteritems():
        fh.write("#\t%s:\t%s\n" %(g, ','.join(k)))
    cutoff = 1  # NOTE(review): unused -- confirm whether a cutoff filter was intended here.
    numAuto = 0 #number of clones with at least one hit passed cutoff and matches one of the autoimmuneKeywords
    numB27 = 0
    numPathogen = 0
    for clone in clones:
        #vs, ds, js, sample2size = getCloneInfo(clone)
        items = clone.split(';')
        id = '.'.join( [items[0], items[1]] )  # NOTE(review): unused
        if clone in clone2hits:
            hits = clone2hits[ clone ]
            matchAuto = False
            matchB27 = False
            matchPathogen = False
            for i, hit in enumerate(hits):
                #Check to see if any keywords matched:
                if not matchAuto and checkKeywords(hit[0], group2keywords['autoimmune']):
                    matchAuto = True
                if not matchB27 and checkKeywords(hit[0], group2keywords['b27']):
                    matchB27 = True
                if not matchPathogen and checkKeywords(hit[0], group2keywords['pathogen']):
                    matchPathogen = True
            if matchAuto or matchB27 or matchPathogen:
                # Print the clone header plus every hit that matched any group:
                # index, title, then query / match / subject alignment lines.
                fh.write("\n>%s\n" %clone)
                for i, hit in enumerate(hits):
                    hasKeyword = False
                    for g, keywords in group2keywords.iteritems():
                        if checkKeywords(hit[0], keywords):
                            hasKeyword = True
                            break
                    if hasKeyword:
                        fh.write("\t%d/ %s\n" %(i, hit[0]))
                        fh.write("\t\t%s\n" %hit[2])
                        fh.write("\t\t%s\n" %hit[3])
                        fh.write("\t\t%s\n" %hit[4])
            # Mutually exclusive tallies: autoimmune takes precedence over
            # b27, which takes precedence over pathogen.
            if matchAuto:
                numAuto += 1
            elif matchB27:
                numB27 += 1
            elif matchPathogen:
                numPathogen += 1
    total = len(clones)
    numhits = len(clone2hits)
    fh.write("\n### Summary ###\n")
    fh.write("Total\tNumHits\t% hits/total\tnumAuto\t% auto/total\t% auto/hits\tnumB27\t% b27/total\t% b27/hits\tnumPathogen\t% pathogen/total\t% pathogen/hits\n")
    fh.write("%d\t%d\t%f\t%d\t%f\t%f\t%d\t%f\t%f\t%d\t%f\t%f\n" %(total, numhits, getPc(numhits, total), numAuto, getPc(numAuto, total), getPc(numAuto, numhits), numB27, getPc(numB27, total), getPc(numB27, numhits), numPathogen, getPc(numPathogen, total), getPc(numPathogen, numhits)) )
    fh.close()
####### LATEX TABLE ########
def myTabHeader(f, samples):
f.write("\\begin{sidewaystable}\n")
#f.write("\\begin{table}\n")
f.write("\\centering\n")
f.write("\\scalebox{0.9}{%\n")
#f.write("\\begin{tabular}{c|c|c|%s|c|c|c}\n" %( "|".join(["c" for s in samples]) ) )
f.write("\\begin{tabular}{l|l|l|%s|l|l|l}\n" %( "|".join(["l" for s in samples]) ) )
#f.write(" \\multicolumn{3}{c|}{Clones} & \\multicolumn{%d}{c|}{Samples} & \\multicolumn{3}{c}{Hits} \\\\\n" %(len(samples)) )
f.write(" \\multicolumn{3}{c|}{\\textbf{Clones}} & \\multicolumn{%d}{c|}{\\textbf{Samples}} & \\multicolumn{3}{c}{\\textbf{Hits}} \\\\\n" %(len(samples)) )
#f.write("\\cline{2-%d}\n" %( len(colnames)*2 + 1 ))
f.write("\\hline\n")
#f.write("V & CDR3 & J & %s & CDR3 & Alignment & Disease \\\\\n" %(" & ".join(samples)))
f.write("\\textbf{V} & \\textbf{CDR3} & \\textbf{J} & \\textbf{%s} & \\textbf{CDR3} & \\textbf{Alignment} & \\textbf{Disease} \\\\\n" %("} & \\textbf{".join(samples)))
f.write("\\hline\n")
def tab(f, clones, clone2hits, group2keywords, options, samples):
    """Write one LaTeX table row-group per clone that has at least one hit
    matching a non-b27, non-pathogen keyword group (i.e. autoimmune terms).

    Each group spans one multirow per V/CDR3/J/sample-count cell and one
    physical row per matching hit (subject sequence, alignment string,
    disease abbreviation from the paper title).
    """
    for clone in clones:
        vs, js, ds, sample2size = getCloneInfo(clone)
        if clone in clone2hits:
            hits = clone2hits[clone]
            hitsWithKeyword = [] #list of hits that have at least 1 keyword
            for hit in hits:
                for g, kw in group2keywords.iteritems():
                    # Only non-b27/non-pathogen groups qualify a hit here.
                    if g == 'b27' or g == 'pathogen':
                        continue
                    if checkKeywords(hit[0], kw):
                        hitsWithKeyword.append(hit)
                        break
            if len(hitsWithKeyword) == 0: #no hit with keyword
                continue
            # All hits of a clone share the query CDR3 sequence.
            seq = hits[0][2]
            numrow = len(hitsWithKeyword)
            #First line
            f.write("\\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & " %(numrow, vs, numrow, seq, numrow, js) ) #Write V, CDR3, J
            for s in samples:
                # Display names come from iseqlib; empty cell if the clone
                # was not seen in this sample.
                name = iseqlib.properName2name(s)
                if name in sample2size:
                    count = sample2size[name]
                    f.write("\\multirow{%d}{*}{%d} & " % (numrow, count))
                else:
                    f.write("\\multirow{%d}{*}{} & " % (numrow))
            f.write("%s & %s & %s \\\\\n " %( hitsWithKeyword[0][4], hitsWithKeyword[0][3], parsePaperInfo(hitsWithKeyword[0][0]) ))
            #Other hits: only the three hit columns change; the multirow
            #cells are skipped with empty "&" placeholders.
            for i in xrange(1, numrow):
                f.write("\\cline{%d-%d}\n" %(3 + len(samples) + 1, 3 + len(samples) + 3))
                f.write(" &"*( 3 + len(samples) ) )
                h = hitsWithKeyword[i]
                f.write( "%s & %s & %s \\\\\n" %(h[4], h[3], parsePaperInfo(h[0])) )
            f.write("\\hline\n")
def printTexTab(clones, clone2hits, group2keywords, options, outbasename):
    """Render the keyword-filtered hit table as <outbasename>.tex
    (sideways table, fixed AS1-AS5/H1-H2 sample columns)."""
    tex_path = "%s.tex" % outbasename
    out = open(tex_path, 'w')
    iseqlib.writeDocumentStart(out)
    sample_order = ['AS1', 'AS2', 'AS3', 'AS4', 'AS5', 'H1', 'H2']
    myTabHeader(out, sample_order)
    tab(out, clones, clone2hits, group2keywords, options, sample_order)
    caption_text = ''
    label_text = ''
    iseqlib.sidewaystableCloser(out, caption_text, label_text)
    iseqlib.writeDocumentEnd(out)
    out.close()
####### LATEX TABLE FORMAT 0 ===============
def myTabHeader0(f):
    """Write the format-0 LaTeX table preamble (fixed 8-column layout:
    V | CDR3 | J | Name | Size | CDR3 | Alignment | Paper) to *f*."""
    header_lines = (
        "\\begin{table}\n",
        "\\centering\n",
        "\\scalebox{0.4}{%\n",
        "\\begin{tabular}{c|c|c|c|c|c|c|c}\n",
        " \\multicolumn{3}{c|}{Clones} & \\multicolumn{2}{c|}{Samples} & \\multicolumn{3}{c}{Hits} \\\\\n",
        "\\hline\n",
        "V & CDR3 & J & Name & Size & CDR3 & Alignment & Paper \\\\\n",
        "\\hline\n",
    )
    for line in header_lines:
        f.write(line)
def tab0(f, clones, clone2hits, group2keywords, options):
    """Write one LaTeX row-group per clone to *f* (format 0).

    Unlike tab(), samples are not fixed columns: sample name/size pairs are
    listed row by row alongside the hits, so the row count per clone is
    max(number of samples, number of keyword-matching hits).
    """
    for clone in clones:
        vs, js, ds, sample2size = getCloneInfo(clone)
        if clone in clone2hits:
            hits = clone2hits[clone]
            hitsWithKeyword = []  # list of hits that have at least 1 keyword
            for hit in hits:
                for g, kw in group2keywords.iteritems():
                    # b27/pathogen keyword groups are deliberately excluded
                    if g == 'b27' or g == 'pathogen':
                        continue
                    if checkKeywords(hit[0], kw):
                        hitsWithKeyword.append(hit)
                        break
            if len(hitsWithKeyword) == 0:  # no hit with keyword
                continue
            seq = hits[0][2]
            samples = sorted( [iseqlib.properName(s) for s in sample2size.keys()] )
            # enough rows for whichever list is longer
            numrow = max( [len(samples), len(hitsWithKeyword)] )
            f.write("\\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & \\multirow{%d}{*}{%s} & " %(numrow, vs, numrow, seq, numrow, js) ) #Write V, CDR3, J
            # first row carries the first sample and the first hit
            f.write("%s & %d & %s & %s & %s \\\\\n " %( samples[0], sample2size[iseqlib.properName2name(samples[0])], hitsWithKeyword[0][4], hitsWithKeyword[0][3], parsePaperInfo(hitsWithKeyword[0][0]) ))
            for i in xrange(1, numrow):
                f.write("\\cline{4-8}\n")
                f.write(" & & & ")
                if i < len(samples):
                    s = samples[i]
                    f.write(" %s & %d &" %(s, sample2size[iseqlib.properName2name(s)]) )
                else:
                    # no more samples: empty Name/Size cells
                    f.write(" & & ")
                if i < len(hitsWithKeyword):
                    h = hitsWithKeyword[i]
                    f.write( "%s & %s & %s \\\\\n" %(h[4], h[3], parsePaperInfo(h[0])) )
                else:
                    # no more hits: empty hit cells
                    f.write(" & & \\\\\n")
            f.write("\\hline\n")
def printTexTab0(clones, clone2hits, group2keywords, options, outbasename):
    """Render the keyword-filtered hit table as <outbasename>.tex using the
    format-0 layout (row-wise samples, plain table environment)."""
    outfile = "%s.tex" %outbasename
    f = open(outfile, 'w')
    iseqlib.writeDocumentStart(f)
    # BUG FIX: this format-0 printer previously called myTabHeader(f) and
    # tab(f, ..., options) -- both of which require an extra 'samples'
    # argument and belong to the other layout, so the call raised TypeError.
    # It must use the format-0 helpers defined directly above.
    myTabHeader0(f)
    tab0(f, clones, clone2hits, group2keywords, options)
    label = ''
    captionStr = ''
    iseqlib.tableCloser(f, captionStr, label)
    iseqlib.writeDocumentEnd(f)
    f.close()
def readSample2host(file):
    """Parse a whitespace-delimited mapping file into nested dicts.

    Each useful line has at least three fields: <group> <sample> <host>.
    Lines with fewer fields are skipped.  Returns
    {group: {sample: host}}.
    """
    group2sample2host = {}
    fh = open(file, 'r')
    for line in fh:
        fields = line.strip().split()
        if len(fields) < 3:
            continue
        group, sample, host = fields[0], fields[1], fields[2]
        group2sample2host.setdefault(group, {})[sample] = host
    fh.close()
    return group2sample2host
def addOptions(parser):
    """Register this script's command-line options on *parser* (optparse).

    Options cover input/output locations, hit-filtering thresholds
    (match quality, length, sample/patient/control counts) and an
    optional sample-to-host mapping file.
    """
    option_specs = [
        (('-i', '--infile'), dict(dest='infile', help='Input xml file')),
        (('-o', '--outdir'), dict(dest='outdir', help='Output directory')),
        (('-b', '--basename'), dict(dest='basename', default='hits', help='Output files basename. Default=%default')),
        (('-p', '--positive'), dict(dest='minPos', type='float', default=0.9, help='Minimum portion of positive matches. Default=%default')),
        (('-k', '--keywords'), dict(dest='keywords', default=None, help='Only hits matching at least one keyword are reported')),
        (('-l', '--len'), dict(dest='minLen', type='int', default=10, help='Minimum sequence length to be included in the output. Default=%default')),
        (('-s', '--samples'), dict(dest='minNumSamples', type='int', default=1, help='Minimum number of samples containing the sequence. Default=%default')),
        (('--patients',), dict(dest='minNumPatients', type='int', default=0, help='Minimum number of patients containing the sequence. Default=%default')),
        (('--controls',), dict(dest='minNumControls', type='int', default=0, help='Minimum number of controls containing the sequence. Default=%default')),
        (('--minPatientCount',), dict(dest='minPatientCount', type='int', default=1, help='Minimum size a clone must have in a patient sample to be considered as "present" in that sample. Default=%default')),
        (('--minControlCount',), dict(dest='minControlCount', type='int', default=1, help='Minimum size a clone must have in a control sample to be considered as "present" in that sample. Default=%default')),
        (('--maxPatientCount',), dict(dest='maxPatientCount', type='int', default=10000000, help='Maximun size a clone must have in a patient sample to be considered as "present" in that sample. Default=%default')),
        (('--maxControlCount',), dict(dest='maxControlCount', type='int', default=10000000, help='Maximun size a clone must have in a control sample to be considered as "present" in that sample. Default=%default')),
        (('--maxPatientOutofrange',), dict(dest='maxPatientOutofrange', type='int', default=100, help='Max number of patients with outofrange counts allowed. Default=%default')),
        (('--maxControlOutofrange',), dict(dest='maxControlOutofrange', type='int', default=100, help='Max number of controls with outofrange counts allowed. Default=%default')),
        (('--sample2host',), dict(dest='sample2host', help='Optional. File contains mapping between samples and host. Format:<Group> <sample> <host>. Ex: control asBD B ')),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
def main():
    """Parse options, read the BLAST XML, and emit text + LaTeX reports."""
    parser = iseqlib.initOptions()
    addOptions(parser)
    options, args = parser.parse_args()
    # Keyword groups: '-' selects the built-in defaults, any other value is
    # treated as a file mapping keyword groups to keywords.
    group2keywords = {}  # key = keywordGroup, val = list of keywords
    if options.keywords == '-':
        group2keywords = getDefaultKeywords()
    elif options.keywords:
        group2keywords, kw2group = iseqlib.readGroup2samples(options.keywords)
    group2sample2host = {}
    if options.sample2host:
        group2sample2host = readSample2host(options.sample2host)
    clones, clone2hits = readNcbiXml(options, group2sample2host)
    outbasename = os.path.join(options.outdir, options.basename)
    printTab(clones, clone2hits, group2keywords, options, outbasename)
    printTexTab(clones, clone2hits, group2keywords, options, outbasename)
# Entry point: only run the pipeline when executed as a script, not on import.
if __name__ == '__main__':
    main()
| ngannguyen/immunoseq | src/parseLiteratureBlast_toTex.py | Python | mit | 21,026 | [
"BLAST"
] | f45a51e94a19ef883fda4a8c75991456eacb56e0046ef9501a2ae6bd760e0191 |
from os import path
from numpy import log10, array, float, NaN, nanmin, nanmax, savetxt, hstack, float
from pysces import ModelMap, ParScanner, Scanner
from sympy import sympify, diff, Symbol
from ._thermokin_file_tools import get_subs_dict, get_reqn_path, \
get_all_terms, get_term_types_from_raw_data, create_reqn_data, \
write_reqn_file, create_gamma_keq_reqn_data, term_to_file
from ..latextools import LatexExpr
from ..modeltools import make_path, get_file_path
from ..utils.misc import do_safe_state, get_value, silence_print, print_f, \
is_number, stringify, scanner_range_setup, DotDict, formatter_factory, \
find_min, find_max
from ..utils.plotting import Data2D
__author__ = 'carl'
__all__ = ['ThermoKin']
def mult(lst):
    """
    Multiplies values of a list with each other and returns the result.

    An empty list yields the multiplicative identity, 1.

    Parameters
    ----------
    lst : list of numbers

    Returns
    -------
    number
        Same type as numbers in ``lst``.
    """
    product = 1
    for factor in lst:
        product = product * factor
    return product
def get_repr_latex(obj):
    """
    Build the string returned by the ``_repr_latex_`` method of ThermoKin
    objects.  The magnitude of ``obj.value`` selects the float format:
    scientific notation for very small/large non-zero values, fixed-point
    otherwise (zero always renders as fixed-point).

    Parameters
    ----------
    obj : RateTerm, Term or RateEqn

    Returns
    -------
    str
    """
    magnitude = abs(obj.value)
    use_scientific = obj.value != 0 and (magnitude < 0.001 or magnitude > 10000)
    template = '$%s = %s = %.3e$' if use_scientific else '$%s = %s = %.3f$'
    return template % (obj.latex_name, obj.latex_expression, obj.value)
@silence_print
def silent_state(mod):
    """Run MCA and steady-state evaluation on *mod* with all console
    output suppressed by the ``silence_print`` decorator."""
    mod.doMca()
    mod.doState()
class ThermoKin(object):
    """Thermodynamic/kinetic ("ThermoKin") analysis of a PySCeS model.

    Each reaction's rate equation is split into multiplicative terms read
    from a ``.reqn`` file (auto-generated when missing or when
    ``overwrite`` is set).  Per-reaction :class:`RateEqn` objects and their
    term/elasticity results are exposed via ``reaction_results`` and
    ``ec_results``.
    """

    def __init__(self, mod, path_to_reqn_file=None, overwrite=False,
                 warnings=True, ltxe=None):
        super(ThermoKin, self).__init__()
        self.mod = mod
        # ensure the model has a steady state and MCA results before analysis
        silent_state(mod)
        self._analysis_method = 'thermokin'
        self._working_dir = make_path(self.mod, self._analysis_method)
        # reuse a caller-supplied LatexExpr to share its symbol cache
        if ltxe:
            self._ltxe = ltxe
        else:
            self._ltxe = LatexExpr(mod)
        if path_to_reqn_file:
            self._path_to = path_to_reqn_file
        else:
            self._path_to = get_reqn_path(self.mod)
        self._do_auto_actions(overwrite, warnings)
        self._raw_data, self._add_raw_data = get_all_terms(self._path_to)
        self._do_gamma_keq(overwrite, warnings)
        term_types = get_term_types_from_raw_data(self._raw_data).union(
            get_term_types_from_raw_data(self._add_raw_data))
        self._ltxe.add_term_types(term_types)
        self._populate_object()
        self._populate_ec_results()

    def _do_gamma_keq(self, overwrite, warnings):
        """Generate missing Gamma/Keq terms for reactions that lack them.

        Skipped entirely when ``overwrite`` is set, since the file was just
        regenerated with these terms included.
        """
        if overwrite:
            return None
        gamma_keq_todo = []
        add_raw_data = self._add_raw_data
        for reaction in self._raw_data.iterkeys():
            if not add_raw_data.get(reaction) or not add_raw_data.get(
                    reaction).get('gamma_keq'):
                gamma_keq_todo.append(reaction)
        if len(gamma_keq_todo) != 0:
            reaction_printout = ', '.join(gamma_keq_todo[:-1]) + ' or ' + \
                                gamma_keq_todo[-1]
            print_f('%s does not contain Gamma/Keq terms for %s:' % (
                self._path_to, reaction_printout), warnings)
            gamma_keq_data, messages = create_gamma_keq_reqn_data(self.mod)
            for required in gamma_keq_todo:
                print_f('{:10.10}: {}'.format(required, messages[required]),
                        warnings)
                if required not in add_raw_data:
                    add_raw_data[required] = {}
                add_raw_data[required]['gamma_keq'] = gamma_keq_data[required]

    def _do_auto_actions(self, overwrite, warnings):
        """(Re)create the ``.reqn`` term file when absent or when
        ``overwrite`` is requested, warning the user either way."""
        condition_1 = path.exists(self._path_to) and overwrite
        condition_2 = not path.exists(self._path_to)
        if condition_1:
            print_f(
                'The file %s will be overwritten with automatically generated file.' % self._path_to,
                warnings)
        elif condition_2:
            print_f('A new file will be created at "%s".' % self._path_to,
                    warnings)
        if condition_1 or condition_2:
            ma_terms, vc_binding_terms, gamma_keq_terms, messages = create_reqn_data(
                self.mod)
            for k, v in messages.iteritems():
                print_f('{:10.10}: {}'.format(k, v), warnings)
            write_reqn_file(self._path_to, self.mod.ModelFile, ma_terms,
                            vc_binding_terms, gamma_keq_terms, messages)

    def _populate_object(self):
        """Build one RateEqn per reaction and expose it both as an
        attribute (``self.J_<reaction>``) and in ``reaction_results``."""
        self.reaction_results = DotDict()
        self.reaction_results._make_repr('"$" + v.latex_name + "$"', 'v.value',
                                         formatter_factory())
        for reaction, terms_dict in self._raw_data.iteritems():
            additional_terms = self._add_raw_data.get(reaction)
            reqn_obj = RateEqn(self.mod,
                               reaction,
                               terms_dict,
                               self._ltxe,
                               additional_terms)
            setattr(self, 'J_' + reaction, reqn_obj)
            self.reaction_results['J_' + reaction] = reqn_obj
            for term in reqn_obj.terms.itervalues():
                self.reaction_results[term.name] = term

    def _populate_ec_results(self):
        """Aggregate elasticity results from all rate equations."""
        self.ec_results = DotDict()
        self.ec_results._make_repr('"$" + v.latex_name + "$"', 'v.value',
                                   formatter_factory())
        for rate_eqn in self.reaction_results.itervalues():
            self.ec_results.update(rate_eqn.ec_results)

    def save_results(self, file_name=None, separator=',',fmt='%.9f'):
        """Save a CSV summary (name, value, latex name/expression) of all
        reaction/term values and non-zero elasticities.

        Parameters
        ----------
        file_name : str, optional
            Output path; defaults to ``tk_summary.csv`` in the working dir.
        separator : str, optional
            Column delimiter.
        fmt : str, optional
            Float format for the value column.
        """
        file_name = get_file_path(working_dir=self._working_dir,
                                  internal_filename='tk_summary',
                                  fmt='csv',
                                  file_name=file_name, )
        values = []
        max_len = 0  # longest latex expression, used to size the string dtype
        for reaction_name in sorted(self.reaction_results.keys()):
            cols = (reaction_name,
                    self.reaction_results[reaction_name].value,
                    self.reaction_results[reaction_name].latex_name,
                    self.reaction_results[reaction_name].latex_expression)
            values.append(cols)
            if len(cols[3]) > max_len:
                max_len = len(cols[3])
        for elasticity_name in sorted(
                [ec for ec in self.ec_results.keys() if ec.startswith('ec')]):
            if self.ec_results[elasticity_name].expression != 0:
                # include the partial (term) elasticities related to this ec
                related_ecs = sorted([ec for ec in self.ec_results.keys() if
                                      elasticity_name in ec])
                for related_ec_name in related_ecs:
                    cols = (related_ec_name,
                            self.ec_results[related_ec_name].value,
                            self.ec_results[related_ec_name].latex_name,
                            self.ec_results[related_ec_name].latex_expression)
                    values.append(cols)
                    if len(cols[3]) > max_len:
                        max_len = len(cols[3])
        str_fmt = 'S%s' % max_len
        head = ['name', 'value', 'latex_name', 'latex_expression']
        X = array(values,
                  dtype=[(head[0], str_fmt),
                         (head[1], 'float'),
                         (head[2], str_fmt),
                         (head[3], str_fmt)])
        try:
            savetxt(fname=file_name,
                    X=X,
                    header=separator.join(head),
                    delimiter=separator,
                    fmt=['%s', fmt, '%s', '%s'], )
        except IOError as e:
            print e.strerror
class RateEqn(object):
    """A reaction's rate equation expressed as a product of term
    expressions, with lazily-computed sympy/latex representations,
    elasticity coefficients and parameter-scan helpers."""

    def __init__(self, mod, name, term_dict, ltxe, additional_terms=None):
        super(RateEqn, self).__init__()
        self.mod = mod
        self.terms = DotDict()
        self.terms._make_repr('"$" + v.latex_name + "$"', 'v.value',
                              formatter_factory())
        # the full rate equation is the product of all main terms
        self._unfac_expression = 1
        self.name = 'J_' + name
        self._rname = name
        self._ltxe = ltxe
        for val in term_dict.itervalues():
            self._unfac_expression = self._unfac_expression * (sympify(val))
        for term_name, expression in term_dict.iteritems():
            term = RateTerm(parent=self,
                            mod=self.mod,
                            name='J_%s_%s' % (self._rname, term_name),
                            rname=term_name,
                            expression=expression,
                            ltxe=self._ltxe)
            setattr(self, term_name, term)
            self.terms[term_name] = term
        # additional terms (e.g. gamma/keq) do not contribute to the product
        if additional_terms:
            for term_name, expression in additional_terms.iteritems():
                term = AdditionalRateTerm(parent=self,
                                          mod=self.mod,
                                          name='J_%s_%s' % (
                                              self._rname, term_name),
                                          rname=term_name,
                                          expression=expression,
                                          ltxe=self._ltxe)
                setattr(self, term_name, term)
                self.terms[term_name] = term
        # lazily-populated caches backing the properties below
        self._value = None
        self._str_expression_ = None
        self._expression = None
        self._latex_expression = None
        self._latex_name = None
        self.ec_results = DotDict()
        self.ec_results._make_repr('"$" + v.latex_name + "$"', 'v.value',
                                   formatter_factory())
        self._populate_ec_results()

    def _populate_ec_results(self):
        """Create symbolic elasticities d(ln v)/d(ln x) for every symbol in
        the rate equation and merge in the terms' partial elasticities."""
        expression_symbols = self._unfac_expression.atoms(Symbol)
        for each in expression_symbols:
            each = sympify(each)
            ec = diff(self._unfac_expression, each) * \
                 (each / self._unfac_expression)
            ec_name = 'ec%s_%s' % (self._rname, each)
            self.ec_results[ec_name] = Term(self, self.mod, ec_name,
                                            self._rname, ec,
                                            self._ltxe)
        for each in self.terms.itervalues():
            self.ec_results.update(each.ec_results)

    def _repr_latex_(self):
        return get_repr_latex(self)

    @property
    def _str_expression(self):
        # string form of the unfactored expression, cached
        if not self._str_expression_:
            self._str_expression_ = str(self._unfac_expression)
        return self._str_expression_

    @property
    def expression(self):
        # factored sympy expression, cached
        if not self._expression:
            self._expression = self._unfac_expression.factor()
        return self._expression

    @property
    def value(self):
        # always recomputed from the model's current state
        self._calc_value()
        return self._value

    @property
    def latex_name(self):
        if not self._latex_name:
            self._latex_name = self._ltxe.expression_to_latex(self.name)
        return self._latex_name

    @property
    def latex_expression(self):
        if not self._latex_expression:
            self._latex_expression = self._ltxe.expression_to_latex(
                self.expression,
                mul_symbol='dot')
        return self._latex_expression

    def _calc_value(self):
        """Evaluate all main terms with current model values; the rate value
        is the product of the main-term values (AdditionalRateTerms are
        excluded from both evaluation and the product)."""
        subs_dict = get_subs_dict(self._unfac_expression, self.mod)
        for each in self.terms.itervalues():
            if type(each) is not AdditionalRateTerm:
                each._calc_value(subs_dict)
        self._value = mult([each._value for each in self.terms.itervalues() if
                            type(each) is not AdditionalRateTerm])

    def _valscan_x(self, parameter, scan_range):
        """Scan ``parameter`` over ``scan_range``, collecting term values and
        the rate value per point (NaN for invalid states)."""
        scan_res = [list() for _ in range(len(self.terms.values()) + 2)]
        scan_res[0] = scan_range
        for parvalue in scan_range:
            state_valid = do_safe_state(self.mod, parameter, parvalue)
            for i, term in enumerate(self.terms.values()):
                if state_valid:
                    scan_res[i + 1].append(term.value)
                else:
                    scan_res[i + 1].append(NaN)
            # NOTE(review): relies on `i` surviving the inner loop; raises
            # NameError if self.terms is ever empty -- confirm intended.
            if state_valid:
                scan_res[i + 2].append(self.value)
            else:
                scan_res[i + 2].append(NaN)
        return scan_res

    def _valscan(self,
                 parameter,
                 scan_range,
                 par_scan=False,
                 par_engine='multiproc'):
        """Scan ``parameter`` with a PySCeS (Par)Scanner and evaluate each
        term plus the full rate by substituting scanner output columns.

        Returns an array whose first column is the parameter, followed by
        one column per term, then the full rate.
        """
        # choose between parscanner or scanner
        if par_scan:
            # This is experimental
            scanner = ParScanner(self.mod, par_engine)
        else:
            scanner = Scanner(self.mod)
        scanner.quietRun = True
        # parameter scan setup and execution
        start, end, points, log = scanner_range_setup(scan_range)
        scanner.addScanParameter(parameter,
                                 start=start,
                                 end=end,
                                 points=points,
                                 log=log)
        needed_symbols = [parameter] + \
                         stringify(list(self.expression.atoms(Symbol)))
        scanner.addUserOutput(*needed_symbols)
        scanner.Run()
        # getting term/reaction values via substitution
        subs_dict = {}
        for i, symbol in enumerate(scanner.UserOutputList):
            subs_dict[symbol] = scanner.UserOutputResults[:, i]
        term_expressions = [term.expression for term in self.terms.values()]\
            + [self.expression]
        term_str_expressions = stringify(term_expressions)
        parameter_values = subs_dict[parameter].reshape(points, 1)
        scan_res = []
        # collecting results in an array
        for expr in term_str_expressions:
            scan_res.append(get_value(expr, subs_dict))
        scan_res = array(scan_res).transpose()
        scan_res = hstack([parameter_values, scan_res])
        return scan_res

    def _ecscan(self,
                parameter,
                scan_range,
                par_scan=False,
                par_engine='multiproc'):
        """Like ``_valscan`` but evaluates the non-zero elasticity
        coefficients (gamma_keq elasticities excluded) per scan point."""
        # choose between parscanner or scanner
        if par_scan:
            # This is experimental
            scanner = ParScanner(self.mod, par_engine)
        else:
            scanner = Scanner(self.mod)
        scanner.quietRun = True
        # parameter scan setup and execution
        start, end, points, log = scanner_range_setup(scan_range)
        scanner.addScanParameter(parameter,
                                 start=start,
                                 end=end,
                                 points=points,
                                 log=log)
        needed_symbols = [parameter] + \
                         stringify(list(self.expression.atoms(Symbol)))
        scanner.addUserOutput(*needed_symbols)
        scanner.Run()
        # getting term/reaction values via substitution
        subs_dict = {}
        for i, symbol in enumerate(scanner.UserOutputList):
            subs_dict[symbol] = scanner.UserOutputResults[:, i]
        # we include all ec_terms that are not zero (even though they are
        # included in the main dict)
        ec_term_expressions = [ec_term.expression for ec_term in
                               self.ec_results.values() if
                               ec_term.expression != 0 and
                               not ec_term.name.endswith('gamma_keq')]
        ec_term_str_expressions = stringify(ec_term_expressions)
        parameter_values = subs_dict[parameter].reshape(points, 1)
        scan_res = []
        # collecting results in an array
        for expr in ec_term_str_expressions:
            val = get_value(expr, subs_dict)
            scan_res.append(val)
        scan_res = array(scan_res).transpose()
        scan_res = hstack([parameter_values, scan_res])
        return scan_res

    def _ecscan_x(self, parameter, scan_range):
        """Point-by-point elasticity scan using do_safe_state (NaN for
        invalid states); slower fallback to ``_ecscan``."""
        mca_objects = [ec_term for ec_term in self.ec_results.values() if
                       ec_term.expression != 0 and not ec_term.name.endswith(
                           'gamma_keq')]
        scan_res = [list() for _ in range(len(mca_objects) + 1)]
        scan_res[0] = scan_range
        for parvalue in scan_range:
            state_valid = do_safe_state(self.mod, parameter, parvalue,
                                        type='mca')
            for i, term in enumerate(mca_objects):
                if state_valid:
                    scan_res[i + 1].append(term.value)
                else:
                    scan_res[i + 1].append(NaN)
        return scan_res

    def do_par_scan(self,
                    parameter,
                    scan_range,
                    scan_type='value',
                    init_return=True,
                    par_scan=False,
                    par_engine='multiproc'):
        """Scan ``parameter`` and package the results as a plottable Data2D.

        Parameters
        ----------
        parameter : str
            Model attribute to vary.
        scan_range : iterable
            Scan values; log/linear axis is inferred from the range.
        scan_type : str
            'value' (term/rate values) or 'elasticity' (elasticities).
        init_return : bool
            Restore the parameter's initial value (and redo MCA) afterwards.
        par_scan, par_engine
            Passed through to the underlying scanner.

        Returns
        -------
        Data2D
        """
        try:
            assert scan_type in ['elasticity', 'value'], 'scan_type must be one\
 of "value" or "elasticity".'
        except AssertionError as ae:
            # NOTE(review): the error is printed, not re-raised, so an
            # invalid scan_type falls through and later raises NameError.
            print ae
        init = getattr(self.mod, parameter)
        if scan_type == 'elasticity':
            mca_objects = [ec_term for ec_term in self.ec_results.values() if
                           ec_term.expression != 0 and
                           not ec_term.name.endswith('gamma_keq')]
            additional_cat_classes = {
                'All Coefficients': ['Term Elasticities']}
            additional_cats = {
                'Term Elasticities': [ec_term.name for ec_term in mca_objects
                                      if
                                      ec_term.name.startswith('p')]}
            column_names = [parameter] + \
                           [ec_term.name for ec_term in mca_objects]
            y_label = 'Elasticity Coefficient'
            scan_res = self._ecscan(parameter,
                                    scan_range,
                                    par_scan,
                                    par_engine)
            data_array = scan_res
            # ylim = [nanmin(data_array[:, 1:]),
            #         nanmax(data_array[:, 1:]) * 1.1]
            yscale = 'linear'
            category_manifest = {pec: True for pec in
                                 additional_cats['Term Elasticities']}
            category_manifest['Elasticity Coefficients'] = True
            category_manifest['Term Elasticities'] = True
        elif scan_type == 'value':
            additional_cat_classes = {'All Fluxes/Reactions/Species':
                                      ['Term Rates']}
            term_names = [term.name for term in self.terms.values()]
            additional_cats = {'Term Rates': term_names}
            column_names = [parameter] + term_names + [self.name]
            y_label = 'Reaction/Term rate'
            scan_res = self._valscan(parameter,
                                     scan_range,
                                     par_scan,
                                     par_engine)
            data_array = scan_res
            # ylim = [nanmin(data_array[:, 1:]),
            #         nanmax(data_array[:, 1:]) * 1.1]
            yscale = 'log'
            category_manifest = {'Flux Rates': True, 'Term Rates': True}
        if init_return:
            # restore the model to its pre-scan state
            self.mod.SetQuiet()
            setattr(self.mod, parameter, init)
            self.mod.doMca()
            self.mod.SetLoud()
        mm = ModelMap(self.mod)
        species = mm.hasSpecies()
        if parameter in species:
            x_label = '[%s]' % parameter.replace('_', ' ')
        else:
            x_label = parameter
        xscale = 'log' if scanner_range_setup(scan_range)[3] else 'linear'
        ax_properties = {'ylabel': y_label,
                         'xlabel': x_label,
                         'xscale': xscale,
                         'yscale': yscale, }
        data = Data2D(mod=self.mod,
                      column_names=column_names,
                      data_array=data_array,
                      ltxe=self._ltxe,
                      analysis_method='thermokin',
                      ax_properties=ax_properties,
                      additional_cat_classes=additional_cat_classes,
                      additional_cats=additional_cats,
                      category_manifest=category_manifest,)
        if scan_type == 'elasticity':
            # tag each partial-elasticity line with its parent ec category
            ec_names = [ec_term.name for ec_term in mca_objects if
                        ec_term.name.startswith('ec')]
            for line in data._lines:
                for ec_name in ec_names:
                    condition1 = line.name != ec_name
                    condition2 = self.ec_results[line.name]._rname == ec_name
                    if condition1 and condition2:
                        line.categories.append(ec_name)
        return data

    # Arithmetic operators combine rate equations/terms into AdditionalTerms
    # via generic_term_operation (defined at module level).
    def __add__(self, other):
        return generic_term_operation(self, other, '+')

    def __mul__(self, other):
        return generic_term_operation(self, other, '*')

    def __sub__(self, other):
        return generic_term_operation(self, other, '-')

    def __div__(self, other):
        return generic_term_operation(self, other, '/')

    def __radd__(self, other):
        return generic_term_operation(self, other, '+')

    def __rmul__(self, other):
        return generic_term_operation(self, other, '*')

    def __rsub__(self, other):
        return generic_term_operation(self, other, 'rsub')

    def __rdiv__(self, other):
        return generic_term_operation(self, other, 'rdiv')

    def __neg__(self):
        return AdditionalTerm(self,
                              self.mod,
                              '-' + self.name,
                              '-' + self._rname,
                              (-self._unfac_expression),
                              self._ltxe,
                              '-' + self.name)

    def __pow__(self, power, modulo=None):
        return AdditionalTerm(self,
                              self.mod,
                              self.name + '**' + str(power),
                              self._rname + '**' + str(power),
                              self._unfac_expression ** power,
                              self._ltxe,
                              self.name + '**' + str(power))
class Term(object):
    """A single symbolic expression belonging to a rate equation, with
    cached factored/string/latex forms and a model-state-dependent value.
    Arithmetic operators combine Terms into AdditionalTerms."""

    def __init__(self, parent, mod, name, rname, expression, ltxe):
        super(Term, self).__init__()
        self.name = name
        self._rname = rname
        self._unfac_expression = sympify(expression)
        self._parent = parent
        self.mod = mod
        self._ltxe = ltxe
        # properties (lazily-populated caches)
        self._expression = None
        self._str_expression_ = None
        self._value = None
        self._latex_name = None
        self._latex_expression = None

    def _repr_latex_(self):
        return get_repr_latex(self)

    @property
    def _str_expression(self):
        # string form of the unfactored expression, cached
        if not self._str_expression_:
            self._str_expression_ = str(self._unfac_expression)
        return self._str_expression_

    @property
    def expression(self):
        # factored sympy expression, cached
        if not self._expression:
            self._expression = self._unfac_expression.factor()
        return self._expression

    @property
    def value(self):
        # always recomputed from the model's current state
        self._calc_value()
        return self._value

    @property
    def latex_name(self):
        if not self._latex_name:
            self._latex_name = self._ltxe.expression_to_latex(self.name)
        return self._latex_name

    @property
    def latex_expression(self):
        if not self._latex_expression:
            self._latex_expression = self._ltxe.expression_to_latex(
                self.expression,
                mul_symbol='dot')
        return self._latex_expression

    def _calc_value(self, subs_dict=None):
        """Numerically evaluate the term; an explicit ``subs_dict`` lets the
        parent rate equation share one substitution dict across terms."""
        if not subs_dict:
            subs_dict = get_subs_dict(self._unfac_expression, self.mod)
        self._value = get_value(self._str_expression, subs_dict)

    def __add__(self, other):
        return generic_term_operation(self, other, '+')

    def __mul__(self, other):
        return generic_term_operation(self, other, '*')

    def __sub__(self, other):
        return generic_term_operation(self, other, '-')

    def __div__(self, other):
        return generic_term_operation(self, other, '/')

    def __radd__(self, other):
        return generic_term_operation(self, other, '+')

    def __rmul__(self, other):
        return generic_term_operation(self, other, '*')

    def __rsub__(self, other):
        return generic_term_operation(self, other, 'rsub')

    def __rdiv__(self, other):
        return generic_term_operation(self, other, 'rdiv')

    def __neg__(self):
        return AdditionalTerm(self._parent,
                              self.mod,
                              '-' + self.name,
                              '-' + self._rname,
                              (-self._unfac_expression),
                              self._ltxe,
                              '-' + self.name)

    def __pow__(self, power, modulo=None):
        return AdditionalTerm(self._parent,
                              self.mod,
                              self.name + '**' + str(power),
                              self._rname + '**' + str(power),
                              self._unfac_expression ** power,
                              self._ltxe,
                              self.name + '**' + str(power))
class RateTerm(Term):
    """A main multiplicative term of a RateEqn, carrying its own partial
    elasticity coefficients (named ``pec<reaction>_<symbol>_<term>``)."""

    def __init__(self, parent, mod, name, rname, expression, ltxe):
        super(RateTerm, self).__init__(parent, mod, name, rname, expression,
                                       ltxe)
        self.ec_results = DotDict()
        self.ec_results._make_repr('"$" + v.latex_name + "$"', 'v.value',
                                   formatter_factory())
        self._populate_ec_results()
        self._percentage = None

    @property
    def percentage(self):
        # the term's log-space contribution to the full rate, in percent
        per = (log10(self.value) / log10(self._parent.value)) * 100
        return per

    def _populate_ec_results(self):
        """Build partial elasticities of this term w.r.t. every symbol that
        appears in the parent rate equation or in this term itself (symbols
        absent from this term simply yield zero expressions)."""
        expression_symbols = self._parent._unfac_expression.atoms(Symbol)
        expression_symbols.update(self._unfac_expression.atoms(Symbol))
        for each in expression_symbols:
            each = sympify(each)
            ec_name = 'ec%s_%s' % (self._parent._rname, each)
            pec_name = 'p%s_%s' % (ec_name, self._rname)
            ec = diff(self._unfac_expression, each) * \
                 (each / self._unfac_expression)
            self.ec_results[pec_name] = Term(self._parent,
                                             self.mod,
                                             pec_name,
                                             ec_name,
                                             ec,
                                             self._ltxe)
class AdditionalRateTerm(RateTerm):
    """A RateTerm read from the 'additional terms' section of the .reqn file
    (e.g. gamma/keq); excluded from the rate product, hence zero
    percentage contribution."""

    @property
    def percentage(self):
        # additional terms never contribute to the rate value
        return 0.0

    def append_to_file(self, file_name, term_name=None, parent=None):
        """Append this term's expression to *file_name*, defaulting the
        reaction/term labels to this term's own names."""
        if not parent:
            parent = self._parent._rname
        if not term_name:
            term_name = self._rname
        term_to_file(file_name, self._unfac_expression, parent, term_name)
class AdditionalTerm(Term):
    """A Term produced by arithmetic on other terms/rate equations.

    Remembers the symbolic operation that created it
    (``creation_operation``) and, unlike Term, does NOT factor its
    expression automatically -- call :meth:`simplify_expression` explicitly.
    """

    def __init__(self, parent, mod, name, rname, expression, ltxe,
                 creation_operation):
        super(AdditionalTerm, self).__init__(parent, mod, name, rname,
                                             expression,
                                             ltxe)
        self.creation_operation = creation_operation

    def simplify_expression(self):
        """Factor the expression and invalidate the cached latex form."""
        self._expression = self._unfac_expression.factor()
        self._latex_expression = None

    def get_elasticity(self, var_par, term_name=None):
        """Return the elasticity of this term towards ``var_par`` as a new
        Term with a hand-built latex name."""
        if not term_name:
            term_name = self.name
        var_par = sympify(var_par)
        ec = diff(self._unfac_expression, var_par) * \
             (var_par / self._unfac_expression)
        ec_name = 'ec_%s_%s' % (term_name, var_par)
        ec_term = Term(self, self.mod, ec_name, ec_name, ec, self._ltxe)
        ec_term._latex_name = '\\varepsilon^{%s}_{%s}' % (
            term_name.replace('_', ''),
            str(var_par).replace('_', ''))
        return ec_term

    def append_to_file(self, file_name, term_name=None, parent=None):
        """Append this term's expression to *file_name*; placeholder
        'new_term' names are not written as term labels."""
        if not parent and self._parent:
            parent = self._parent._rname
        if not term_name and self.name != 'new_term':
            term_name = self.name
        term_to_file(file_name, self._unfac_expression, parent, term_name)

    @property
    def expression(self):
        # deliberately NOT factored (see simplify_expression)
        if not self._expression:
            self._expression = self._unfac_expression
        return self._expression
def generic_term_operation(self, other, operator, parent=None, name=None,
                           rname=None):
    """Combine a Term/RateEqn with another operand into an AdditionalTerm.

    Backs the arithmetic dunders of Term and RateEqn.  ``operator`` is one
    of '+', '-', '*', '/' or the reflected pseudo-operators 'rsub'/'rdiv',
    which are rewritten into '+'/'*' by negating/inverting ``self`` first.
    Plain numbers and sympy expressions are wrapped as AdditionalTerms so
    both operands look alike.  The result's name defaults to 'new_term';
    its parent is inferred from the operands when not given.
    """
    def get_parent(self):
        # a RateEqn is its own parent; everything else delegates upward
        if type(self) is RateEqn:
            parent = self
        else:
            parent = self._parent
        return parent

    if operator == 'rsub':
        # other - self  ==  (-self) + other
        self = -self
        operator = '+'
    if operator == 'rdiv':
        # other / self  ==  (1/self) * other
        self = AdditionalTerm(get_parent(self),
                              self.mod,
                              self.name,
                              self._rname,
                              (1 / self._unfac_expression),
                              self._ltxe,
                              '1/' + self.name)
        operator = '*'
    if is_number(other):
        other = AdditionalTerm(get_parent(self),
                               self.mod,
                               str(other),
                               str(other),
                               sympify(other),
                               self._ltxe,
                               str(other))
    # TODO this type check is a hack - no idea how to check specifically for
    # sympy expressions
    elif 'sympy' in str(type(other)):
        other = AdditionalTerm(get_parent(self),
                               self.mod,
                               str(other),
                               str(other),
                               other,
                               self._ltxe,
                               str(other))
    mod = self.mod
    # record what each operand "was" for the creation_operation bookkeeping
    operated_on = []
    for term in (self, other):
        if type(term) is AdditionalTerm:
            operated_on.append(term.creation_operation)
        else:
            operated_on.append(term.name)
    if not name:
        name = 'new_term'
    if not rname:
        rname = 'new_term'
    if not parent:
        if hasattr(self, '_parent') and hasattr(other, '_parent'):
            parent = self._parent
        elif type(self) is RateEqn and type(other) is not RateEqn:
            parent = self
        elif type(other) is RateEqn and type(self) is not RateEqn:
            parent = other
    creation_operation = sympify(
        '%s %s %s' % (operated_on[0], operator, operated_on[1]))
    expression = sympify('(%s) %s (%s)' % (
        self._str_expression, operator, other._str_expression))
    ltxe = self._ltxe
    return AdditionalTerm(parent, mod, name, rname, expression, ltxe,
                          creation_operation)
| exe0cdc/PyscesToolbox | psctb/analyse/_thermokin.py | Python | bsd-3-clause | 31,357 | [
"PySCeS"
] | 1f977760b4e4c88d726d806b81257270cbd1332d770ec9fd46f87e3b06f35096 |
import collect_array as ca
import collect_id as ci
import collect_loop as cl
import collect_device as cd
def print_dict_sorted(mydict):
    """Return a deterministic one-line repr of *mydict* with keys sorted.

    Keys are assumed to be strings (they are emitted single-quoted as-is);
    values are rendered with repr().  Format matches the original output
    exactly: "{'a': 1,'b': 2}" -- no space after the separating commas.
    """
    # str.join replaces the original quadratic += concatenation and its
    # manual trailing-comma trim; an empty dict still yields "{}".
    entries = ["'" + key + "': " + repr(mydict[key]) for key in sorted(mydict)]
    return "{" + ",".join(entries) + "}"
class GenReverseIdx(object):
    """Holds the mapping that swaps the two grid-dimension indices."""

    def __init__(self):
        # 0 <-> 1: reverse the order of the two parallel dimensions
        self.ReverseIdx = {0: 1, 1: 0}
def get_reverse_idx(ast):
    """Return the dimension-swap mapping {0: 1, 1: 0}.

    *ast* is accepted only for API symmetry with the other getters here;
    it is not inspected.
    """
    return GenReverseIdx().ReverseIdx
class GenHostArrayData(object):
    """Collects host-side pointer and memory-size names for global arrays.

    After :meth:`collect`, ``HstId`` maps array id -> host pointer name,
    ``Mem`` maps array id -> memory-size variable name, and
    ``TransposableHstId`` lists arrays kept under their own name.
    """

    def __init__(self):
        super(GenHostArrayData, self).__init__()
        self.HstId = dict()
        self.TransposableHstId = list()
        self.Mem = dict()

    def collect(self, ast):
        """Populate the mappings by visiting *ast*."""
        global_ids = ca.GlobalArrayIds()
        global_ids.visit(ast)
        for array_id in global_ids.ids:
            self.HstId[array_id] = 'hst_ptr' + array_id
            self.Mem[array_id] = 'hst_ptr' + array_id + '_mem_size'
        # transposable arrays keep their original name as host id
        # (may override the 'hst_ptr' prefix assigned above)
        for array_id in ca.get_transposable_array_ids(ast):
            self.HstId[array_id] = array_id
            self.TransposableHstId.append(array_id)
def get_mem_names(ast):
    """Return {array id: host memory-size variable name} for *ast*."""
    collector = GenHostArrayData()
    collector.collect(ast)
    return collector.Mem
def get_host_ids(ast):
    """Return {array id: host pointer name} for *ast*."""
    collector = GenHostArrayData()
    collector.collect(ast)
    return collector.HstId
def gen_transposable_host_ids(ast):
    """Return the list of transposable array ids found in *ast*."""
    collector = GenHostArrayData()
    collector.collect(ast)
    return collector.TransposableHstId
def get_kernel_args(ast):
    """Return {kernel argument name: type} collected from *ast*."""
    collector = GenKernelArgs()
    collector.collect(ast)
    return collector.kernel_args
class GenArrayDimNames(object):
    """Derives per-array dimension counts and host-side dimension names.

    After :meth:`collect`, ``num_array_dims`` maps array id -> number of
    subscripts and ``ArrayIdToDimName`` maps array id -> list of names of
    the form 'hst_ptr<array>_dim<N>' (1-based).  Local-memory (stencil)
    arrays get their dimension names from the dedicated visitor instead.
    """

    def __init__(self):
        self.num_array_dims = dict()
        self.ArrayIdToDimName = dict()

    def collect(self, ast):
        """Populate both mappings by visiting *ast*."""
        dim_visitor = ca.NumArrayDim(ast)
        dim_visitor.visit(ast)
        self.num_array_dims = dim_visitor.numSubscripts
        for array_name, num_dims in dim_visitor.numSubscripts.items():
            self.ArrayIdToDimName[array_name] = [
                'hst_ptr' + array_name + '_dim' + str(i + 1)
                for i in xrange(num_dims)]
        # local-memory arrays override the generated names
        stencil_visitor = ca.LocalMemArrayIdToDimName()
        stencil_visitor.visit(ast)
        for key, value in stencil_visitor.ArrayIdToDimName.iteritems():
            self.ArrayIdToDimName[key] = value
def get_array_id_to_dim_name(ast):
    """Return {array id: [dimension variable names]} for *ast*."""
    collector = GenArrayDimNames()
    collector.collect(ast)
    return collector.ArrayIdToDimName
class GenIdxToDim(object):
    """Maps grid-dimension number to loop-index name, innermost loop first."""

    def __init__(self):
        self.IdxToDim = dict()

    def collect(self, ast, par_dim=2):
        """Visit *ast* and record dimension -> index-name for the first
        ``par_dim`` parallelisable loops (reversed so dim 0 is innermost)."""
        index_visitor = cl.LoopIndices(par_dim)
        index_visitor.visit(ast)
        for dim, index_name in enumerate(reversed(index_visitor.grid_indices)):
            self.IdxToDim[dim] = index_name
class GenKernelArgs(object):
    """Collects the name -> type mapping of every argument the generated
    kernel must receive: global arrays and scalars, minus ids removed from
    the kernel, compile-time defines and run-time OpenCL arguments."""

    def __init__(self):
        # kernel argument name -> type (as collected by GlobalTypeIds)
        self.kernel_args = dict()

    def collect(self, ast):
        """Populate ``kernel_args`` by combining the various id collectors.

        For 2-D arrays the first dimension-size variable is also passed as
        a kernel argument (needed for index flattening -- TODO confirm).
        """
        arrays_ids = ca.GlobalArrayIds()
        arrays_ids.visit(ast)
        array_ids = arrays_ids.ids
        nonarray_ids = ci.GlobalNonArrayIds()
        nonarray_ids.visit(ast)
        non_array_ids = nonarray_ids.ids
        mytype_ids = ci.GlobalTypeIds()
        mytype_ids.visit(ast)
        types = mytype_ids.types
        gen_removed_ids = GenRemovedIds()
        gen_removed_ids.collect(ast)
        removed_ids = gen_removed_ids.removed_ids
        kernel_arg_defines = ci.get_kernel_arg_defines(ast)
        runocl_args = ci.get_runocl_args(ast)
        # everything referenced by the kernel that is not provided some
        # other way (removed, #defined, or passed by the runtime wrapper)
        arg_ids = non_array_ids.union(array_ids) - removed_ids - kernel_arg_defines - runocl_args
        gen_array_dimnames = GenArrayDimNames()
        gen_array_dimnames.collect(ast)
        num_array_dims = gen_array_dimnames.num_array_dims
        arrayid_to_dimname = gen_array_dimnames.ArrayIdToDimName
        for n in arg_ids:
            tmplist = {n}
            try:
                if num_array_dims[n] == 2:
                    tmplist.add(arrayid_to_dimname[n][0])
            except KeyError:
                # n is not an array -- no dimension name to add
                pass
            for m in tmplist - kernel_arg_defines:
                self.kernel_args[m] = types[m]
class GenRemovedIds(object):
    """Find loop-limit identifiers that become unused in the kernel."""
    def __init__(self):
        self.removed_ids = set()

    def collect(self, ast):
        """Compute the set of removable ids for *ast*."""
        grid_indices = cl.get_grid_indices(ast)
        col_loop_limit = cl.LoopLimit()
        col_loop_limit.visit(ast)
        upper_limit = col_loop_limit.upper_limit
        # Upper-limit ids of the parallelized (grid) loops...
        upper_limits = set(upper_limit[i] for i in grid_indices)
        my_kernel = cd.get_kernel(ast)
        ids_still_in_kernel = ci.Ids()
        ids_still_in_kernel.visit(my_kernel)
        # ...are removable unless the kernel body still references them.
        self.removed_ids = upper_limits - ids_still_in_kernel.ids
def get_removed_ids(ast):
    """Return the set of loop-limit ids removable from the kernel of *ast*."""
    collector = GenRemovedIds()
    collector.collect(ast)
    return collector.removed_ids
class GenLocalArrayIdx(object):
    """Map each grid loop index name to a derived 'l'-prefixed name.

    NOTE(review): the 'l' prefix presumably denotes the local
    (work-group) copy of the index -- confirm against the emitters.
    """
    def __init__(self):
        self.IndexToLocalVar = dict()

    def collect(self, ast):
        """Record 'l<index>' for every grid loop index in *ast*."""
        par_dim = cl.get_par_dim(ast)
        col_li = cl.LoopIndices(par_dim)
        col_li.visit(ast)
        grid_indices = col_li.grid_indices
        for var in grid_indices:
            self.IndexToLocalVar[var] = 'l' + var
def get_local_array_idx(ast):
    """Return the grid-index -> local-variable-name mapping for *ast*."""
    collector = GenLocalArrayIdx()
    collector.collect(ast)
    return collector.IndexToLocalVar
class GenIdxToThreadId(object):
    """Map grid loop-index name -> OpenCL global thread-id expression."""
    def __init__(self):
        self.IndexToThreadId = dict()

    def collect(self, ast):
        """Record 'get_global_id(i)' for every grid loop index in *ast*."""
        par_dim = cl.get_par_dim(ast)
        col_li = cl.LoopIndices(par_dim)
        col_li.visit(ast)
        grid_indices = col_li.grid_indices
        # Innermost loop index maps to get_global_id(0), matching the
        # reversed ordering used by GenIdxToDim.
        for i, n in enumerate(reversed(grid_indices)):
            self.IndexToThreadId[n] = 'get_global_id(' + str(i) + ')'
def gen_idx_to_dim(ast):
    """Return the dimension-number -> loop-index mapping for *ast*."""
    collector = GenIdxToDim()
    collector.collect(ast, cl.get_par_dim(ast))
    return collector.IdxToDim
| dikujepsen/OpenTran | src/framework/processing/collect_gen.py | Python | mit | 6,164 | [
"VisIt"
] | 888235d92ba3e614896becf5e82553787ce22d7ae539516ee318caa94d486f9f |
# minNEURON.py
# Minimal NEURON model: a single soma section with passive membrane
# properties. Serves as the smallest runnable NEURON example.
from neuron import h

cell = h.SectionList()
soma = h.Section(name='soma') # create soma
soma.push()  # make soma the currently accessed section
#h.topology()

# Geometry
soma.nseg = 1  # single compartment
soma.L = 20    # section length (NEURON units -- confirm um)
soma.diam = 20 # section diameter

# Biophysics: apply cable properties and passive mechanism to every section
for sec in h.allsec():
    sec.Ra = 100
    sec.cm = 1
    sec.insert('pas')
    # sec.insert('hh') # insert hh
    cell.append(sec)

# Commented-out scaffolding for driving a ChR (channelrhodopsin) point
# process from a stimulation protocol object 'Prot':
#h('objref rho')
#h('rho = new ChR(0.5)')
#h.rho.Er = Prot.phis[0]
#setattr(h.rho, 'del', Prot.pulses[0][0]) # rho.del will not work because del is reserved word in python
#h.rho.ton = Prot.Dt_ons[0]
#h.rho.toff = Prot.Dt_offs[0]
#h.rho.num = Prot.nPulses
#h.rho.gbar = RhO.g/20000
# Pick an opsin to record from
#rhoRec = h.ChR_apic.o(7)
h.pop_section()  # undo the soma.push() above
| ProjectPyRhO/PyRhO | pyrho/NEURON/minimal.py | Python | bsd-3-clause | 694 | [
"NEURON"
] | 857943e4f810e7b5ce59192d1fd01c21825f7faadbc8922e067f725831879419 |
#!/usr/bin/env python
#
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Standard setup script.
"""
from setuptools import setup # isort:skip
import glob
import inspect
import os
import pkg_resources
import sys
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
from pkg_resources import parse_version
from buildbot import version
BUILDING_WHEEL = bool("bdist_wheel" in sys.argv)
def include(d, e):
    """Return a (directory, file-list) pair for setup() data_files.

    d -- directory to scan
    e -- glob pattern matched inside *d*

    Only plain files are kept; matching subdirectories are dropped.
    """
    matches = glob.glob(f'{d}/{e}')
    files = [path for path in matches if os.path.isfile(path)]
    return (d, files)
def include_statics(d):
    """Recursively list (directory, files-in-directory) pairs under *d*.

    Every directory in the tree gets one pair, even when it holds no
    files; file entries are full paths relative to *d*'s prefix.
    """
    return [
        (root, [os.path.join(root, name) for name in filenames])
        for root, _dirs, filenames in os.walk(d)
    ]
class install_data_twisted(install_data):
    """make sure data files are installed in package.

    this is evil.
    copied from Twisted/setup.py.
    """

    def finalize_options(self):
        # Redirect the data-file destination to the code install dir
        # (install_lib), so data lands inside the buildbot package.
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   )
        super().finalize_options()

    def run(self):
        super().run()
        # ensure there's a buildbot/VERSION file
        fn = os.path.join(self.install_dir, 'buildbot', 'VERSION')
        with open(fn, 'w') as f:
            f.write(version)
        # Register the file so --record and uninstall see it.
        self.outfiles.append(fn)
class our_sdist(sdist):
    """sdist variant that embeds version info into the release tree."""

    def make_release_tree(self, base_dir, files):
        sdist.make_release_tree(self, base_dir, files)
        # ensure there's a buildbot/VERSION file
        fn = os.path.join(base_dir, 'buildbot', 'VERSION')
        with open(fn, 'w') as f:
            f.write(version)
        # ensure that NEWS has a copy of the latest release notes, with the
        # proper version substituted
        src_fn = os.path.join('docs', 'relnotes/index.rst')
        with open(src_fn) as f:
            src = f.read()
        src = src.replace('|version|', version)
        dst_fn = os.path.join(base_dir, 'NEWS')
        with open(dst_fn, 'w') as f:
            f.write(src)
def define_plugin_entry(name, module_name):
    """Render one setuptools entry-points line: '<entry> = <module>:<attr>'.

    *name* is either a plain string (entry name equals the attribute
    name) or an (entry, attribute) tuple when the two differ.
    """
    if isinstance(name, tuple):
        entry, attr = name
    else:
        entry = attr = name
    return f'{entry} = {module_name}:{attr}'
def concat_dicts(*dicts):
    """Merge any number of dicts left to right; later values win on clashes."""
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
def define_plugin_entries(groups):
    """Expand the nested plugin description into setup() entry_points.

    groups -- iterable of (group_name, modules) pairs, where modules is
    an iterable of (module_name, names); each name is passed through
    define_plugin_entry, so it may be a string or an (entry, attr) tuple.
    """
    return {
        group: [
            define_plugin_entry(name, module_name)
            for module_name, names in modules
            for name in names
        ]
        for group, modules in groups
    }
# Resolve this script's path via the current frame (robust when setup.py
# is exec'd rather than run directly), then load README.rst as the
# package long_description.
__file__ = inspect.getframeinfo(inspect.currentframe()).filename
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as long_d_f:
    long_description = long_d_f.read()
setup_args = {
'name': "buildbot",
'version': version,
'description': "The Continuous Integration Framework",
'long_description': long_description,
'author': "Brian Warner",
'author_email': "warner-buildbot@lothar.com",
'maintainer': "Dustin J. Mitchell",
'maintainer_email': "dustin@v.igoro.us",
'url': "http://buildbot.net/",
'classifiers': [
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
'packages': [
"buildbot",
"buildbot.configurators",
"buildbot.worker",
"buildbot.worker.protocols",
"buildbot.worker.protocols.manager",
"buildbot.changes",
"buildbot.clients",
"buildbot.data",
"buildbot.db",
"buildbot.db.migrations.versions",
"buildbot.db.types",
"buildbot.machine",
"buildbot.monkeypatches",
"buildbot.mq",
"buildbot.plugins",
"buildbot.process",
"buildbot.process.users",
"buildbot.reporters",
"buildbot.reporters.generators",
"buildbot.schedulers",
"buildbot.scripts",
"buildbot.secrets",
"buildbot.secrets.providers",
"buildbot.statistics",
"buildbot.statistics.storage_backends",
"buildbot.steps",
"buildbot.steps.package",
"buildbot.steps.package.deb",
"buildbot.steps.package.rpm",
"buildbot.steps.source",
"buildbot.util",
"buildbot.wamp",
"buildbot.www",
"buildbot.www.hooks",
"buildbot.www.authz",
"buildbot.test",
"buildbot.test.util",
"buildbot.test.fake",
"buildbot.test.fakedb",
] + ([] if BUILDING_WHEEL else [ # skip tests for wheels (save 50% of the archive)
"buildbot.test.fuzz",
"buildbot.test.integration",
"buildbot.test.integration.interop",
"buildbot.test.regressions",
"buildbot.test.unit",
]),
'data_files': [
include("buildbot/reporters/templates", "*.txt"),
("buildbot/db/migrations", [
"buildbot/db/migrations/alembic.ini",
]),
include("buildbot/db/migrations/versions", "*.py"),
("buildbot/scripts", [
"buildbot/scripts/sample.cfg",
"buildbot/scripts/buildbot_tac.tmpl",
]),
include("buildbot/spec", "*.raml"),
include("buildbot/spec/types", "*.raml"),
include("buildbot/test/unit/test_templates_dir", "*.html"),
include("buildbot/test/unit/test_templates_dir/plugin", "*.*"),
include("buildbot/test/integration/pki", "*.*"),
include("buildbot/test/integration/pki/ca", "*.*"),
] + include_statics("buildbot/www/static"),
'cmdclass': {'install_data': install_data_twisted,
'sdist': our_sdist},
'entry_points': concat_dicts(define_plugin_entries([
('buildbot.changes', [
('buildbot.changes.mail', [
'MaildirSource', 'CVSMaildirSource',
'SVNCommitEmailMaildirSource',
'BzrLaunchpadEmailMaildirSource']),
('buildbot.changes.bitbucket', ['BitbucketPullrequestPoller']),
('buildbot.changes.github', ['GitHubPullrequestPoller']),
('buildbot.changes.gerritchangesource', [
'GerritChangeSource', 'GerritEventLogPoller']),
('buildbot.changes.gitpoller', ['GitPoller']),
('buildbot.changes.hgpoller', ['HgPoller']),
('buildbot.changes.p4poller', ['P4Source']),
('buildbot.changes.pb', ['PBChangeSource']),
('buildbot.changes.svnpoller', ['SVNPoller'])
]),
('buildbot.schedulers', [
('buildbot.schedulers.basic', [
'SingleBranchScheduler', 'AnyBranchScheduler']),
('buildbot.schedulers.dependent', ['Dependent']),
('buildbot.schedulers.triggerable', ['Triggerable']),
('buildbot.schedulers.forcesched', ['ForceScheduler']),
('buildbot.schedulers.timed', [
'Periodic', 'Nightly', 'NightlyTriggerable']),
('buildbot.schedulers.trysched', [
'Try_Jobdir', 'Try_Userpass'])
]),
('buildbot.secrets', [
('buildbot.secrets.providers.file', ['SecretInAFile']),
('buildbot.secrets.providers.passwordstore', ['SecretInPass']),
('buildbot.secrets.providers.vault', ['HashiCorpVaultSecretProvider']),
('buildbot.secrets.providers.vault_hvac', [
'HashiCorpVaultKvSecretProvider', 'VaultAuthenticatorToken',
'VaultAuthenticatorApprole'])
]),
('buildbot.worker', [
('buildbot.worker.base', ['Worker']),
('buildbot.worker.ec2', ['EC2LatentWorker']),
('buildbot.worker.libvirt', ['LibVirtWorker']),
('buildbot.worker.openstack', ['OpenStackLatentWorker']),
('buildbot.worker.docker', ['DockerLatentWorker']),
('buildbot.worker.kubernetes', ['KubeLatentWorker']),
('buildbot.worker.local', ['LocalWorker']),
]),
('buildbot.machine', [
('buildbot.machine.base', ['Machine']),
]),
('buildbot.steps', [
('buildbot.process.buildstep', ['BuildStep']),
('buildbot.steps.cmake', ['CMake']),
('buildbot.steps.cppcheck', ['Cppcheck']),
('buildbot.steps.gitdiffinfo', ['GitDiffInfo']),
('buildbot.steps.http', [
'HTTPStep', 'POST', 'GET', 'PUT', 'DELETE', 'HEAD', 'OPTIONS']),
('buildbot.steps.master', [
'MasterShellCommand', 'SetProperty', 'SetProperties', 'LogRenderable', "Assert"]),
('buildbot.steps.maxq', ['MaxQ']),
('buildbot.steps.mswin', ['Robocopy']),
('buildbot.steps.package.deb.lintian', ['DebLintian']),
('buildbot.steps.package.deb.pbuilder', [
'DebPbuilder', 'DebCowbuilder', 'UbuPbuilder',
'UbuCowbuilder']),
('buildbot.steps.package.rpm.mock', [
'Mock', 'MockBuildSRPM', 'MockRebuild']),
('buildbot.steps.package.rpm.rpmbuild', ['RpmBuild']),
('buildbot.steps.package.rpm.rpmlint', ['RpmLint']),
('buildbot.steps.python', [
'BuildEPYDoc', 'PyFlakes', 'PyLint', 'Sphinx']),
('buildbot.steps.python_twisted', [
'HLint', 'Trial', 'RemovePYCs']),
('buildbot.steps.shell', [
'ShellCommand', 'TreeSize', 'SetPropertyFromCommand', 'Configure',
'WarningCountingShellCommand', 'Compile', 'Test', 'PerlModuleTest']),
('buildbot.steps.shellsequence', ['ShellSequence']),
('buildbot.steps.source.bzr', ['Bzr']),
('buildbot.steps.source.cvs', ['CVS']),
('buildbot.steps.source.darcs', ['Darcs']),
('buildbot.steps.source.gerrit', ['Gerrit']),
('buildbot.steps.source.git', ['Git', 'GitCommit', 'GitPush', 'GitTag']),
('buildbot.steps.source.github', ['GitHub']),
('buildbot.steps.source.gitlab', ['GitLab']),
('buildbot.steps.source.mercurial', ['Mercurial']),
('buildbot.steps.source.mtn', ['Monotone']),
('buildbot.steps.source.p4', ['P4']),
('buildbot.steps.source.repo', ['Repo']),
('buildbot.steps.source.svn', ['SVN']),
('buildbot.steps.subunit', ['SubunitShellCommand']),
('buildbot.steps.transfer', [
'FileUpload', 'DirectoryUpload', 'MultipleFileUpload',
'FileDownload', 'StringDownload', 'JSONStringDownload',
'JSONPropertiesDownload']),
('buildbot.steps.trigger', ['Trigger']),
('buildbot.steps.vstudio', [
'VC6', 'VC7', 'VS2003', 'VC8', 'VS2005', 'VCExpress9', 'VC9',
'VS2008', 'VC10', 'VS2010', 'VC11', 'VS2012', 'VC12', 'VS2013',
'VC14', 'VS2015', 'VC141', 'VS2017', 'MsBuild4', 'MsBuild',
'MsBuild12', 'MsBuild14', 'MsBuild141']),
('buildbot.steps.worker', [
'SetPropertiesFromEnv', 'FileExists', 'CopyDirectory',
'RemoveDirectory', 'MakeDirectory']),
]),
('buildbot.reporters', [
('buildbot.reporters.generators.build', [
'BuildStatusGenerator',
'BuildStartEndStatusGenerator'
]),
('buildbot.reporters.generators.buildrequest', [
'BuildRequestGenerator'
]),
('buildbot.reporters.generators.buildset', ['BuildSetStatusGenerator']),
('buildbot.reporters.generators.worker', ['WorkerMissingGenerator']),
('buildbot.reporters.mail', ['MailNotifier']),
('buildbot.reporters.pushjet', ['PushjetNotifier']),
('buildbot.reporters.pushover', ['PushoverNotifier']),
('buildbot.reporters.message', [
'MessageFormatter',
'MessageFormatterEmpty',
'MessageFormatterFunction',
'MessageFormatterMissingWorker',
'MessageFormatterRenderable',
]),
('buildbot.reporters.gerrit', ['GerritStatusPush']),
('buildbot.reporters.gerrit_verify_status',
['GerritVerifyStatusPush']),
('buildbot.reporters.http', ['HttpStatusPush']),
('buildbot.reporters.github', ['GitHubStatusPush', 'GitHubCommentPush']),
('buildbot.reporters.gitlab', ['GitLabStatusPush']),
('buildbot.reporters.bitbucketserver', [
'BitbucketServerStatusPush',
'BitbucketServerCoreAPIStatusPush',
'BitbucketServerPRCommentPush'
]),
('buildbot.reporters.bitbucket', ['BitbucketStatusPush']),
('buildbot.reporters.irc', ['IRC']),
('buildbot.reporters.telegram', ['TelegramBot']),
('buildbot.reporters.zulip', ['ZulipStatusPush']),
]),
('buildbot.util', [
# Connection seems to be a way too generic name, though
('buildbot.worker.libvirt', ['Connection']),
('buildbot.changes.filter', ['ChangeFilter']),
('buildbot.changes.gerritchangesource', ['GerritChangeFilter']),
('buildbot.changes.svnpoller', [
('svn.split_file_projects_branches',
'split_file_projects_branches'),
('svn.split_file_branches', 'split_file_branches'),
('svn.split_file_alwaystrunk', 'split_file_alwaystrunk')]),
('buildbot.configurators.janitor', ['JanitorConfigurator']),
('buildbot.config', ['BuilderConfig']),
('buildbot.locks', [
'MasterLock',
'WorkerLock',
]),
('buildbot.manhole', [
'AuthorizedKeysManhole', 'PasswordManhole', 'TelnetManhole']),
('buildbot.process.builder', [
'enforceChosenWorker',
]),
('buildbot.process.factory', [
'BuildFactory', 'GNUAutoconf', 'CPAN', 'Distutils', 'Trial',
'BasicBuildFactory', 'QuickBuildFactory', 'BasicSVN']),
('buildbot.process.logobserver', ['LogLineObserver']),
('buildbot.process.properties', [
'FlattenList', 'Interpolate', 'Property', 'Transform',
'WithProperties', 'renderer', 'Secret']),
('buildbot.process.users.manual', [
'CommandlineUserManager']),
('buildbot.revlinks', ['RevlinkMatch']),
('buildbot.reporters.utils', ['URLForBuild']),
('buildbot.schedulers.canceller', ['OldBuildCanceller']),
('buildbot.schedulers.canceller_buildset', ['FailingBuildsetCanceller']),
('buildbot.schedulers.forcesched', [
'AnyPropertyParameter', 'BooleanParameter',
'ChoiceStringParameter',
'CodebaseParameter', 'FileParameter', 'FixedParameter', 'InheritBuildParameter',
'IntParameter', 'NestedParameter', 'ParameterGroup',
'PatchParameter',
'StringParameter', 'TextParameter', 'UserNameParameter',
'WorkerChoiceParameter',
]),
('buildbot.process.results', [
'Results', 'SUCCESS', 'WARNINGS', 'FAILURE', 'SKIPPED',
'EXCEPTION', 'RETRY', 'CANCELLED']),
('buildbot.steps.source.repo', [
('repo.DownloadsFromChangeSource',
'RepoDownloadsFromChangeSource'),
('repo.DownloadsFromProperties',
'RepoDownloadsFromProperties')]),
('buildbot.steps.shellsequence', ['ShellArg']),
('buildbot.util.kubeclientservice', [
'KubeHardcodedConfig', 'KubeCtlProxyConfigLoader', 'KubeInClusterConfigLoader'
]),
('buildbot.util.ssfilter', ['SourceStampFilter']),
('buildbot.www.avatar', ['AvatarGravatar', 'AvatarGitHub']),
('buildbot.www.auth', [
'UserPasswordAuth', 'HTPasswdAuth', 'RemoteUserAuth', 'CustomAuth']),
('buildbot.www.ldapuserinfo', ['LdapUserInfo']),
('buildbot.www.oauth2', [
'GoogleAuth', 'GitHubAuth', 'GitLabAuth', 'BitbucketAuth']),
('buildbot.db.dbconfig', [
'DbConfig']),
('buildbot.www.authz', [
'Authz', 'fnmatchStrMatcher', 'reStrMatcher']),
('buildbot.www.authz.roles', [
'RolesFromEmails', 'RolesFromGroups', 'RolesFromOwner', 'RolesFromUsername',
'RolesFromDomain']),
('buildbot.www.authz.endpointmatchers', [
'AnyEndpointMatcher', 'StopBuildEndpointMatcher', 'ForceBuildEndpointMatcher',
'RebuildBuildEndpointMatcher', 'AnyControlEndpointMatcher',
'EnableSchedulerEndpointMatcher'
]),
]),
('buildbot.webhooks', [
('buildbot.www.hooks.base', ['base']),
('buildbot.www.hooks.bitbucket', ['bitbucket']),
('buildbot.www.hooks.github', ['github']),
('buildbot.www.hooks.gitlab', ['gitlab']),
('buildbot.www.hooks.gitorious', ['gitorious']),
('buildbot.www.hooks.poller', ['poller']),
('buildbot.www.hooks.bitbucketcloud', ['bitbucketcloud']),
('buildbot.www.hooks.bitbucketserver', ['bitbucketserver'])
])
]), {
'console_scripts': [
'buildbot=buildbot.scripts.runner:run',
# this will also be shipped on non windows :-(
'buildbot_windows_service=buildbot.scripts.windows_service:HandleCommandLine',
]}
)
}
# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
    setup_args['zip_safe'] = False

# Hard requirement: Python >= 3.6.
py_36 = sys.version_info[0] > 3 or (
    sys.version_info[0] == 3 and sys.version_info[1] >= 6)
if not py_36:
    raise RuntimeError("Buildbot master requires at least Python-3.6")

# pip<1.4 doesn't have the --pre flag, and will thus attempt to install alpha
# and beta versions of Buildbot. Prevent that from happening.
VERSION_MSG = """
This is a pre-release version of Buildbot, which can only be installed with
pip-1.4 or later Try installing the latest stable version of Buildbot instead:
pip install buildbot==0.8.12
See https://pypi.python.org/pypi/buildbot to verify the current stable version.
"""

# An 'a' or 'b' in the version string marks an alpha/beta pre-release.
if 'a' in version or 'b' in version:
    try:
        pip_dist = pkg_resources.get_distribution('pip')
    except pkg_resources.DistributionNotFound:
        pip_dist = None
    if pip_dist:
        if parse_version(pip_dist.version) < parse_version('1.4'):
            raise RuntimeError(VERSION_MSG)

# Version pins shared by several requirement entries below.
twisted_ver = ">= 17.9.0"
autobahn_ver = ">= 0.16.0"
txaio_ver = ">= 2.2.2"

# Bundle packages pin to the version without any local "-" suffix.
bundle_version = version.split("-")[0]

# dependencies
setup_args['install_requires'] = [
    'setuptools >= 8.0',
    'Twisted ' + twisted_ver,
    'Jinja2 >= 2.1',
    'msgpack >= 0.6.0',
    # required for tests, but Twisted requires this anyway
    'zope.interface >= 4.1.1',
    'sqlalchemy >= 1.3.0, < 1.5',
    'alembic >= 1.6.0',
    'python-dateutil>=1.5',
    'txaio ' + txaio_ver,
    'autobahn ' + autobahn_ver,
    'PyJWT',
    'pyyaml'
]

# buildbot_windows_service needs pywin32
if sys.platform == "win32":
    setup_args['install_requires'].append('pywin32')

# Unit test dependencies.
test_deps = [
    # http client libraries
    'treq',
    'txrequests',
    # pypugjs required for custom templates tests
    'pypugjs',
    # boto3 and moto required for running EC2 tests
    'boto3',
    'moto',
    'mock>=2.0.0',
    'parameterized',
]
if sys.platform != 'win32':
    test_deps += [
        # LZ4 fails to build on Windows:
        # https://github.com/steeve/python-lz4/issues/27
        # lz4 required for log compression tests.
        'lz4',
    ]
setup_args['tests_require'] = test_deps

setup_args['extras_require'] = {
    'test': [
        'setuptools_trial',
        'isort',
        # spellcheck introduced in version 1.4.0
        'pylint<1.7.0',
        'pyenchant',
        'flake8~=3.9.2',
    ] + test_deps,
    'bundle': [
        f"buildbot-www=={bundle_version}",
        f"buildbot-worker=={bundle_version}",
        f"buildbot-waterfall-view=={bundle_version}",
        f"buildbot-console-view=={bundle_version}",
        f"buildbot-grid-view=={bundle_version}",
    ],
    'tls': [
        'Twisted[tls] ' + twisted_ver,
        # There are bugs with extras inside extras:
        # <https://github.com/pypa/pip/issues/3516>
        # so we explicitly include Twisted[tls] dependencies.
        'pyopenssl >= 16.0.0',
        'service_identity',
        'idna >= 0.6',
    ],
    'docs': [
        'docutils>=0.16.0',
        'sphinx>=3.2.0',
        'sphinx-rtd-theme>=0.5',
        'sphinxcontrib-spelling',
        'sphinxcontrib-websupport',
        'pyenchant',
        'sphinx-jinja',
        'towncrier',
    ],
}

if '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv:
    setup_args['setup_requires'] = [
        'setuptools_trial',
    ]

# Escape hatch for distro packagers: let the packaging system, rather
# than pip, satisfy the requirements.
if os.getenv('NO_INSTALL_REQS'):
    setup_args['install_requires'] = None
    setup_args['extras_require'] = None

if __name__ == '__main__':
    setup(**setup_args)

# Local Variables:
# fill-column: 71
# End:
| pmisik/buildbot | master/setup.py | Python | gpl-2.0 | 22,871 | [
"Brian"
] | 6aaf8af4d3df056c765a1d7b5123f2c3ad2955259703ac417350bd3c4f10ac06 |
# Making interactions.txt file from sorted BLAST outputs
import csv
import json
import operator
import os
def csv_to_list(csv_file, delimiter=","):
    """Read *csv_file* and return its rows as a list of lists of strings."""
    with open(csv_file, 'r') as handle:
        return [row for row in csv.reader(handle, delimiter=delimiter)]
def convert_cells_to_floats(csv_cont):
    """In place, convert every cell that parses as a number to float.

    Cells that do not parse (e.g. header text) are left untouched.
    *csv_cont* (a list of row lists) is modified in place; returns None.
    """
    # enumerate instead of range(len(...)) double-indexing.
    for row_idx, row in enumerate(csv_cont):
        for col_idx, cell in enumerate(row):
            try:
                csv_cont[row_idx][col_idx] = float(cell)
            except ValueError:
                # Non-numeric text: keep the original string.
                pass
def sort_by_column(csv_cont, col, reverse=False):
    """Return csv content sorted by column *col*, header row kept on top.

    *col* is either a header name (str) or a zero-based column index.
    The input list is not modified; a new list is returned.
    """
    header, body = csv_cont[0], csv_cont[1:]
    col_index = header.index(col) if isinstance(col, str) else col
    ordered = sorted(body, key=operator.itemgetter(col_index), reverse=reverse)
    return [header] + ordered
def print_csv(csv_content):
print(50 * '-')
for row in csv_content:
row = [str(e) for e in row]
print('\t'.join(row))
print(50 * '_')
def write_csv(dest, csv_cont):
    """Write the rows in *csv_cont* to file *dest* as comma-separated values."""
    with open(dest, 'w') as out_file:
        csv.writer(out_file, delimiter=',').writerows(csv_cont)
elements = []
phages = []
edges = []
for fn in os.listdir("output/sorted"):
# made node for each bacteria and enter in elements
bac_name = fn[7:16]
obj = {
"group": 'nodes',
"data": {"id": bac_name},
"classes": 'bacteria'
}
elements.append(obj)
csv_cont = csv_to_list("output/sorted/" + fn)[1:]
for i in range(len(csv_cont)):
# only make phage node if it doesn't already exists
if csv_cont[i][1] not in phages:
obj = {
"group": 'nodes',
"data": {"id": csv_cont[i][1]},
"classes": 'phage'
}
elements.append(obj)
phages.append(csv_cont[i][1])
for i in range(len(csv_cont)):
if not (bac_name + "_to_" + csv_cont[i][1] in edges):
obj = {
"group": 'edges',
"data": {
"id": bac_name + "_to_" + csv_cont[i][1],
"source": bac_name,
"target": csv_cont[i][1]
}
}
elements.append(obj)
edges.append(bac_name + "_to_" + csv_cont[i][1])
# open each sorted file in output/sorted and get the
# phages, if phage is not phages list, add and create node,
# create edge
"""
json_file = open("json.txt","w")
json_file.write("cytoscape({" +"\n")
json_file.write("container: document.getElementById('cy')," +"\n")
json_file.write("elements: [" + "\n")
"""
with open("json.txt", "w") as outfile:
json.dump(elements, outfile)
# for thing in elements:
# json.write(thing)
| goyalsid/phageParser | parserscripts/interactions.py | Python | mit | 2,875 | [
"BLAST",
"Cytoscape"
] | d5ff0549eef91f3c3f23bd1489107c452bcf547068a42c62822068769b794de9 |
"""VTK output functions.
Create coarse grid views and write meshes/primitives to .vtu files. Use the
XML VTK format for unstructured meshes (.vtu)
This will use the XML VTK format for unstructured meshes, .vtu
See here for a guide: http://www.vtk.org/pdf/file-formats.pdf
"""
__docformat__ = "restructuredtext en"
import warnings
from numpy import array, ones, zeros, sqrt, asarray, empty, concatenate, \
random, uint8, kron, arange, diff, c_, where, issubdtype, \
integer, mean, sum, prod, ravel, hstack, invert, repeat, floor
from scipy import array, zeros, mean, kron, ones, sparse, rand
from scipy.sparse import csr_matrix, coo_matrix, csc_matrix
from os import system
# pyamg
from pyamg.vis import write_basic_mesh, write_vtu
from pyamg.util.utils import scale_rows, scale_columns
# have to manually install Delaunay package from scikits
try:
from scikits import delaunay
except:
try:
import delaunay
except:
raise ValueError("Install delaunay package from SciKits for this example")
__all__ = ['my_vis', 'shrink_elmts', 'dg_vis']
def shrink_elmts(E2V, Vert, shrink=0.75):
    """
    Shrink the elements in the mesh by factor "shrink" towards the barycenter

    Only works for simplicial meshes

    Parameters
    ----------
    Vert : {array}
        coordinate array (N x D)
    E2V : {array}
        element index array (Nel x Nelnodes)
    shrink : {scalar}
        factor by which to move each element's points to each element's barycenter

    Returns
    -------
    Vert and E2V with Vert appropriately scaled

    """
    E2V = array(E2V)
    Vert = array(Vert)
    Nelnodes = E2V.shape[1]
    Nel = E2V.shape[0]

    # Determine spatial dimension and whether the elements carry extra
    # (non-vertex) nodes, i.e. a polynomial order greater than 1.
    if(Vert.shape[1] == 2):
        Dimen = 2
        #Determine if polynomial order is greater than 1
        if(Nelnodes > 3):
            nonlin = True
            num_non_verts = Nelnodes - 3
        else:
            nonlin = False
    elif(Vert[:,2].nonzero()[0].shape[0] == 0): #Assume 2D if last column of Vert is all zero
        Dimen = 2
        #Determine if polynomial order is greater than 1
        if(Nelnodes > 3):
            nonlin = True
            num_non_verts = Nelnodes - 3
        else:
            nonlin = False
    else:
        Dimen = 3
        #Determine if polynomial order of basis functions is greater than 1
        if(Nelnodes > 4):
            nonlin = True
            num_non_verts = Nelnodes - 4
        else:
            nonlin = False

    # Account for shared faces, for case that this is used to shrink a cont Gal mesh
    #Vert = Vert[E2V.flatten(),:]
    #Agg = Agg[E2V.flatten(),:]
    #E2V = array(range(Vert.shape[0])).reshape(Vert.shape[0]/Nelnodes, Nelnodes)
    #Nel = E2V.shape[0]

    #Store Barycenter for each element
    Bcenter = zeros((Nel, Vert.shape[1]))

    for i in range(Nel):
        #Assumes first Dimen+1 nodes are verts for the simplex
        verts_K = Vert[E2V[i,0:(Dimen+1)], :]

        #Calculate Barycenter of element i
        Bcenter[i,:] = mean(verts_K, 0)

        #Shift vertices to barycenter:
        # new = shrink*old + (1-shrink)*barycenter  (convex combination)
        Vert[E2V[i,0:Dimen+1],:] = shrink*verts_K + (1-shrink)*kron(Bcenter[i,:], ones((Dimen+1,1)) )

        if(nonlin):
            # Move non-vertices to barycenter with the same formula, namely
            # shrink*point_barycoords + (1-shrink)*barycenter.
            Vert[ E2V[i, (Dimen+1):], :] = shrink*(Vert[ E2V[i, (Dimen+1):], :]) + \
                (1-shrink)*kron(Bcenter[i,:], ones((num_non_verts,1)) )

    return E2V, Vert
def dg_vis(fname, Vert, E2V, Agg, mesh_type, A=None):
    """Coarse grid visualization for 2-D discontinuous Galerkin Problems, for use with Paraview

    Parameters
    ----------
    fname : {string}
        file to be written, e.g. 'mymesh.vtu'
    Vert : {array}
        coordinate array (N x D)
    E2V : {array}
        element index array (Nel x Nelnodes)
    Agg : {csr_matrix}
        sparse matrix for the aggregate-vertex relationship (N x Nagg)
    mesh_type : {string}
        type of elements: tri
    A : {sparse amtrix}
        optional, used for better coloring

    Returns
    -------
    - Writes data to two .vtk files for use in Paraview (xml 0.1 format):
      fname + '_point-aggs.vtu' (points colored by aggregate) and
      fname + '_aggs.vtu' (per-aggregate triangulations).
    """
    #---------------------- input validation
    if not issubdtype(Vert.dtype,float):
        raise ValueError('Vert should be of type float')

    if E2V is not None:
        if not issubdtype(E2V.dtype,integer):
            raise ValueError('E2V should be of type integer')

    if Agg.shape[1] > Agg.shape[0]:
        raise ValueError('Agg should be of size Npts x Nagg')

    # NOTE(review): ('tri') is a plain string, not a 1-tuple, so the
    # membership test works only for exact 'tri' and the join in the
    # error message iterates over characters -- likely intended ('tri',).
    valid_mesh_types = ('tri')
    if mesh_type not in valid_mesh_types:
        raise ValueError('mesh_type should be %s' % ' or '.join(valid_mesh_types))

    if A is not None:
        if (A.shape[0] != A.shape[1]) or (A.shape[0] != Agg.shape[0]):
            raise ValueError('expected square matrix A and compatible with Agg')
    #----------------------

    N = Vert.shape[0]
    Ndof = N
    if E2V is not None:
        Nel = E2V.shape[0]
        Nelnodes = E2V.shape[1]
        if E2V.min() != 0:
            warnings.warn('element indices begin at %d' % E2V.min() )
    Nagg = Agg.shape[0]
    Ncolors = 16 # number of colors to use in the coloring algorithm

    # ------------------
    #Shrink each element in the mesh for nice plotting
    #E2V, Vert = shrink_elmts(E2V, Vert)

    # plot_type = 'vertex' output to .vtu --- throw point list down on mesh, so E2V becomes Nx1 array
    filename = fname + "_point-aggs.vtu"
    if False:#A is not None:
        # color aggregates with vertex coloring
        G = Agg.T * abs(A) * Agg
        colors = vertex_coloring(G, method='LDF')
        pdata = Agg * colors # extend aggregate colors to vertices
    else:
        # color aggregates in sequence
        Agg = coo_matrix(Agg)
        pdata = zeros(Ndof)
        colors = array(range(Agg.shape[1])) % Ncolors
        # COO row = dof index, col = aggregate index; color dof by its aggregate.
        pdata[Agg.row] = Agg.col % Ncolors

    write_basic_mesh(Vert, E2V=array(range(N)).reshape(N,1), mesh_type='vertex', pdata=pdata, fname=filename)

    # plot_type = 'primal', using a global Delaunay triangulation of the shrunken mesh,
    # we visualize the aggregates as if the global Delaunay triangulation defined a continuous Galerkin
    # mesh upon which our aggregates are defined.
    #circum_cent, edges, tri_pts, tri_nbs = delaunay.delaunay(Vert[:,0], Vert[:,1])
    #coarse_grid_vis(filename, Vert, tri_pts, Agg, A=A, plot_type='primal', mesh_type='tri')

    filename = fname + "_aggs.vtu"
    # Do a local Delaunay triangulation for each aggregate, and throw that down as an element in a new mesh
    Agg = csc_matrix(Agg)
    E2Vnew = zeros((0,),dtype=int)
    colors_new = zeros((0,),dtype=int)
    for i in range(Agg.shape[1]):
        rowstart = Agg.indptr[i]
        rowend = Agg.indptr[i+1]
        #The nonzeros in column i of Agg define the dofs in Agg i
        members = Agg.indices[rowstart:rowend]
        if max(members.shape) > 2:
            # 3+ members: triangulate the aggregate locally.
            circum_cent, edges, tri_pts, tri_nbs = delaunay.delaunay(Vert[members,0], Vert[members,1])
            E2Vnew = hstack( (E2Vnew, ravel(members[ravel(tri_pts)]) ) )
            colors_new = hstack( (colors_new, ravel(repeat(colors[i], tri_pts.shape[0]))) )
        if max(members.shape) == 2:
            #create a dummy element, so that only a line is drawn between these two points in paraview
            E2Vnew = hstack( (E2Vnew, array([0,members[0],members[1]]) ) )
            colors_new = hstack( (colors_new, colors[i]) )

    #Begin Primal plotting
    E2V = E2Vnew.reshape(-1,3)
    Agg = csr_matrix(Agg)

    if E2V.max() >= Agg.shape[0]:
        # remove elements with Dirichlet BCs
        E2V = E2V[E2V.max(axis=1) < Agg.shape[0]]

    # Find elements with all vertices in same aggregate
    if len(Agg.indices) != Agg.shape[0]:
        # account for 0 rows. Mark them as solitary aggregates
        full_aggs = array(Agg.sum(axis=1),dtype=int).ravel()
        full_aggs[full_aggs==1] = Agg.indices
        full_aggs[full_aggs==0] = Agg.shape[1] + arange(0,Agg.shape[0]-Agg.nnz,dtype=int).ravel()
        ElementAggs = full_aggs[E2V]
    else:
        ElementAggs = Agg.indices[E2V]

    # mask[i] == True if all vertices in element i belong to the same aggregate
    mask = (ElementAggs[:,:-1] == ElementAggs[:,1:]).all(axis=1)
    E2V3 = E2V[mask,:]
    Nel3 = E2V3.shape[0]

    # 3 edges = 4 nodes. Find where the difference is 0 (bdy edge)
    markedges = diff(c_[ElementAggs,ElementAggs[:,0]])
    markedges[mask,:]=1
    markedelements, markededges = where(markedges==0)

    # now concatenate the edges (i.e. first and next one (mod 3 index)
    E2V2 = c_[[E2V[markedelements,markededges],
               E2V[markedelements,(markededges+1)%3]]].T
    Nel2 = E2V2.shape[0]

    colors2 = colors_new[markedelements] #2*ones((1,Nel2)) # color edges with twos
    colors3 = colors_new[mask] #3*ones((1,Nel3)) # color triangles with threes

    # VTK cell types: 3 = line, 5 = triangle.
    Cells = {3: E2V2, 5: E2V3}
    cdata = {3: colors2, 5: colors3}

    write_vtu(Verts=Vert, Cells=Cells, pdata=None, cdata=cdata, pvdata=None, fname=filename)
def my_vis(ml, V, error=None, fname="", E2V=None, Pcols=None):
    """Coarse grid visualization for 2-D problems, for use with Paraview
    For all levels, outputs meshes, aggregates, near nullspace modes B, and selected
    prolongator basis functions. Coarse level meshes are constructed by doing a
    Delaunay triangulation of interpolated fine grid vertices.
    Parameters
    ----------
    ml : {multilevel hierarchy}
        defines the multilevel hierarchy to visualize
    V : {array}
        coordinate array (N x D); perturbed in place by a tiny random jitter
    error : {array}
        Fine grid error to plot (N x D)
    fname : {string}
        string to be prepended to all output files, e.g. 'diffusion1'
    E2V : {array}
        Element index array (Nel x Nelnodes) for the finest level. If None,
        then a Delaunay triangulation is done for the finest level. All coarse
        levels use an internally calculated Delaunay triangulation
    Pcols : {list of tuples}
        Optional input list of tuples of the form [(lvl, [ints]), ...]
        where lvl is an integer defining the level on which to output
        the list of columns in [ints].
    Returns
    -------
    - Writes data to .vtu files for use in Paraview (xml 0.1 format)
    """
    # Remove stale output from previous runs in the current directory.
    system('rm -f *.vtu')
    ##
    # For the purposes of clearer plotting, perturb vertices slightly
    V += rand(V.shape[0], V.shape[1])*1e-6
    ##
    # Create a list of vertices and meshes for all levels
    levels = ml.levels
    Vlist = [V]
    if E2V is None:
        [circ_cent,edges,E2V,tri_nbs]=delaunay.delaunay(V[:,0], V[:,1])
    E2Vlist = [E2V]
    mesh_type_list = []
    mesh_num_list = []
    # Map element arity to a mesh-type name and its VTK cell-type id.
    if E2V.shape[1] == 1:
        mesh_type_list.append('vertex')
        mesh_num_list.append(1)
    if E2V.shape[1] == 3:
        mesh_type_list.append('tri')
        mesh_num_list.append(5)
    if E2V.shape[1] == 4:
        # NOTE(review): 'vertices' is not defined until the output loop far
        # below, so this quad branch would raise NameError if reached --
        # presumably this was meant to test V.shape[1]. Verify before relying
        # on quad-mesh input.
        if vertices.shape[1] == 2:
            mesh_type_list.append('quad')
            mesh_num_list.append(9)
    if sparse.isspmatrix_bsr(levels[0].A):
        nPDEs = levels[0].A.blocksize[0]
    else:
        nPDEs = 1
    Agglist = []
    # NOTE(review): '/' must act as integer division here; under Python 3
    # true division would pass floats to sparse.eye -- confirm this file is
    # Python-2-only or change to '//'.
    Agg = sparse.eye(levels[0].A.shape[0]/nPDEs, levels[0].A.shape[1]/nPDEs, format='csr')
    for i in range(1,len(levels)):
        ##
        # Interpolate the vertices to the next level by taking each
        # aggregate's center of gravity (i.e. average x and y value).
        Agg = Agg.tocsr()*levels[i-1].AggOp.tocsr()
        Agg.data[:] = 1.0
        Agglist.append(Agg)
        AggX = scale_rows(Agg, Vlist[0][:,0], copy=True)
        AggY = scale_rows(Agg, Vlist[0][:,1], copy=True)
        # ones * M gives the column sums of M, i.e. the per-aggregate
        # coordinate totals.
        AggX = ones((1, AggX.shape[0]))*AggX
        AggY = ones((1, AggY.shape[0]))*AggY
        Agg = Agg.tocsc()
        # Number of fine points contained in each aggregate (CSC column sizes).
        count = Agg.indptr[1:]-Agg.indptr[:-1]
        AggX = (ravel(AggX)/count).reshape(-1,1)
        AggY = (ravel(AggY)/count).reshape(-1,1)
        Vlist.append(hstack((AggX, AggY)))
        [circ_cent,edges,E2Vnew,tri_nbs]=delaunay.delaunay(Vlist[i][:,0], Vlist[i][:,1])
        E2Vlist.append(E2Vnew)
        mesh_type_list.append('tri')
        mesh_num_list.append(5)
    ##
    # On each level, output aggregates, B, the mesh
    for i in range(len(levels)):
        mesh_num = mesh_num_list[i]
        mesh_type = mesh_type_list[i]
        vertices = Vlist[i]
        elements = E2Vlist[i]
        # Print mesh
        write_basic_mesh(vertices, elements, mesh_type=mesh_type, \
                         fname=fname+"mesh_lvl"+str(i)+".vtu")
        # Visualize the aggregates
        if i != (len(levels)-1):
            dg_vis(fname+"aggs_lvl"+str(i), Vlist[0], \
                   E2Vlist[0], Agglist[i], mesh_type)
        # Visualize B
        if sparse.isspmatrix_bsr(levels[i].A):
            nPDEs = levels[i].A.blocksize[0]
        else:
            nPDEs = 1
        cell_stuff = {mesh_num : elements}
        for j in range(nPDEs):
            # Rows j, j+nPDEs, j+2*nPDEs, ... of B belong to unknown j.
            indys = arange(j,levels[i].A.shape[0],nPDEs)
            write_vtu(Verts=vertices, Cells=cell_stuff, pdata=levels[i].B[indys,:], \
                      fname=fname+"B_variable"+str(j)+"_lvl"+str(i)+".vtu")
    ##
    # Output requested prolongator basis functions
    if Pcols is not None:
        for (lvl,cols) in Pcols:
            P = levels[lvl].P.tocsc()
            cell_stuff = {mesh_num_list[lvl] : E2Vlist[lvl]}
            for i in cols:
                # Densify a single prolongator column for point-data output.
                Pcol = array(P[:,i].todense())
                write_vtu(Verts=Vlist[lvl], Cells=cell_stuff, pdata=Pcol,
                          fname=fname+"P_lvl"+str(lvl)+"col"+str(i)+".vtu")
    ##
    # Output the error on the finest level
    if error is not None:
        error = error.reshape(-1,1)
        cell_stuff = {mesh_num_list[0] : E2Vlist[0]}
        if sparse.isspmatrix_bsr(levels[0].A):
            nPDEs = levels[0].A.blocksize[0]
        else:
            nPDEs = 1
        for j in range(nPDEs):
            indys = arange(j, levels[0].A.shape[0], nPDEs)
            write_vtu(Verts=Vlist[0], Cells=cell_stuff, pdata=error[indys,:], \
                      fname=fname+"error_variable"+str(j)+".vtu")
| pombreda/pyamg | Examples/ComplexSymmetric/my_vis.py | Python | bsd-3-clause | 14,787 | [
"ParaView",
"VTK"
] | 8d644e19382d8082e7b2759798d70335494115fbcb64ce8f85af9765ff9fb05a |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles decorators.
Note: this module only deals with functions whose decorators are still recorded
in the AST. This does not always happen. See the unit test for an example.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import pretty_printer
class DecoratorsTransformer(gast.NodeTransformer):
  """Converts or removes decorators.

  Decorator values listed in `remove_decorators` are dropped from the AST;
  every other resolvable decorator is kept and recorded in
  `additional_dependencies` so the generated code can import it.
  """

  def __init__(self, remove_decorators):
    self.remove_decorators = remove_decorators
    self.additional_dependencies = set()

  # pylint:disable=invalid-name

  def visit_FunctionDef(self, node):
    self.generic_visit(node)
    kept = []
    for dec in node.decorator_list:
      # A decorator applied with arguments shows up as a Call node; resolve
      # the callee in that case.
      target = dec.func if isinstance(dec, gast.Call) else dec
      # Special cases.
      # TODO(mdan): Is there any way we can treat these more generically?
      # We may want to forego using decorators altogether if we can't
      # properly support them.
      if isinstance(target, gast.Name) and target.id in ('classmethod',):
        # Assumption: decorators are only visible in the AST when converting
        # a function inline (via another decorator).
        # In that case, the converted function is no longer part of the
        # original object that it was declared into.
        # This is currently verified by tests.
        continue
      if not anno.hasanno(target, 'live_val'):
        raise ValueError(
            'Could not resolve decorator: %s' % pretty_printer.fmt(target))
      value = anno.getanno(target, 'live_val')
      if value in self.remove_decorators:
        continue
      kept.append((dec, value))
    for _, value in kept:
      # Decorators living in __main__ cannot be imported by generated code.
      if value.__module__ == '__main__':
        raise ValueError(
            'decorator "%s" was not allowed because it is declared '
            'in the module "%s". To fix this, declare it in a separate '
            'module that we can import it from.' % (value,
                                                    value.__module__))
      self.additional_dependencies.add(value)
    node.decorator_list = [dec for dec, _ in kept]
    return node

  # pylint:enable=invalid-name
def transform(node, remove_decorators):
  """Strips the decorator values in `remove_decorators` from `node`.

  Returns the transformed node together with the set of decorator values
  that were kept and must therefore be importable by the generated code.
  """
  visitor = DecoratorsTransformer(remove_decorators)
  transformed = visitor.visit(node)
  return transformed, visitor.additional_dependencies
| Xeralux/tensorflow | tensorflow/contrib/py2tf/converters/decorators.py | Python | apache-2.0 | 3,238 | [
"VisIt"
] | e8b0c3123d884204846442f6b677f8b25b456b11d8cde6989f6d6f349ba9443f |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
import warnings
# Python 3 compatibility: alias the Python 2 builtin names used below.
# NOTE(review): lexicographic comparison of version strings works for
# CPython's "3.x" strings, but sys.version_info[0] >= 3 would be more robust.
if sys.version > '3':
    xrange = range
    basestring = str
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
    """
    Helper methods to load, save and pre-process data used in MLlib.
    .. versionadded:: 1.0.0
    """
    @staticmethod
    def _parse_libsvm_line(line, multiclass=None):
        """
        Parses a line in LIBSVM format into (label, indices, values).
        """
        # `multiclass` is a deprecated no-op kept for call compatibility.
        if multiclass is not None:
            warnings.warn("deprecated", DeprecationWarning)
        items = line.split(None)
        label = float(items[0])
        nnz = len(items) - 1
        indices = np.zeros(nnz, dtype=np.int32)
        values = np.zeros(nnz)
        for i in xrange(nnz):
            index, value = items[1 + i].split(":")
            # LIBSVM indices are one-based; store them zero-based.
            indices[i] = int(index) - 1
            values[i] = float(value)
        return label, indices, values
    @staticmethod
    def _convert_labeled_point_to_libsvm(p):
        """Converts a LabeledPoint to a string in LIBSVM format."""
        from pyspark.mllib.regression import LabeledPoint
        assert isinstance(p, LabeledPoint)
        items = [str(p.label)]
        v = _convert_to_vector(p.features)
        if isinstance(v, SparseVector):
            # Emit only stored entries, converting back to one-based indices.
            nnz = len(v.indices)
            for i in xrange(nnz):
                items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
        else:
            for i in xrange(len(v)):
                items.append(str(i + 1) + ":" + str(v[i]))
        return " ".join(items)
    @staticmethod
    @since("1.0.0")
    def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None, multiclass=None):
        """
        Loads labeled data in the LIBSVM format into an RDD of
        LabeledPoint. The LIBSVM format is a text-based format used by
        LIBSVM and LIBLINEAR. Each line represents a labeled sparse
        feature vector using the following format:
        label index1:value1 index2:value2 ...
        where the indices are one-based and in ascending order. This
        method parses each line into a LabeledPoint, where the feature
        indices are converted to zero-based.
        :param sc: Spark context
        :param path: file or directory path in any Hadoop-supported file
                     system URI
        :param numFeatures: number of features, which will be determined
                            from the input data if a nonpositive value
                            is given. This is useful when the dataset is
                            already split into multiple files and you
                            want to load them separately, because some
                            features may not present in certain files,
                            which leads to inconsistent feature
                            dimensions.
        :param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
        >>> tempFile.flush()
        >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
        >>> tempFile.close()
        >>> examples[0]
        LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
        >>> examples[1]
        LabeledPoint(-1.0, (6,[],[]))
        >>> examples[2]
        LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
        """
        from pyspark.mllib.regression import LabeledPoint
        # `multiclass` is a deprecated no-op kept for call compatibility.
        if multiclass is not None:
            warnings.warn("deprecated", DeprecationWarning)
        lines = sc.textFile(path, minPartitions)
        parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
        if numFeatures <= 0:
            # Cache: `parsed` is scanned once to infer the dimension and
            # again when building the LabeledPoints below.
            parsed.cache()
            # Dimension = largest (zero-based) index seen anywhere, plus one;
            # empty rows contribute -1 so they never win the max.
            numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
        return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
    @staticmethod
    @since("1.0.0")
    def saveAsLibSVMFile(data, dir):
        """
        Save labeled data in LIBSVM format.
        :param data: an RDD of LabeledPoint to be saved
        :param dir: directory to save the data
        >>> from tempfile import NamedTemporaryFile
        >>> from fileinput import input
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from glob import glob
        >>> from pyspark.mllib.util import MLUtils
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
        """
        lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
        lines.saveAsTextFile(dir)
    @staticmethod
    @since("1.1.0")
    def loadLabeledPoints(sc, path, minPartitions=None):
        """
        Load labeled points saved using RDD.saveAsTextFile.
        :param sc: Spark context
        :param path: file or directory path in any Hadoop-supported file
                     system URI
        :param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
        >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
        [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
        """
        # Default matches the Scala-side behavior: at most 2 partitions.
        minPartitions = minPartitions or min(sc.defaultParallelism, 2)
        return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
    @staticmethod
    @since("1.5.0")
    def appendBias(data):
        """
        Returns a new vector with `1.0` (bias) appended to
        the end of the input vector.
        """
        vec = _convert_to_vector(data)
        if isinstance(vec, SparseVector):
            # Preserve sparsity: the bias is one extra stored entry at the end.
            newIndices = np.append(vec.indices, len(vec))
            newValues = np.append(vec.values, 1.0)
            return SparseVector(len(vec) + 1, newIndices, newValues)
        else:
            return _convert_to_vector(np.append(vec.toArray(), 1.0))
    @staticmethod
    @since("1.5.0")
    def loadVectors(sc, path):
        """
        Loads vectors saved using `RDD[Vector].saveAsTextFile`
        with the default number of partitions.
        """
        return callMLlibFunc("loadVectors", sc, path)
    @staticmethod
    @since("2.0.0")
    def convertVectorColumnsToML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Vector` type to the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of vector columns to be converted.
          New vector columns will be ignored. If unspecified, all old
          vector columns will be converted excepted nested ones.
        :return:
          the input dataset with old vector columns converted to the
          new vector type
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        # The actual conversion happens on the Scala side.
        return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertVectorColumnsFromML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Vector` type from the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of vector columns to be converted.
          Old vector columns will be ignored. If unspecified, all new
          vector columns will be converted except nested ones.
        :return:
          the input dataset with new vector columns converted to the
          old vector type
        >>> import pyspark
        >>> from pyspark.ml.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertMatrixColumnsToML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Matrix` type to the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of matrix columns to be converted.
          New matrix columns will be ignored. If unspecified, all old
          matrix columns will be converted excepted nested ones.
        :return:
          the input dataset with old matrix columns converted to the
          new matrix type
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertMatrixColumnsFromML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Matrix` type from the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of matrix columns to be converted.
          Old matrix columns will be ignored. If unspecified, all new
          matrix columns will be converted except nested ones.
        :return:
          the input dataset with new matrix columns converted to the
          old matrix type
        >>> import pyspark
        >>> from pyspark.ml.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
    """
    Mixin for models and transformers which may be saved as files.
    .. versionadded:: 1.3.0
    """
    def save(self, sc, path):
        """
        Persist this model under ``path``.
        Implementations write:
        * human-readable (JSON) model metadata to path/metadata/
        * Parquet formatted data to path/data/
        A saved model can later be restored with py:meth:`Loader.load`.
        :param sc: Spark context used to save model data.
        :param path: Directory in which to save this model; an exception
                     is thrown if the directory already exists.
        """
        # Abstract: concrete model classes supply the serialization.
        raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
    """
    Mixin for models that provide save() through their Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @since("1.3.0")
    def save(self, sc, path):
        """Save this model to the given path."""
        # Validate eagerly so callers get a clear TypeError instead of an
        # opaque Py4J failure.
        if not isinstance(sc, SparkContext):
            raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
        if not isinstance(path, basestring):
            raise TypeError("path should be a basestring, got type %s" % type(path))
        java_model = self._java_model
        java_model.save(sc._jsc.sc(), path)
class Loader(object):
    """
    Mixin for classes which can load saved models from files.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def load(cls, sc, path):
        """
        Load a model from the given path. The model should have been
        saved using py:meth:`Saveable.save`.
        :param sc: Spark context used for loading model files.
        :param path: Path specifying the directory to which the model
                     was saved.
        :return: model instance
        """
        # Bug fix: the original `raise NotImplemented` raised the
        # NotImplemented singleton, which is not an exception -- on Python 3
        # that produces "TypeError: exceptions must derive from BaseException"
        # and masks the real "not overridden" error. Raise the exception type.
        raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
    """
    Mixin for classes which can load saved models using its Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _java_loader_class(cls):
        """
        Returns the full class name of the Java loader. The default
        implementation replaces "pyspark" by "org.apache.spark" in
        the Python full class name.
        """
        package = cls.__module__.replace("pyspark", "org.apache.spark")
        return "%s.%s" % (package, cls.__name__)
    @classmethod
    def _load_java(cls, sc, path):
        """
        Load a Java model from the given path.
        """
        # Walk the JVM gateway one attribute per package segment to reach
        # the Scala companion object, then delegate loading to it.
        obj = sc._jvm
        for piece in cls._java_loader_class().split("."):
            obj = getattr(obj, piece)
        return obj.load(sc._jsc.sc(), path)
    @classmethod
    @since("1.3.0")
    def load(cls, sc, path):
        """Load a model from the given path."""
        return cls(cls._load_java(sc, path))
class LinearDataGenerator(object):
    """Utils for generating linear data.
    .. versionadded:: 1.5.0
    """
    @staticmethod
    @since("1.5.0")
    def generateLinearInput(intercept, weights, xMean, xVariance,
                            nPoints, seed, eps):
        """
        :param: intercept bias factor, the term c in X'w + c
        :param: weights feature vector, the term w in X'w + c
        :param: xMean Point around which the data X is centered.
        :param: xVariance Variance of the given data
        :param: nPoints Number of points to be generated
        :param: seed Random Seed
        :param: eps Used to scale the noise. If eps is set high,
                the amount of gaussian noise added is more.
        Returns a list of LabeledPoints of length nPoints
        """
        # Coerce everything to plain Python floats so Py4J marshals doubles.
        w = list(map(float, weights))
        mean = list(map(float, xMean))
        variance = list(map(float, xVariance))
        generated = callMLlibFunc(
            "generateLinearInputWrapper", float(intercept), w, mean,
            variance, int(nPoints), int(seed), float(eps))
        return list(generated)
    @staticmethod
    @since("1.5.0")
    def generateLinearRDD(sc, nexamples, nfeatures, eps,
                          nParts=2, intercept=0.0):
        """
        Generate a RDD of LabeledPoints.
        """
        return callMLlibFunc(
            "generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
            float(eps), int(nParts), float(intercept))
def _test():
    """Run this module's doctests against a local, two-core SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("mllib.util tests")\
        .getOrCreate()
    globs['spark'] = spark
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # Fix: use sys.exit instead of the site-provided exit() builtin,
        # which is not guaranteed to exist (e.g. when run with `python -S`).
        # `sys` is imported at module scope.
        sys.exit(-1)
# Run the doctests when this module is executed directly.
if __name__ == "__main__":
    _test()
| TK-TarunW/ecosystem | spark-2.0.2-bin-hadoop2.7/python/pyspark/mllib/util.py | Python | apache-2.0 | 19,841 | [
"Gaussian"
] | fa85d6dd75bbaded7021f986c815d220667fad789173ed8fa712d9dcdec52176 |
"""provides the parent frame of Priithon's ND 2d-section-viewer"""
from __future__ import print_function
__author__ = "Sebastian Haase <haase@msg.ucsf.edu>"
__license__ = "BSD license - see LICENSE file"
import six, sys
from .splitNDcommon import *
def viewImgFiles(filenames):
    """Open each of the given image files in its own Priithon viewer.
    On wxGTK 2.4, drag-and-drop delivers %-encoded file names (see
    wxPython-users, Robin Dunn, "wxFileDropTarget get filename %-encoded
    (on gtk not on msw)", Dec 1 2004), so decode those first.
    """
    # The platform check is loop-invariant; evaluate it once.
    needsUnquote = wx.Platform == "__WXGTK__" and wx.VERSION[:2] == (2,4)
    for fn in filenames:
        if needsUnquote:
            import urllib.request, urllib.parse, urllib.error
            fn = urllib.parse.unquote(fn)
        # _scoopLevel=2: run() inspects the caller's caller frame when
        # deriving a window title.
        run(fn, _scoopLevel=2)
'''
class MyFileDropTarget(wx.FileDropTarget):
def __init__(self, parent):
wx.FileDropTarget.__init__(self)
self.mySPV = parent
#self.log = log
def OnDropFiles(self, x, y, filenames):
#if len(filenames) > 1:
# wx.MessageBox("Oops", "More than file", style=wx.ICON_ERROR)
# filenames = filenames[:1] # HACK
#global fn, a
import os.path
if len(filenames) == 1:
f = filenames[0]
if os.path.isdir(f):
from Priithon.all import Y
Y.listFilesViewer(f)
return
fUPPER = f.upper()
if fUPPER.endswith('.PY') or \
fUPPER.endswith('.PYW') or \
fUPPER.endswith('.PYC'):
ssss = "execfile('%s')" % f
import __main__
#print __main__.shell.promptPosEnd, __main__.shell.GetTextLength()
#s.GetCurrentPos()
#setCurrentPos(112542)
#
if not __main__.shell.promptPosEnd == __main__.shell.GetTextLength():
wx.MessageBox("Your are in the middle of typing a command",
"not ready",
style=wx.ICON_ERROR)
return
__main__.shell.SetCurrentPos( __main__.shell.GetTextLength() )
__main__.shell.InsertText(__main__.shell.promptPosEnd, ssss)
__main__.shell.SetCurrentPos( __main__.shell.GetTextLength() )
__main__.shell.SetFocus()
# __main__.shell.promptPosStart += len(ssss)
# __main__.shell.promptPosEnd += len(ssss)
# __main__.shell.push(ssss)
# __main__.shell.prompt()
return
import usefulX as Y
if 1: ## ctrl not pressed -- new window
viewImgFiles(filenames)
else: ## ctrl pressed - use existing frame
if len(filenames) > 1:
wx.MessageBox("Oops", "More than file", style=wx.ICON_ERROR)
filenames = filenames[:1] # HACK
fn = filenames[0]
a = Y.load(fn) #20051213
if a is None:
return
spv = self.mySPV
if len(a.shape) != len(spv.data.shape):
wx.MessageBox("Dimension mismatch old vs. new",
"Differnt dimesion !?",
style=wx.ICON_ERROR)
else:
spv.data = a
spv.helpNewData(doAutoscale=False, setupHistArr=True)#20051128 - CHECK
#20051128 - CHECK
# spv.img = spv.data[ tuple(spv.zsec) ]
# spv.viewer.setImage( spv.img )
# ####3 HACK ??
# ##if spv.hist_arr is None:
# spv.recalcHist()
title=''
if hasattr(spv.data, 'Mrc'):
title += "<%s>" % spv.data.Mrc.filename
title2 = "%d) %s" %(spv.id, title)
wx.GetTopLevelParent(spv.viewer).SetTitle(title2)
'''
def run(img, title=None, size=None, originLeftBottom=None, _scoopLevel=1): # just to not get a return value
    """Open `img` in a split-ND viewer window.
    img can be either an n-D image array (n >= 2)
    or a filename for a Fits or Mrc or jpg/gif/... file
    or a sequence of the above
    `_scoopLevel` is the number of stack frames between this call and the
    user's frame; it is used when deriving a window title from the caller's
    variables.
    """
    import os
    from . import usefulX as Y
    # A list opens one viewer window per entry.
    if type(img) is list:
        for i in img:
            run(i, title, size, originLeftBottom, _scoopLevel=2)
        return
    # A tuple is combined into a single mock n-D array and shown together.
    if type(img) is tuple:
        from . import fftfuncs as F
        imgs = tuple(( Y.load(i) if isinstance(i, six.string_types) else i for i in img ))
        moa = F.mockNDarray(*imgs)
        run(moa, title, size, originLeftBottom, _scoopLevel=2)
        return
    #"filename"
    if isinstance(img, six.string_types) and os.path.isfile(img):
        fn=img
        p,f = os.path.split(os.path.abspath(fn))
        #print fn, (fn[:6] == "_thmb_"), (fn[-4:] == ".jpg")
        # A "_thmb_*.jpg" thumbnail redirects to its full-size original
        # when that file exists next to it.
        if f[:6] == "_thmb_" and f[-4:] == ".jpg":
            f = os.path.join(p, f[6:-4])
            if os.path.isfile( f ):
                fn = f
        elif f[-4:] == ".txt":
            # Text files open in the mmviewer instead.
            from .mmviewer import mview
            if size is None:
                return mview(fn)
            else:
                return mview(fn, size=size)
        a = Y.load(fn) #20051213
        if a is None:
            return
        #20060824 CHECK if originLeftBottom is None and \
        #20060824 CHECK        hasattr(a, '_originLeftBottom'):
        #20060824 CHECK     originLeftBottom = a._originLeftBottom
        if title is None:
            import os.path
            title = "<%s>" % os.path.basename(fn)
        return run(a, title, size, originLeftBottom=originLeftBottom, _scoopLevel=2)
    if title is None:
        # python expression: evaluate this string and use it as title !
        if type(img)==str: # title
            try:
                #import sys
                # NOTE(review): eval() of a caller-supplied string -- fine for
                # an interactive tool, but unsafe on untrusted input.
                fr = sys._getframe(_scoopLevel)
                locs = fr.f_locals
                globs = fr.f_globals
                a = eval(img, globs, locs)
                img,title = a, img
            except ValueError: # HACK: stack not deep enough
                pass
            #eval("Y.view(%s, '%s', %s)" % (img, img, size), locs) # HACK
        else: # see if img has a name in the parent dictionary - use that as title
            try:
                #import sys
                fr = sys._getframe(_scoopLevel)
                vars = fr.f_globals.copy()
                vars.update( fr.f_locals )
                for v in list(vars.keys()):
                    if vars[v] is img:
                        title = v
                        break
            except ValueError: # stack not deep enough
                pass
    spv(img, title, size, originLeftBottom)
class spv(spvCommon):
""" self.hist_arr != None ONLY IF NOT self.img.type() in (na.UInt8, na.Int16, na.UInt16)
then also self.hist_max and self.hist_min is set to min,max of number type !!
and: self.hist_range = self.hist_max - self.hist_min
then call:
S.histogram(self.img, self.hist_min, self.hist_max, self.hist_arr)
self.hist.setHist(self.hist_arr, self.hist_min, self.hist_max)
otherwise call self.recalcHist()
this _should_ be done from worker thread !?
"""
##thrd class ResultEvent(wx.PyEvent):
##thrd """Simple event to carry arbitrary result data"""
##thrd
##thrd def __init__(self, data):
##thrd wx.PyEvent.__init__(self)
##thrd self.SetEventType(EVT_RESULT_ID)
##thrd self.data = data
def __init__(self, data, title='', size=None,
originLeftBottom=None, parent=None, frameParent=None):
"""
splitter window for single-color viewerer
combines a "topBox" - zslider, OnMouse info,
a viewer window
and a set histogram windows (one for each color)
if parent is None: makes a new frame with "smart" title and given size
"""
spvCommon.__init__(self)
# 20070715: what can we do with zeros in zshape - skip slider ?!
if not isinstance(data, F.mockNDarray):
data = N.asanyarray(data) # 20060720 - numpy arrays don't have ndim attribute
if min(data.shape) < 1:
raise ValueError("data shape contains zeros (%s)"% (data.shape,))
if not 1 < data.ndim:
raise ValueError("cannot display %dD data"% data.ndim)
try:
_1checkIt = repr(data) # protect against crash from ""error: copy2bytes: access beyond buffer""
del _1checkIt
except:
raise
####self.copyDataIfUnsupportedType(data)
self.data = data
self.zshape= self.data.shape[:-2]
self.zndim = len(self.zshape)
self.zsec = [0] * self.zndim
self.zlast = [0]*self.zndim # remember - for checking if update needed
#FIMXE: next line should be done by calling helpNewData() instead - see below
self.img = self.data[ tuple(self.zsec) ]
if self.img.dtype.type in (N.complex64, N.complex128):
if True: #self.m_viewComplexAsAbsNotPhase: (memo20051128-> viewComplexAsAbsNotPhase in viewer-class
self.img = N.asarray(abs(self.img), N.float32) # check if this does temp copy
else:
#from Priithon.all import U
#data = U.phase(self.m_imgArr.astype(na.float32)
#not temp copy for type conversion:
self.img = N.arctan2(N.asarray(self.img.imag, N.float32),
N.asarray(self.img.real, N.float32))
self.recalcHist_todo_Set = set()
from .usefulX import viewers
n = len( viewers )
#self.__class__.viewers[ title ] = self
viewers.append( self )
self.id = n
if parent is None:
parent=self.makeFrame(size, title, frameParent)
needShow=True
else:
self.downSizeToFitWindow=False
needShow=False
splitter = wx.SplitterWindow(parent, -1, style=wx.SP_LIVE_UPDATE|wx.SP_3DSASH)
self.splitter = splitter
sizer = wx.BoxSizer(wx.VERTICAL)
self.upperPanel = wx.Panel(splitter, -1)
self.upperPanel.SetSizer(sizer)
self.upperPanel.SetAutoLayout(True)
self.boxAtTop = wx.BoxSizer(wx.HORIZONTAL)
self.putZSlidersIntoTopBox(self.upperPanel, self.boxAtTop)
#20171225-PY2to3
#sizer.AddSizer(self.boxAtTop, 0, wx.GROW|wx.ALL, 2)
sizer.Add(self.boxAtTop, 0, wx.GROW|wx.ALL, 2)
from . import viewer
v = viewer.GLViewer(self.upperPanel, self.img, originLeftBottom=originLeftBottom)
test="""
# wx.glcanvas not running on xcygwin
# https://github.com/mkeeter/kokopelli/commit/823a93438d9c5f75e16d19339a9ab96e5d971b0c
from wx import glcanvas
v = None
for d in [32, 24, 16, 8]:
try:
v = viewer.GLViewer(self.upperPanel, self.img, originLeftBottom=originLeftBottom, depth=d)
except Exception, e:
print d, e
#continue
else:
break
if v is None:
raise RuntimeError, 'depth of this display not found for open gl'"""
self.viewer = v
self.viewer.Bind(wx.EVT_IDLE, self.OnIdle)
if self.zndim > 0:
v.m_menu.AppendSeparator()
v.m_menu.AppendRadioItem(Menu_AutoHistSec0, "autoHist off")
v.m_menu.AppendRadioItem(Menu_AutoHistSec1, "autoHist viewer")
v.m_menu.AppendRadioItem(Menu_AutoHistSec2, "autoHist viewer+histAutoZoom")
parent.Bind(wx.EVT_MENU, self.OnMenuAutoHistSec, id=Menu_AutoHistSec0)
parent.Bind(wx.EVT_MENU, self.OnMenuAutoHistSec, id=Menu_AutoHistSec1)
parent.Bind(wx.EVT_MENU, self.OnMenuAutoHistSec, id=Menu_AutoHistSec2)
#wx.EVT_MENU(parent, Menu_AutoHistSec0, self.OnMenuAutoHistSec)
#wx.EVT_MENU(parent, Menu_AutoHistSec1, self.OnMenuAutoHistSec)
#wx.EVT_MENU(parent, Menu_AutoHistSec2, self.OnMenuAutoHistSec)
v.m_menu.AppendSeparator()
self.vOnWheel_zoom = self.viewer.OnWheel
menuSub0 = wx.Menu()
menuSub0.Append(Menu_WheelWhatMenu+1+self.zndim, "zoom")
for i in range(self.zndim):
menuSub0.Append(Menu_WheelWhatMenu+1+i, "scroll axis %d" % i)
#20171225-PY2to3 deprecation warning use Append
if wx.version().startswith('3'):
v.m_menu.AppendMenu(Menu_WheelWhatMenu, "mouse wheel does", menuSub0)
else:
v.m_menu.Append(Menu_WheelWhatMenu, "mouse wheel does", menuSub0)
for i in range(self.zndim+1):
#wx.EVT_MENU(parent, Menu_WheelWhatMenu+1+i,self.OnWheelWhat)
parent.Bind(wx.EVT_MENU, self.OnWheelWhat, id=Menu_WheelWhatMenu+1+i)
menuSub1 = wx.Menu()
for i in range(len(scrollIncrL)):
menuSub1.Append(Menu_ScrollIncrementMenu+1+i, "%3s" % scrollIncrL[i])
#20171225-PY2to3 deprecation warning use Append
if wx.version().startswith('3'):
v.m_menu.AppendMenu(Menu_ScrollIncrementMenu, "scroll increment", menuSub1)
else:
v.m_menu.Append(Menu_ScrollIncrementMenu, "scroll increment", menuSub1)
for i in range(len(scrollIncrL)):
#wx.EVT_MENU(parent, Menu_ScrollIncrementMenu+1+i,self.OnScrollIncr)
parent.Bind(wx.EVT_MENU, self.OnScrollIncr, id=Menu_ScrollIncrementMenu+1+i)
menuSub2 = wx.Menu()
from Priithon.all import Y
self.plot_avgBandSize=1
self.plot_s='-+'
def OnChProWi(ev):
i= wx.GetNumberFromUser("each line profile gets averaged over a band of given width",
'width:', "profile averaging width:",
self.plot_avgBandSize, 1, 1000)
self.plot_avgBandSize=i
#Y.vLeftClickNone(self.id) # fixme: would be nice if
#done!?
Y._plotprofile_avgSize = self.plot_avgBandSize
def OnSelectSubRegion(ev):
Y.vLeftViewSubRegion(self.id)
left_list = [('horizontal profile',
lambda ev: Y.vLeftClickHorizProfile(self.id, self.plot_avgBandSize, self.plot_s)),
('vertical profile',
lambda ev: Y.vLeftClickVertProfile(self.id, self.plot_avgBandSize, self.plot_s)),
('any-line-profile',
lambda ev: Y.vLeftClickLineProfile(self.id, abscissa='line', s=self.plot_s)),
('any-line-profile over x',
lambda ev: Y.vLeftClickLineProfile(self.id, abscissa='x', s=self.plot_s)),
('any-line-profile over y',
lambda ev: Y.vLeftClickLineProfile(self.id, abscissa='y', s=self.plot_s)),
('Z-profile',
lambda ev: Y.vLeftClickZProfile(self.id, self.plot_avgBandSize, self.plot_s)),
('line measure',
lambda ev: Y.vLeftClickLineMeasure(self.id)),
('triangle measure',
lambda ev: Y.vLeftClickTriangleMeasure(self.id)),
('mark-cross',
lambda ev: Y.vLeftClickMarks(self.id, callFn=None)),
('<nothing>',
lambda ev: Y.vLeftClickNone(self.id)),
('<clear graphics>',
lambda ev: Y.vClearGraphics(self.id)),
('<change profile "width"',
lambda ev: OnChProWi(ev)),
('select-view xy-sub-region',
lambda ev: OnSelectSubRegion(ev)),
]
for i in range(len(left_list)):
itemId = Menu_LeftClickMenu+1+i
menuSub2.Append(itemId, "%s" % left_list[i][0])
#20171225-PY2to3 deprecation warning
parent.Bind(wx.EVT_MENU, left_list[i][1], id=itemId)
#wx.EVT_MENU(parent, itemId, left_list[i][1])
if wx.version().startswith('3') and not wx.version().endswith('(phoenix)'):
v.m_menu.AppendMenu(Menu_LeftClickMenu, "on left click ...", menuSub2)
else:
v.m_menu.Append(Menu_LeftClickMenu, "on left click ...", menuSub2)
v.m_menu_save.Append(Menu_SaveND, "save nd data stack")
v.m_menu_save.Append(Menu_AssignND, "assign nd data stack to var name")
#20171225-PY2to3 deprecation warning
parent.Bind(wx.EVT_MENU, self.OnMenuSaveND, id=Menu_SaveND)
parent.Bind(wx.EVT_MENU, self.OnMenuAssignND, id=Menu_AssignND)
#wx.EVT_MENU(parent, Menu_SaveND, self.OnMenuSaveND)
#wx.EVT_MENU(parent, Menu_AssignND, self.OnMenuAssignND)
#dt = MyFileDropTarget(self)
#v.SetDropTarget(dt)
from Priithon import fileDropPopup
v.SetDropTarget( fileDropPopup.FileDropTarget(v) )
def splitND_onMouse(x,y, ev): #20080707 ,xyEffVal):
yy,xx = int(round(y)), int(round(x)) # NEW 20080701: in new coord system, integer pixel coord go through the center of pixel
try:
pic_ny, pic_nx = self.img.shape
except AttributeError:
return # can happend when window just got closed (windows)
if (0<=yy<pic_ny and
0<=xx<pic_nx):
xyEffVal = self.img[yy,xx] #20080707
if N.issubdtype(self.data.dtype, N.integer): #(N.uint8, N.int16, N.uint16, N.int32):
vs = "%6d" %(xyEffVal,)
elif self.data.dtype == bool:
vs = "%4d" %(xyEffVal,)
else:
if N.abs(xyEffVal) > .02:
vs = "%7.2f" %(xyEffVal,)
else:
vs = "%7.2e" %(xyEffVal,)
else:
vs = "---"
#self.label.SetLabel("xy: %3d %3d val: %7.2f"%(x,y, xyEffVal))#self.img[y,x]))
if v.m_scale > 1 and self.showFloatCoordsWhenZoomingIn:
self.label.SetLabel("%.1fx yx: %5.1f %5.1f val: %s"%(v.m_scale, y,x, vs))
elif v.m_scale !=1:
self.label.SetLabel("%.1fx yx: %3d %3d val: %s"%(v.m_scale, yy,xx, vs))
else:
self.label.SetLabel("yx: %3d %3d val: %s"%(yy,xx, vs))
v.doOnMouse.append(splitND_onMouse)
from . import histogram
h = histogram.HistogramCanvas(splitter, size=(400,110))
self.hist = h
#20070525-black_on_black h.SetCursor(wx.CROSS_CURSOR)
import weakref # 20060823
# 20060823 v.hist4colmap = weakref.proxy( h ) # HACK
# see viewer.py::updateHistColMap
v.my_hist = weakref.proxy( h ) # CHECK 20060823
h.my_viewer = weakref.proxy( v ) # CHECK 20060823
v.my_spv = weakref.proxy( self ) # CHECK 20070823
h.my_spv = weakref.proxy( self ) # CHECK 20070823
def splitND_onBrace(s, gamma=None):
l,r = s.leftBrace, s.rightBrace
try:
if gamma is not None:
v.cmgray(gamma)
v.changeHistogramScaling(l,r)
except:
pass
h.doOnBrace.append(splitND_onBrace)
#20080707 del splitND_onBrace
def splitND_onMouseHist(xEff, ev): #20080707 , bin):
l,r = h.leftBrace, h.rightBrace
if self.data.dtype.type in (N.uint8, N.int16, N.uint16, N.int32):
self.label.SetLabel("I: %6.0f l/r: %6.0f %6.0f" %(xEff,l,r))
else:
self.label.SetLabel("I: %7.2g l/r: %7.2g %7.2g"%(xEff,l,r))
h.doOnMouse.append(splitND_onMouseHist)
#20080707del splitND_onMouseHist
#from Priithon import seb as S
def splitND_onReload(event=None):
self.helpNewData()
v.OnReload = splitND_onReload
#20171225-PY2to3 deprecation warning use meth: EvtHandler.Bind -> self.Bind()
v.Bind(wx.EVT_MENU, splitND_onReload, id=viewer.Menu_Reload)
#wx.EVT_MENU(v, viewer.Menu_Reload, splitND_onReload)
self.OnReload = splitND_onReload
del splitND_onReload
#self.hist_min, self.hist_min, self.hist_avg, self.hist_dev
sizer.Add(v, 1, wx.GROW|wx.ALL, 2)
if self.downSizeToFitWindow:
fac = 1./1.189207115002721 # >>> 2 ** (1./4)
#v.m_scale *= .05 # fac
s=max(self.img.shape)
while v.m_scale * s > 600:
v.m_scale *= fac
#20070809 wx.Yield()
if needShow:
parent.Show()
self.installKeyCommands(parent)
# ACCEL_CMD:"Cmd" is a pseudo key which is the same as Control for PC and Unix platforms but the special "Apple" (a.k.a as "Command") key on Macs.
self.keyShortcutTable[ wx.MOD_CMD, ord('W') ] = parent.Close
self.autoHistEachSect = 0
self.scrollIncr = 1
self.noHistUpdate = 0 # used for debugging speed issues
#20040317 splitter.SetMinimumPaneSize(20)
splitter.SetMinimumPaneSize(5)
splitter.SetSashGravity(1.0)
splitter.SplitHorizontally(self.upperPanel, h, -50)
#77 splitter.SplitHorizontally(v, h, -50)
#import pdb
#pdb.set_trace()
self.setupHistArr()
self.recalcHist(triggeredFromIdle=True)
#print "debug:", self.mmms
self.hist.autoFit(amin=self.mmms[0], amax=self.mmms[1])
#20051128 wx.Yield()
#v.changeHistogramScaling(self.mmms[0],self.mmms[1])
wx.Yield()
v.center()
#20171225-PY2to3 deprecation warning use meth: EvtHandler.Bind -> self.Bind()
wx.GetTopLevelParent(parent).Bind(wx.EVT_CLOSE, self.onClose)
#wx.EVT_CLOSE(wx.GetTopLevelParent(parent), self.onClose)
self.setDefaultKeyShortcuts()
##thrd # Set up event handler for any worker thread results
##thrd def EVT_RESULT(win, func):
##thrd win.Connect(-1, -1, EVT_RESULT_ID, func)
##thrd EVT_RESULT(self.self.frame, self.OnResult)
##thrd
##thrd class Worker(threading.Thread):
##thrd def __init__ (self2):
##thrd threading.Thread.__init__(self2)
##thrd self2.start()
##thrd
##thrd def run(self2):
##thrd #print "split.WorkerThread - start"
##thrd
##thrd self.lastHistImg = None
##thrd self.recalcHist__Done = 1
##thrd try:
##thrd while self.a>0 and \
##thrd self.hist.__class__.__name__ != '_wxPyDeadObject':
##thrd wx.Usleep( workerInterval )
##thrd # print self.a
##thrd self.a += 1
##thrd # wx.PostEvent(self.op.GetParent(), NewReport(self.wb))
##thrd # print self.viewer.m_imgArr
##thrd # print self.lastHistImg
##thrd if (not self.viewer.m_imgArr is self.lastHistImg) and \
##thrd self.recalcHist__Done:
##thrd # if (not self.viewer.m_imgArr == self.lastHistImg) and \
##thrd # self.recalcHist__Done:
##thrd self.lastHistImg = self.viewer.m_imgArr
##thrd #time print self.a, "z=", self.z,
##thrd self.recalcHist()
##thrd #time print "done."
##thrd except: # The C++ part of the GLViewer object has been deleted
##thrd pass
##thrd #print "split.WorkerThread exited"
##thrd
##thrd if self.hist_arr is None:
##thrd self.a = 1
##thrd self.aa = Worker()
#still __init__
def onClose(self, ev=None):
#print "debug: splitND::onClose"
try:
del self.data
del self.img
except:
if PriConfig.raiseEventHandlerExceptions:
raise
else:
print(" ### ### cought exception for debugging: #### ", file=sys.stderr)
traceback.print_exc()
print(" ### ### cought exception for debugging: #### ", file=sys.stderr)
from .usefulX import viewers
try:
viewers[ self.id ] = None
except:
if PriConfig.raiseEventHandlerExceptions:
raise
else:
print(" ### ### cought exception for debugging: #### ", file=sys.stderr)
traceback.print_exc()
print(" ### ### cought exception for debugging: #### ", file=sys.stderr)
if ev:
ev.GetEventObject().Destroy()
#20070808self.frame.Destroy()
# import gc
# wx.CallAfter( gc.collect )
#FIXME: size=(width+20,height+50+100)) # 20070627 MSW: was height+120
def makeFrame(self, size, title, frameParent=None):
"""
create frame
if data has "Mrc" attribute, append "<filename>" to given title
"""
self.downSizeToFitWindow=False
### size = (400,400)
if size is None:
height,width = self.data.shape[-2:] #20051128 self.img.shape
if height//2 == (width-1): ## real_fft2d
width = (width-1)*2
if width>600 or height>600:
width=height=600
self.downSizeToFitWindow=True
#22 if self.nz > 1 and width<250: #HACK: minimum width to see z-slider
#22 width = 250
elif type(size) == int:
width,height = size,size
else:
width,height = size
if title is None:
title=''
if hasattr(self.data, 'Mrc'): # was a HACK: and (len(title)<1 or title[-1]!='>'):
if title !='':
title += " "
title += "<%s>" % self.data.Mrc.filename
title2 = "%d) %s" %(self.id, title)
frame = wx.Frame(frameParent, -1, title2, size=(width+20,height+50+100)) # 20070627 MSW: was height+120
from .usefulX import shellMessage
shellMessage("# window: %s\n"% title2)
self.title = title
self.title2 = title2
return frame
    def putZSlidersIntoTopBox(self, parent, boxSizer):
        """Create one wx.Slider per z-dimension plus the mouse-info label and
        insert them into boxSizer.

        Existing children of boxSizer are destroyed first (needed when the
        viewer is reused, e.g. via Y.viewInViewer).
        """
        [si.GetWindow().Destroy() for si in boxSizer.GetChildren()] # needed with Y.viewInViewer
        self.zzslider = [None]*self.zndim
        # iterate backwards: Insert(0, ...) then leaves axes in ascending order
        for i in range(self.zndim-1,-1,-1):
            self.zzslider[i] = wx.Slider(parent, 1001+i, self.zsec[i], 0, self.zshape[i]-1,
                                         wx.DefaultPosition, wx.DefaultSize,
                                         #wx.SL_VERTICAL
                                         wx.SL_HORIZONTAL
                                         | wx.SL_AUTOTICKS | wx.SL_LABELS )
            if self.zshape[i] > 1:
                self.zzslider[i].SetTickFreq(5)#, 1)
                ##boxSizer.Add(vslider, 1, wx.EXPAND)
                boxSizer.Insert(0, self.zzslider[i], 1, wx.EXPAND)
                #wx.EVT_SLIDER(parent, self.zzslider[i].GetId(), self.OnZZSlider)
                parent.Bind(wx.EVT_SLIDER, self.OnZZSlider, id=self.zzslider[i].GetId())
            else: # still good to create the slider - just to no have special handling
                # self.zzslider[i].Show(0) #
                boxSizer.Insert(0, self.zzslider[i], 0, 0)
        if self.zndim == 0:
            label = wx.StaticText(parent, -1, "")
            #label.SetHelpText("This is the help text for the label")
            boxSizer.Add(label, 0, wx.GROW|wx.ALL, 2)
        self.label = wx.StaticText(parent, -1, "----move mouse over image----xxxx") # HACK find better way to reserve space to have "val: 1234" always visible
        boxSizer.Insert(0, self.label, 0, wx.GROW|wx.ALL, 2)#Add(self.label, 0, wx.GROW|wx.ALL, 2)
        boxSizer.Layout()
        parent.Layout()
'''
def copyDataIfUnsupportedType(self, data):
self.dataIsCplx = False
if data.type() == na.Int32:
print "** split-viewer: converted Int32 to Int16"
data = data.astype(na.Int16)
elif data.type() == na.UInt32:
print "** split-viewer: converted UInt32 to UInt16"
data = data.astype(na.UInt16)
elif data.type() == na.Float64:
print "** split-viewer: converted Float64 to Float32"
data = data.astype(na.Float32)
elif data.type() == na.Complex64:
print "** split-viewer: converted Complex64 to Complex32"
self.dataCplx = data.astype(na.Complex32)
self.dataIsCplx = True
self.dataCplxShowAbsNotPhase = True
data = na.abs(self.dataCplx)
elif data.type() == na.Complex32:
print "** split-viewer: complex - used abs()"
self.dataCplx = data
self.dataIsCplx = True
self.dataCplxShowAbsNotPhase = True
data = na.abs(self.dataCplx)
self.data = data
'''
def OnAutoHistScale(self, event=77777):
from . import useful as U
mi,ma,me,ss = U.mmms( self.img )
self.hist.autoFit(amin=mi, amax=ma)
def OnViewFFT(self, event=77777):
from . import fftfuncs as F
if self.data.dtype.type in (N.complex64, N.complex128):
f = F.fft2d(self.data)
#f[ ... , 0,0] = 0. # force DC to zero to ease scaling ...
run(f, title='cFFT2d of %d'%self.id, _scoopLevel=2)
else:
f = F.rfft2d(self.data)
f[ ... , 0,0] = 0. # force DC to zero to ease scaling ...
run(f, title='rFFT2d of %d'%self.id, _scoopLevel=2)
# def On88(self, event):
# import fftfuncs as F
# # if self.dataIsCplx:
# # f = F.fft2d(self.dataCplx)
# # run(f, title='cFFT2d of %d'%self.id, _scoopLevel=2)
# # else:
# f = F.irfft2d(self.data)
# run(f, title='irFFT2d of %d'%self.id, _scoopLevel=2)
def OnViewFFTInv(self, event=77777):
from . import fftfuncs as F
if self.data.dtype.type in (N.complex64, N.complex128):
f = F.irfft2d(self.data)
else:
wx.Bell()
return
# f = F.irfft2d(self.data)
run(f, title='irFFT2d of %d'%self.id, _scoopLevel=2)
def OnViewCplxAsAbs(self, event=77777):
if not self.data.dtype.type in (N.complex64, N.complex128)\
or self.viewer.m_viewComplexAsAbsNotPhase:
wx.Bell()
return
self.viewer.m_viewComplexAsAbsNotPhase = True
####self.data = N.absolute(self.dataCplx)
self.helpNewData()
def OnViewCplxAsPhase(self, event=77777):
if not self.data.dtype.type in (N.complex64, N.complex128)\
or self.viewer.m_viewComplexAsAbsNotPhase == False:
wx.Bell()
return
self.viewer.m_viewComplexAsAbsNotPhase = False
#import useful as U
#self.data = U.phase(self.dataCplx)
self.helpNewData()
def OnViewFlipXZ(self, event=77777):
from . import fftfuncs as F
if self.data.dtype.type in (N.complex64, N.complex128):
print("TODO: cplx ")
run(F.getXZview(self.data, zaxis=0), title='X-Z of %d'%self.id, _scoopLevel=2)
from .usefulX import vHistSettings
vHistSettings(self.id,-1)
def OnViewFlipYZ(self, event=77777):
from . import fftfuncs as F
if self.data.dtype.type in (N.complex64, N.complex128):
print("TODO: cplx ")
run(F.getYZview(self.data, zaxis=0), title='Y-Z of %d'%self.id, _scoopLevel=2)
from .usefulX import vHistSettings
vHistSettings(self.id,-1)
def OnViewMaxProj(self, event=77777):
from . import useful as U
if self.data.dtype.type in (N.complex64, N.complex128):
print("TODO: cplx ")
run(U.project(self.data), title='proj of %d'%self.id, _scoopLevel=2)
from .usefulX import vHistSettings
vHistSettings(self.id,-1)
def OnShowPopupTransient(self, evt=77777):
try:
print(self.win)
except:
print('pass')
pass
self.win = TestTransientPopup(self.frame, wx.SIMPLE_BORDER)
# Show the popup right below or above the button
# depending on available screen space...
#btn = evt.GetEventObject()
#pos = btn.ClientToScreen( (0,0) )
#sz = btn.GetSize()
#win.Position(pos, (0, sz.height))
self.win.Position(self.frame.GetPosition(), (0,0) )
self.win.Popup()
    def helpNewData(self, doAutoscale=True, setupHistArr=True):
        """Refresh the displayed section after self.data or self.zsec changed.

        doAutoscale gets ORed with self.autoHistEachSect == 2
        setupHistArr: when True, (re)allocate the dense histogram array
            (needed if the data dtype may have changed)
        """
        #self.zshape= data.shape[:-2]
        #self.zndim = len(self.zshape)
        ###self.img = data[ (0,)*self.zndim ]
        #self.zsec = [0] * self.zndim
        #self.zlast = [-1]*self.zndim # remember - for checking if update needed
        self.img = self.data[tuple(self.zsec)]
        # complex data cannot be displayed directly: show magnitude or phase
        if self.img.dtype.type in (N.complex64, N.complex128):
            if self.viewer.m_viewComplexAsAbsNotPhase:
                #BAD20060302 self.img = abs(N.asarray(self.img, N.Float32))# check if this does tempcopy
                # From: Todd Miller <jmiller@stsci.edu>
                # Subject: Re: [Numpy-discussion] numarray: need Float32 abs
                # from array of type na.Complex64 or na.Complex32
                # Date: Thu, 02 Mar 2006 10:13:32 -0500
                # stores the abs() into the real component of the original array.
                # img.real is a view not a copy.
                #error-for-read-only-arrays na.abs(self.img, self.img.real)
                # optional step which makes the complex img array
                # a real valued array with complex storage.
                #self.img.imag = 0
                # just forget that img is using complex storage.
                self.img = N.asarray(N.absolute(self.img), N.float32)
            else:
                #from Priithon.all import U
                #data = U.phase(self.m_imgArr.astype(N.float32)
                #not temp copy for type conversion:
                self.img = N.arctan2(N.asarray(self.img.imag, N.float32),
                                     N.asarray(self.img.real, N.float32))
        self.viewer.setImage( self.img )
        #print "debug1:", self.mmms
        #CHECK
        if setupHistArr:
            self.setupHistArr()
        if not self.noHistUpdate: # used for debugging speed issues
            self.recalcHist(triggeredFromIdle=True)
            # braces either refit fully, or just follow the new min/max
            if doAutoscale or self.autoHistEachSect == 2:
                self.hist.autoFit(amin=self.mmms[0], amax=self.mmms[1])
                #h.setBraces(self.mmms[0], self.mmms[1])
                #h.fitXcontrast()
                #self.viewer.changeHistogramScaling(self.mmms[0],self.mmms[1])
            elif self.autoHistEachSect == 1:
                self.hist.setBraces(self.mmms[0], self.mmms[1])
        #print "debug2:", self.mmms
    def OnHistLog(self, ev=77777):
        """Toggle logarithmic scaling of the histogram (delegates to self.hist)."""
        self.hist.OnLog(ev)
def OnViewVTK(self, ev=77777):
from . import usefulX as Y
####use tostring instead self.m = Y.vtkMountain(self.img, "vtk of %d: %s" % (self.id, self.title))
#20060722 a = N.NumArray(shape=self.img.shape, type=self.img.type(), buffer=self.viewer.m_imgArrString)
a = N.fromstring(self.viewer.m_imgArrString, self.img.dtype)
a.shape = self.img.shape
self.m = Y.vtkMountain(a, "vtk of %d: %s" % (self.id, self.title))
##thrd def OnResult(self, event):
##thrd #if event.data is None:
##thrd self.hist.setHist(self.recalcHist__a_h, self.recalcHist__mmms[0], self.recalcHist__mmms[1])
def setupHistArr(self):
self.hist_arr = None
if self.img.dtype.type == N.uint8:
self.hist_min, self.hist_max = 0, (1<<8)-1
elif self.img.dtype.type == N.uint16:
self.hist_min, self.hist_max = 0, (1<<16)-1
elif self.img.dtype.type == N.int16:
self.hist_min, self.hist_max = 0-(1<<15), (1<<15)-1
from Priithon.all import U
self.hist.hist_min,self.hist.hist_max = U.mm(self.img)#self.hist_min
#if self.hist.m_histPlotArray is not None:
# self.hist.m_histPlotArray[0,0],self.hist.m_histPlotArray[-1,0] = U.mm(self.img)
# print self.hist.m_histPlotArray[0,0],self.hist.m_histPlotArray[-1,0]
if self.img.dtype.type in (N.uint8, N.int16, N.uint16):
self.hist_range = self.hist_max - self.hist_min + 1
self.hist_arr = N.zeros(shape=self.hist_range, dtype=N.int32)
def OnIdle(self, event):
if len(self.recalcHist_todo_Set):
i = self.recalcHist_todo_Set.pop()
self.recalcHist(triggeredFromIdle=True)
    def recalcHist(self, triggeredFromIdle):
        """Recompute the histogram of the current section and push it to the
        histogram canvas.

        When called with triggeredFromIdle=False the work is only queued
        (picked up later by OnIdle) to keep the GUI responsive.
        Also updates self.mmms = (min, max, mean, stddev) of the section.
        """
        if not triggeredFromIdle:
            # defer: OnIdle will call us back with triggeredFromIdle=True
            self.recalcHist_todo_Set.add(0)
            return
        #CHECK img = self.viewer.m_imgArr
        img = self.img
        from . import useful as U
        mmms = U.mmms( img )
        self.mmms = mmms
        #time import time
        #time x = time.clock()
        # print mmms
        if self.hist_arr is not None:
            # dense per-value histogram path (small integer dtypes)
            #glSeb import time
            #glSeb x = time.clock()
            if sys.platform.startswith('linux'): # 20180712 win SIM error
                wx.Yield() # 20180406 dileptus linux
            U.histogram(img, amin=self.hist_min, amax=self.hist_max, histArr=self.hist_arr)
            self.hist.setHist(self.hist_arr, self.hist_min, self.hist_max)
            #glSeb print "ms: %.2f"% ((time.clock()-x)*1000.0)
            ## FIXME setHist needs to NOT alloc xArray every time !!!
        else:
            # coarse fixed-resolution histogram path (float/complex etc.)
            # self.viewer.m_imgChanged = True
            # self.viewer.Refresh(False)
            #20040915(OverflowError: float too large to convert) resolution = int(mmms[1]-mmms[0]+2)
            #20040915if resolution > 10000:
            #20040915 resolution = 10000
            #20040915elif resolution < 1000: #CHECK
            #20040915 resolution = 10000 # CHECK
            resolution = 10000
            a_h = U.histogram(img, resolution, mmms[0], mmms[1])
            # self.hist.setHist(a_h, mmms[0], mmms[1])
            self.recalcHist__a_h = a_h
            self.recalcHist__Done = 1
            #time print "recalcHist ms: %.2f"% ((time.clock()-x)*1000.0)
            #20171225 py2to3
            if wx.VERSION[0] <= 3:
                mainthread = wx.Thread_IsMain()
            elif wx.VERSION[0] >= 4:
                mainthread = wx.IsMainThread()
            # GUI may only be touched from the main thread; otherwise post an event
            if mainthread:#wx.IsMainThread():#Thread_IsMain():
                self.hist.setHist(self.recalcHist__a_h,
                                  self.mmms[0],
                                  self.mmms[1])
            else:
                wx.PostEvent(self.frame, self.__class__.ResultEvent(None))
class TestTransientPopup(wx.PopupTransientWindow):
    """Adds a bit of text and mouse movement to the wxPopupWindow"""

    def __init__(self, parent, style):
        wx.PopupTransientWindow.__init__(self, parent, style)
        pnl = wx.Panel(self, -1)
        pnl.SetBackgroundColour("#FFB6C1")
        message = ("wxPopupTransientWindow is a\n"
                   "wxPopupWindow which disappears\n"
                   "automatically when the user\n"
                   "clicks the mouse outside it or if it\n"
                   "(or its first child) loses focus in \n"
                   "any other way.")
        txt = wx.StaticText(pnl, -1, message, pos=(10, 10))
        best = txt.GetBestSize()
        # pad the panel around the text and size the popup to match
        pnl.SetSize((best.width + 20, best.height + 20))
        self.SetSize(pnl.GetSize())

    def ProcessLeftDown(self, evt):
        """Let the click fall through (do not dismiss the popup here)."""
        return False

    def OnKeyDown(self, evt):
        print("OnKeyDown")

    def OnKeyUp(self, evt):
        print("OnKeyUp")

    def OnChar(self, evt):
        print("OnKeyChar")
| macronucleus/chromagnon | Chromagnon/Priithon/splitND.py | Python | mit | 41,286 | [
"VTK"
] | 0f0a1c31efa5920bfdb6751f4bbdfe3972ea68bfd20f726a51f4bfc03cbf230d |
#!python
from __future__ import with_statement
import argparse
import os
import logging
from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# guts of a programmable filter
# Executed inside ParaView's ProgrammableFilter: copies the input poly-data
# and, if a 'mass' point array exists, adds a 'MassNorm' array mapping each
# mass linearly into a sphere-radius range starting at minRadius.
normalizeMass="""
input = self.GetPolyDataInput();
output = self.GetPolyDataOutput();
output.CopyStructure(input)
output.CopyAttributes(input)
mass = input.GetPointData().GetArray('mass');
if mass is not None:
ntups = mass.GetNumberOfTuples();
massNorm = mass.NewInstance();
massNorm.SetNumberOfTuples(ntups);
massNorm.SetName('MassNorm');
minRadius = 100.00
minMax = mass.GetRange();
drange = (minMax[1] - minMax[0]) / (minRadius*10)
for i in range(ntups):
curmass = mass.GetTuple1(i)
massNorm.SetTuple1(i, minRadius + (curmass - minMax[0])/drange)
output.GetPointData().AddArray(massNorm);
"""
# leftover camera presets, kept for reference:
# default=[-2000.0, 100.0, 70000],
# default=[-4000.246, 600.739, -523.349],
# default=[0.0, 1.0, 0.0],
# Command-line interface.  NOTE: 'args' is first the parser and is then
# rebound to the parsed namespace by parse_args() below.
args = argparse.ArgumentParser(description='control nbody visualization.')
args.add_argument("-f", "--filename", type=str, help="filename to read")
args.add_argument("-g", "--debug", action="store_const", const=1,
                  help="enable debugging")
args.add_argument("--axes", action="store_const", const=1, help="axes")
args.add_argument("--eye", nargs=3, type=float,
                  default=[0.0, 0.0, 300.0],
                  help="location of the camera")
args.add_argument("--ref", nargs=3, type=float,
                  default=[0.5, 0.5, 0.8],
                  help="location the camera focuses on")
args.add_argument("--vup", nargs=3, type=float,
                  default=[0.0, 1.0, 0.0],
                  help="view 'up' direction")
args = args.parse_args()
if args.debug:
    logging.getLogger().setLevel(logging.DEBUG)
# Build the ParaView pipeline: CSV -> points -> MassNorm -> sphere glyphs.
reader = CSVReader(FileName=args.filename)
geomFilter = TableToPoints()
geomFilter.XColumn = 'x'
geomFilter.YColumn = 'y'
geomFilter.ZColumn = 'z'
# the input must carry a per-point 'mass' column for glyph scaling
marray = geomFilter.PointData.GetArray('mass')
if marray is None or marray.GetNumberOfTuples() == 0:
    print("Mass array is missing!")
    exit(1)
# normalizeMass (defined above) adds the 'MassNorm' point array
pyFilter = ProgrammableFilter()
pyFilter.Script = normalizeMass
glyphFilter = Glyph(pyFilter, GlyphType="Sphere")
glyphFilter.Scalars = ['POINTS', 'MassNorm']
glyphFilter.ScaleMode = 'scalar'
glyphFilter.GlyphType.ThetaResolution = 180
glyphFilter.GlyphType.PhiResolution = 90
glyphFilter.UpdatePipeline()
glyphRep = Show(glyphFilter)
glyphRep.Scaling = True
glyphRep.Orient = False
# assign lut by name is only supported in PV 4.1
#massNorm = glyphFilter.PointData.GetArray('MassNorm')
#glyphRep.ColorArrayName = 'MassNorm'
#glyphRep.Representation = 'Surface'
#glyphRep.LookupTable = AssignLookupTable(massNorm,'Rainbow Desaturated')
# camera placement from the command line
rview = GetRenderView()
rview.CameraPosition = args.eye
rview.CameraFocalPoint = args.ref
rview.CameraViewUp = args.vup
if not args.axes:
    rview.CenterAxesVisibility = 0
paraview.simple._DisableFirstRenderCameraReset()
def outname(infilename):
    """Return the PNG output filename for a timestep input file.

    The directory part and the (last) extension of *infilename* are dropped
    and '.png' is appended; a name with no extension is handled gracefully.
    FIX: uses os.path.splitext instead of try/rindex, which also treats a
    leading-dot name like '.hidden' as having no extension.
    """
    timestep = os.path.splitext(os.path.basename(infilename))[0]
    return timestep + ".png"
# Render the scene and write the image next to the current directory,
# named after the input timestep.
Render()
WriteImage(outname(args.filename), Magnification=3)
# helper prints for recording a good camera position interactively:
#eye = GetRenderView().CameraPosition
#ref = GetRenderView().CameraFocalPoint
#vup = GetRenderView().CameraViewUp
#print("eye: %7.3f %7.3f %7.3f" % (eye[0], eye[1], eye[2]))
#print("ref: %7.3f %7.3f %7.3f" % (ref[0], ref[1], ref[2]))
#print("vup: %7.3f %7.3f %7.3f" % (vup[0], vup[1], vup[2]))
# Tear down the pipeline in reverse order and drop the proxies so ParaView
# releases server-side objects.
Delete(glyphRep)
Delete(glyphFilter)
Delete(pyFilter)
Delete(geomFilter)
Delete(reader)
del glyphRep
del glyphFilter
del pyFilter
del geomFilter
del reader
| tfogal/freeprocessing | batch.py | Python | lgpl-3.0 | 3,747 | [
"ParaView"
] | 59858f7344ddd112a19c3e5ff3f9b85e7bd01d7a85333ebf438387e7c8b6dcdd |
""" DISET request handler base class for the TransformationDB.
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.TransformationSystem.DB.TransformationDB import TransformationDB
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# Accepted types for a transformation identifier: name or numeric ID (Python 2).
transTypes = [basestring, int, long]
__RCSID__ = "$Id$"
# Task-state counter names reported in transformation summaries.
TASKS_STATE_NAMES = [
    'TotalCreated',
    'Created',
    'Running',
    'Submitted',
    'Failed',
    'Waiting',
    'Done',
    'Completed',
    'Stalled',
    'Killed',
    'Staging',
    'Checking',
    'Rescheduled',
    'Scheduled']
# File-state counter names reported in transformation summaries.
FILES_STATE_NAMES = ['PercentProcessed', 'Processed', 'Unused', 'Assigned', 'Total', 'Problematic',
                     'ApplicationCrash', 'MaxReset']
# Module-level TransformationDB handle; injected via setDatabase() at service
# initialisation (False until then).
database = False
class TransformationManagerHandler(RequestHandler):
  def __init__(self, *args, **kargs):
    # Re-publish the module-level database handle (set once per process)
    # before the base RequestHandler initialisation runs.
    self.setDatabase(database)
    super(TransformationManagerHandler, self).__init__(*args, **kargs)
  def _parseRes(self, res):
    """Log a failed S_OK/S_ERROR result and return it unchanged."""
    if not res['OK']:
      gLogger.error('TransformationManager failure', res['Message'])
    return res
  def setDatabase(self, oDatabase):
    """Install the TransformationDB instance used by all handler instances
    (stored in the module-level 'database' global)."""
    global database
    database = oDatabase
  types_getCounters = [basestring, list, dict]

  def export_getCounters(self, table, attrList, condDict, older=None, newer=None, timeStamp=None):
    """Count rows of 'table' grouped by 'attrList', filtered by condDict and
    an optional time window on timeStamp."""
    res = database.getCounters(table, attrList, condDict, older=older, newer=newer, timeStamp=timeStamp)
    return self._parseRes(res)
####################################################################
#
# These are the methods to manipulate the transformations table
#
  types_addTransformation = [basestring, basestring, basestring, basestring, basestring, basestring, basestring]

  def export_addTransformation(self, transName, description, longDescription, transType, plugin, agentType, fileMask,
                               transformationGroup='General',
                               groupSize=1,
                               inheritedFrom=0,
                               body='',
                               maxTasks=0,
                               eventsPerTask=0,
                               addFiles=True,
                               inputMetaQuery=None,
                               outputMetaQuery=None):
    """Create a new transformation; the author DN/group are taken from the
    client's credentials. Returns S_OK(transformationID) on success."""
    # authorDN = self._clientTransport.peerCredentials['DN']
    # authorGroup = self._clientTransport.peerCredentials['group']
    credDict = self.getRemoteCredentials()
    authorDN = credDict['DN']
    authorGroup = credDict['group']
    res = database.addTransformation(transName, description, longDescription, authorDN, authorGroup, transType, plugin,
                                     agentType, fileMask,
                                     transformationGroup=transformationGroup,
                                     groupSize=groupSize,
                                     inheritedFrom=inheritedFrom,
                                     body=body,
                                     maxTasks=maxTasks,
                                     eventsPerTask=eventsPerTask,
                                     addFiles=addFiles,
                                     inputMetaQuery=inputMetaQuery,
                                     outputMetaQuery=outputMetaQuery)
    if res['OK']:
      gLogger.info("Added transformation %d" % res['Value'])
    return self._parseRes(res)
  types_deleteTransformation = [transTypes]

  def export_deleteTransformation(self, transName):
    """Delete a transformation; the caller's DN is recorded as the author."""
    credDict = self.getRemoteCredentials()
    authorDN = credDict['DN']
    # authorDN = self._clientTransport.peerCredentials['DN']
    res = database.deleteTransformation(transName, author=authorDN)
    return self._parseRes(res)
  types_cleanTransformation = [transTypes]

  def export_cleanTransformation(self, transName):
    """Clean a transformation (remove its files/tasks); the caller's DN is
    recorded as the author."""
    credDict = self.getRemoteCredentials()
    authorDN = credDict['DN']
    # authorDN = self._clientTransport.peerCredentials['DN']
    res = database.cleanTransformation(transName, author=authorDN)
    return self._parseRes(res)
  types_setTransformationParameter = [transTypes, basestring]

  def export_setTransformationParameter(self, transName, paramName, paramValue):
    """Set a single transformation parameter; the caller's DN is recorded."""
    credDict = self.getRemoteCredentials()
    authorDN = credDict['DN']
    # authorDN = self._clientTransport.peerCredentials['DN']
    res = database.setTransformationParameter(transName, paramName, paramValue, author=authorDN)
    return self._parseRes(res)
  types_deleteTransformationParameter = [transTypes, basestring]

  def export_deleteTransformationParameter(self, transName, paramName):
    """Remove a single transformation parameter (no author recorded)."""
    # credDict = self.getRemoteCredentials()
    # authorDN = credDict[ 'DN' ]
    # authorDN = self._clientTransport.peerCredentials['DN']
    res = database.deleteTransformationParameter(transName, paramName)
    return self._parseRes(res)
  types_getTransformations = []

  def export_getTransformations(self, condDict=None, older=None, newer=None, timeStamp='CreationDate',
                                orderAttribute=None, limit=None, extraParams=False, offset=None):
    """Return transformations matching condDict, optionally cut on the
    timeStamp column (older/newer), ordered and paged (limit/offset).
    extraParams=True also returns the AdditionalParameters."""
    if not condDict:
      condDict = {}
    res = database.getTransformations(condDict=condDict,
                                      older=older,
                                      newer=newer,
                                      timeStamp=timeStamp,
                                      orderAttribute=orderAttribute,
                                      limit=limit,
                                      extraParams=extraParams,
                                      offset=offset)
    return self._parseRes(res)
  types_getTransformation = [transTypes]

  def export_getTransformation(self, transName, extraParams=False):
    """Return a single transformation record (with AdditionalParameters when
    extraParams is True)."""
    res = database.getTransformation(transName, extraParams=extraParams)
    return self._parseRes(res)
  types_getTransformationParameters = [transTypes, [basestring, list, tuple]]

  def export_getTransformationParameters(self, transName, parameters):
    """Return the value(s) of the requested parameter(s) of a transformation."""
    res = database.getTransformationParameters(transName, parameters)
    return self._parseRes(res)
  types_getTransformationWithStatus = [[basestring, list, tuple]]

  def export_getTransformationWithStatus(self, status):
    """Return the IDs of transformations currently in the given status(es)."""
    res = database.getTransformationWithStatus(status)
    return self._parseRes(res)
####################################################################
#
# These are the methods to manipulate the TransformationFiles tables
#
  types_addFilesToTransformation = [transTypes, [list, tuple]]

  def export_addFilesToTransformation(self, transName, lfns):
    """Attach the given LFNs to a transformation."""
    res = database.addFilesToTransformation(transName, lfns)
    return self._parseRes(res)
types_addTaskForTransformation = [transTypes]
def export_addTaskForTransformation(self, transName, lfns=[], se='Unknown'):
res = database.addTaskForTransformation(transName, lfns=lfns, se=se)
return self._parseRes(res)
def _wasFileInError(self, newStatus, currentStatus):
""" Tells whether the file was Assigned and failed, i.e. was not Processed """
return currentStatus.lower() == 'assigned' and newStatus.lower() != 'processed'
types_setFileStatusForTransformation = [transTypes, dict]

def export_setFileStatusForTransformation(self, transName, dictOfNewFilesStatus):
    """ Sets the file status for the transformation.

    The dictOfNewFilesStatus is a dictionary with the form:
    {12345: ('StatusA', errorA), 6789: ('StatusB',errorB), ... } where the keys are fileIDs
    The tuple may be a string with only the status if the client was from an older version
    """
    # Nothing to update: return an empty result immediately.
    if not dictOfNewFilesStatus:
        return S_OK({})
    # Inspect one value to decide which client protocol is in use.
    # NOTE(review): .values()[0] and .iteritems() below are Python 2 idioms
    # (dict views are not indexable on Python 3).
    statusSample = dictOfNewFilesStatus.values()[0]
    if isinstance(statusSample, basestring):
        # FIXME: kept for backward compatibility with old clients... Remove when no longer needed
        # This comes from an old client, set the error flag but we must get the current status first
        newStatusForFileIDs = {}
        res = database.getTransformationFiles({'TransformationID': transName, 'FileID': dictOfNewFilesStatus.keys()})
        if not res['OK']:
            return res
        # Map fileID -> status currently recorded in the DB.
        currentStatus = dict((fileDict['FileID'], fileDict['Status']) for fileDict in res['Value'])
        for fileID, status in dictOfNewFilesStatus.iteritems():
            # 'newStatus' re-reads the same value the loop already unpacked as
            # 'status'; kept as in the original control flow.
            newStatus = dictOfNewFilesStatus[fileID]
            newStatusForFileIDs[fileID] = (newStatus, self._wasFileInError(newStatus, currentStatus[fileID]))
    elif isinstance(statusSample, (list, tuple)) and len(statusSample) == 2:
        # New-style client: the values are already (status, errorFlag) pairs.
        newStatusForFileIDs = dictOfNewFilesStatus
    else:
        return S_ERROR("Status field should be a string or two values")
    # Resolve the DB connection and numeric TransformationID in one call.
    res = database._getConnectionTransID(False, transName)
    if not res['OK']:
        return res
    connection = res['Value']['Connection']
    transID = res['Value']['TransformationID']
    res = database.setFileStatusForTransformation(transID, newStatusForFileIDs, connection=connection)
    return self._parseRes(res)
types_getTransformationStats = [transTypes]

def export_getTransformationStats(self, transName):
    """Return the per-status file counters of a transformation."""
    return self._parseRes(database.getTransformationStats(transName))
types_getTransformationFilesCount = [transTypes, basestring]

def export_getTransformationFilesCount(self, transName, field, selection=None):
    """Count the files of a transformation grouped by *field*.

    :param selection: optional extra selection conditions (defaults to none)
    """
    # Fix: mutable default argument ({}) replaced by a None sentinel;
    # the database call still receives an empty dict by default.
    if selection is None:
        selection = {}
    res = database.getTransformationFilesCount(transName, field, selection=selection)
    return self._parseRes(res)
types_getTransformationFiles = []

def export_getTransformationFiles(self, condDict=None, older=None, newer=None, timeStamp='LastUpdate',
                                  orderAttribute=None, limit=None, offset=None):
    """Return the transformation files matching the given selection window."""
    return self._parseRes(database.getTransformationFiles(condDict=condDict or {}, older=older, newer=newer,
                                                          timeStamp=timeStamp, orderAttribute=orderAttribute,
                                                          limit=limit, offset=offset,
                                                          connection=False))
####################################################################
#
# These are the methods to manipulate the TransformationTasks table
#
types_getTransformationTasks = []

def export_getTransformationTasks(self, condDict=None, older=None, newer=None, timeStamp='CreationTime',
                                  orderAttribute=None, limit=None, inputVector=False, offset=None):
    """Return the transformation tasks matching the given selection window."""
    return self._parseRes(database.getTransformationTasks(condDict=condDict or {}, older=older, newer=newer,
                                                          timeStamp=timeStamp, orderAttribute=orderAttribute,
                                                          limit=limit, inputVector=inputVector,
                                                          offset=offset))
types_setTaskStatus = [transTypes, [list, int, long], basestring]

def export_setTaskStatus(self, transName, taskID, status):
    """Set the status of one or several tasks of a transformation."""
    return self._parseRes(database.setTaskStatus(transName, taskID, status))

types_setTaskStatusAndWmsID = [transTypes, [long, int], basestring, basestring]

def export_setTaskStatusAndWmsID(self, transName, taskID, status, taskWmsID):
    """Set the status and the WMS job ID of a single task."""
    return self._parseRes(database.setTaskStatusAndWmsID(transName, taskID, status, taskWmsID))

types_getTransformationTaskStats = [transTypes]

def export_getTransformationTaskStats(self, transName):
    """Return the per-status task counters of a transformation."""
    return self._parseRes(database.getTransformationTaskStats(transName))
types_deleteTasks = [transTypes, [long, int], [long, int]]

def export_deleteTasks(self, transName, taskMin, taskMax):
    """Delete the tasks of a transformation in the inclusive range [taskMin, taskMax]."""
    # The caller's DN is recorded as the author of the operation.
    authorDN = self.getRemoteCredentials()['DN']
    return self._parseRes(database.deleteTasks(transName, taskMin, taskMax, author=authorDN))

types_extendTransformation = [transTypes, [long, int]]

def export_extendTransformation(self, transName, nTasks):
    """Extend a transformation by nTasks additional tasks."""
    # The caller's DN is recorded as the author of the operation.
    authorDN = self.getRemoteCredentials()['DN']
    return self._parseRes(database.extendTransformation(transName, nTasks, author=authorDN))
types_getTasksToSubmit = [transTypes, [long, int]]

def export_getTasksToSubmit(self, transName, numTasks, site=''):
    """ Get information necessary for submission for a given number of tasks for a given transformation """
    transRes = database.getTransformation(transName)
    if not transRes['OK']:
        return self._parseRes(transRes)
    transDict = transRes['Value']
    tasksRes = database.getTasksForSubmission(transName, numTasks=numTasks, site=site, statusList=['Created'])
    if not tasksRes['OK']:
        return self._parseRes(tasksRes)
    # Reserve every candidate task; abort on the first failure.
    submitDict = {}
    for taskID, taskDict in tasksRes['Value'].items():
        reserveRes = database.reserveTask(transName, long(taskID))
        if not reserveRes['OK']:
            return self._parseRes(reserveRes)
        submitDict[taskID] = taskDict
    transDict['JobDictionary'] = submitDict
    return S_OK(transDict)
####################################################################
#
# These are the methods for TransformationMetaQueries table. It replaces methods
# for the old TransformationInputDataQuery table
#
types_createTransformationMetaQuery = [transTypes, dict, basestring]

def export_createTransformationMetaQuery(self, transName, queryDict, queryType):
    """Create a meta-data query of the given type for a transformation."""
    authorDN = self.getRemoteCredentials()['DN']
    return self._parseRes(database.createTransformationMetaQuery(transName, queryDict, queryType,
                                                                 author=authorDN))

types_deleteTransformationMetaQuery = [transTypes, basestring]

def export_deleteTransformationMetaQuery(self, transName, queryType):
    """Delete the meta-data query of the given type of a transformation."""
    authorDN = self.getRemoteCredentials()['DN']
    return self._parseRes(database.deleteTransformationMetaQuery(transName, queryType,
                                                                 author=authorDN))

types_getTransformationMetaQuery = [transTypes, basestring]

def export_getTransformationMetaQuery(self, transName, queryType):
    """Return the meta-data query of the given type of a transformation."""
    return self._parseRes(database.getTransformationMetaQuery(transName, queryType))
####################################################################
#
# These are the methods for transformation logging manipulation
#
types_getTransformationLogging = [transTypes]

def export_getTransformationLogging(self, transName):
    """Return the logging records of a transformation."""
    return self._parseRes(database.getTransformationLogging(transName))

####################################################################
#
# These are the methods for transformation additional parameters
#

types_getAdditionalParameters = [transTypes]

def export_getAdditionalParameters(self, transName):
    """Return the additional (non-standard) parameters of a transformation."""
    return self._parseRes(database.getAdditionalParameters(transName))
####################################################################
#
# These are the methods for file manipulation
#
types_getFileSummary = [list]

def export_getFileSummary(self, lfns):
    """Return the per-transformation summary of the given LFNs."""
    return self._parseRes(database.getFileSummary(lfns))

types_addDirectory = [basestring]

def export_addDirectory(self, path, force=False):
    """Register all files found under the given directory path."""
    return self._parseRes(database.addDirectory(path, force=force))

types_exists = [list]

def export_exists(self, lfns):
    """Check which of the given LFNs are known to the database."""
    return self._parseRes(database.exists(lfns))
types_addFile = [[list, dict, basestring]]

def export_addFile(self, fileDicts, force=False):
    """ Interface provides { LFN1 : { PFN1, SE1, ... }, LFN2 : { PFN2, SE2, ... } }
    """
    return self._parseRes(database.addFile(fileDicts, force=force))
types_removeFile = [[list, dict]]

def export_removeFile(self, lfns):
    """ Interface provides [ LFN1, LFN2, ... ]
    """
    if isinstance(lfns, dict):
        # Fix: materialize the keys explicitly. On Python 2 dict.keys()
        # is already a list; on Python 3 it is a view, so list(lfns)
        # keeps the downstream call working on both.
        lfns = list(lfns)
    res = database.removeFile(lfns)
    return self._parseRes(res)
types_setMetadata = [basestring, dict]

def export_setMetadata(self, path, querydict):
    """ Set metadata to a file or to a directory (path)
    """
    return self._parseRes(database.setMetadata(path, querydict))
####################################################################
#
# These are the methods used for web monitoring
#
# TODO Get rid of this (talk to Matvey)
types_getDistinctAttributeValues = [basestring, dict]

def export_getDistinctAttributeValues(self, attribute, selectDict):
    """Return the distinct values of one attribute of the Transformations table."""
    res = database.getTableDistinctAttributeValues('Transformations', [attribute], selectDict)
    if not res['OK']:
        return self._parseRes(res)
    return S_OK(res['Value'][attribute])

types_getTableDistinctAttributeValues = [basestring, list, dict]

def export_getTableDistinctAttributeValues(self, table, attributes, selectDict):
    """Return the distinct values of several attributes of an arbitrary table."""
    return self._parseRes(database.getTableDistinctAttributeValues(table, attributes, selectDict))
types_getTransformationStatusCounters = []

def export_getTransformationStatusCounters(self):
    """Return a dictionary {status: number of transformations in that status}."""
    res = database.getCounters('Transformations', ['Status'], {})
    if not res['OK']:
        return self._parseRes(res)
    return S_OK(dict((attrDict['Status'], count) for attrDict, count in res['Value']))
types_getTransformationSummary = []

def export_getTransformationSummary(self):
    """ Get the summary of the currently existing transformations """
    res = database.getTransformations()
    if not res['OK']:
        return self._parseRes(res)
    resultDict = {}
    for transDict in res['Value']:
        transID = transDict['TransformationID']
        # Task (job) statistics are mandatory; skip the transformation on failure.
        taskRes = database.getTransformationTaskStats(transID)
        if not taskRes['OK']:
            gLogger.warn('Failed to get job statistics for transformation %d' % transID)
            continue
        transDict['JobStats'] = taskRes['Value']
        # File statistics are optional; -1 flags that they were unavailable.
        fileRes = database.getTransformationStats(transID)
        transDict['NumberOfFiles'] = fileRes['Value']['Total'] if fileRes['OK'] else -1
        resultDict[transID] = transDict
    return S_OK(resultDict)
types_getTabbedSummaryWeb = [basestring, dict, dict, list, int, int]

def export_getTabbedSummaryWeb(self, table, requestedTables, selectDict, sortList, startItem, maxItems):
    # Build the paged web summary of the primary `table`, then the summaries
    # of every table in `requestedTables`, filtered by the key values
    # (TransformationID / TaskID) selected in the primary table.
    # Key columns linking each table to the other two.
    tableDestinations = {'Transformations': {'TransformationFiles': ['TransformationID'],
                                             'TransformationTasks': ['TransformationID']},
                         'TransformationFiles': {'Transformations': ['TransformationID'],
                                                 'TransformationTasks': ['TransformationID', 'TaskID']},
                         'TransformationTasks': {'Transformations': ['TransformationID'],
                                                 'TransformationFiles': ['TransformationID', 'TaskID']}}
    # Columns whose distinct values are offered as selections, per table.
    tableSelections = {'Transformations': ['TransformationID', 'AgentType', 'Type', 'TransformationGroup',
                                           'Plugin'],
                       'TransformationFiles': ['TransformationID', 'TaskID', 'Status', 'UsedSE', 'TargetSE'],
                       'TransformationTasks': ['TransformationID', 'TaskID', 'ExternalStatus', 'TargetSE']}
    # Time-stamp and status column used by each table.
    tableTimeStamps = {'Transformations': 'CreationDate',
                       'TransformationFiles': 'LastUpdate',
                       'TransformationTasks': 'CreationTime'}
    tableStatusColumn = {'Transformations': 'Status',
                         'TransformationFiles': 'Status',
                         'TransformationTasks': 'ExternalStatus'}
    resDict = {}
    # Summary of the primary table first.
    res = self.__getTableSummaryWeb(table, selectDict, sortList, startItem, maxItems,
                                    selectColumns=tableSelections[table], timeStamp=tableTimeStamps[table],
                                    statusColumn=tableStatusColumn[table])
    if not res['OK']:
        gLogger.error("Failed to get Summary for table", "%s %s" % (table, res['Message']))
        return self._parseRes(res)
    resDict[table] = res['Value']
    selections = res['Value']['Selections']
    # Propagate the selected key values of the primary table to the linked tables.
    tableSelection = {}
    for destination in tableDestinations[table].keys():
        tableSelection[destination] = {}
        for parameter in tableDestinations[table][destination]:
            tableSelection[destination][parameter] = selections.get(parameter, [])
    # NOTE: this loop deliberately re-binds `table`, `sortList`, `startItem`
    # and `maxItems` — the primary-table values are no longer needed here.
    for table, paramDict in requestedTables.items():
        sortList = paramDict.get('SortList', [])
        startItem = paramDict.get('StartItem', 0)
        maxItems = paramDict.get('MaxItems', 50)
        res = self.__getTableSummaryWeb(table, tableSelection[table], sortList, startItem, maxItems,
                                        selectColumns=tableSelections[table], timeStamp=tableTimeStamps[table],
                                        statusColumn=tableStatusColumn[table])
        if not res['OK']:
            gLogger.error("Failed to get Summary for table", "%s %s" % (table, res['Message']))
            return self._parseRes(res)
        resDict[table] = res['Value']
    return S_OK(resDict)
types_getTransformationsSummaryWeb = [dict, list, int, int]

def export_getTransformationsSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """Paged web summary of the Transformations table."""
    columns = ['TransformationID', 'AgentType', 'Type', 'Group', 'Plugin']
    return self.__getTableSummaryWeb('Transformations', selectDict, sortList, startItem, maxItems,
                                     selectColumns=columns,
                                     timeStamp='CreationDate', statusColumn='Status')

types_getTransformationTasksSummaryWeb = [dict, list, int, int]

def export_getTransformationTasksSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """Paged web summary of the TransformationTasks table."""
    columns = ['TransformationID', 'ExternalStatus', 'TargetSE']
    return self.__getTableSummaryWeb('TransformationTasks', selectDict, sortList, startItem, maxItems,
                                     selectColumns=columns,
                                     timeStamp='CreationTime', statusColumn='ExternalStatus')

types_getTransformationFilesSummaryWeb = [dict, list, int, int]

def export_getTransformationFilesSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """Paged web summary of the TransformationFiles table."""
    columns = ['TransformationID', 'Status', 'UsedSE', 'TargetSE']
    return self.__getTableSummaryWeb('TransformationFiles', selectDict, sortList, startItem, maxItems,
                                     selectColumns=columns,
                                     timeStamp='LastUpdate', statusColumn='Status')
def __getTableSummaryWeb(self, table, selectDict, sortList, startItem, maxItems, selectColumns=[],
                         timeStamp=None, statusColumn='Status'):
    # Generic paged summary of one transformation table in the standard
    # web-monitoring format (TotalRecords / ParameterNames / Records /
    # Extras / Selections).
    # NOTE(review): selectColumns has a mutable default ([]); it is only
    # read here, so this is harmless, but a None sentinel would be safer.
    # FromDate/ToDate are meta-selections: pop them out of the condition dict.
    fromDate = selectDict.get('FromDate', None)
    if fromDate:
        del selectDict['FromDate']
    # if not fromDate:
    #   fromDate = last_update
    toDate = selectDict.get('ToDate', None)
    if toDate:
        del selectDict['ToDate']
    # Sorting instructions. Only one for the moment.
    if sortList:
        orderAttribute = sortList[0][0] + ":" + sortList[0][1]
    else:
        orderAttribute = None
    # Get the columns that match the selection
    # Resolve the table-specific getter (e.g. database.getTransformations)
    # dynamically from the table name.
    fcn = None
    fcnName = "get%s" % table
    if hasattr(database, fcnName) and callable(getattr(database, fcnName)):
        fcn = getattr(database, fcnName)
    if not fcn:
        return S_ERROR("Unable to invoke database.%s, it isn't a member function of database" % fcnName)
    res = fcn(condDict=selectDict, older=toDate, newer=fromDate, timeStamp=timeStamp,
              orderAttribute=orderAttribute)
    if not res['OK']:
        return self._parseRes(res)
    # The full list of columns in contained here
    allRows = res['Records']
    # Prepare the standard structure now within the resultDict dictionary
    resultDict = {}
    # Create the total records entry
    resultDict['TotalRecords'] = len(allRows)
    # Create the ParameterNames entry
    resultDict['ParameterNames'] = res['ParameterNames']
    # Find which element in the tuple contains the requested status
    if statusColumn not in resultDict['ParameterNames']:
        return S_ERROR("Provided status column not present")
    statusColumnIndex = resultDict['ParameterNames'].index(statusColumn)
    # Get the rows which are within the selected window
    if resultDict['TotalRecords'] == 0:
        return S_OK(resultDict)
    ini = startItem
    last = ini + maxItems
    if ini >= resultDict['TotalRecords']:
        return S_ERROR('Item number out of range')
    if last > resultDict['TotalRecords']:
        last = resultDict['TotalRecords']
    selectedRows = allRows[ini:last]
    resultDict['Records'] = selectedRows
    # Generate the status dictionary
    statusDict = {}
    for row in selectedRows:
        status = row[statusColumnIndex]
        statusDict[status] = statusDict.setdefault(status, 0) + 1
    resultDict['Extras'] = statusDict
    # Obtain the distinct values of the selection parameters
    res = database.getTableDistinctAttributeValues(table, selectColumns, selectDict, older=toDate, newer=fromDate)
    # NOTE(review): on Python 2 zip(selectColumns, []) is simply an empty
    # list, used as the fallback when the distinct-values query fails.
    distinctSelections = zip(selectColumns, [])
    if res['OK']:
        distinctSelections = res['Value']
    resultDict['Selections'] = distinctSelections
    return S_OK(resultDict)
types_getTransformationSummaryWeb = [dict, list, int, int]

def export_getTransformationSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """ Get the summary of the transformation information for a given page in the generic format """
    # Obtain the timing information from the selectDict
    last_update = selectDict.get('CreationDate', None)
    if last_update:
        del selectDict['CreationDate']
    fromDate = selectDict.get('FromDate', None)
    if fromDate:
        del selectDict['FromDate']
    # CreationDate acts as a fallback lower bound when FromDate is absent.
    if not fromDate:
        fromDate = last_update
    toDate = selectDict.get('ToDate', None)
    if toDate:
        del selectDict['ToDate']
    # Sorting instructions. Only one for the moment.
    if sortList:
        orderAttribute = []
        for i in sortList:
            orderAttribute += [i[0] + ":" + i[1]]
    else:
        orderAttribute = None
    # Get the transformations that match the selection
    res = database.getTransformations(condDict=selectDict, older=toDate, newer=fromDate,
                                      orderAttribute=orderAttribute)
    if not res['OK']:
        return self._parseRes(res)
    ops = Operations()
    # Prepare the standard structure now within the resultDict dictionary
    resultDict = {}
    trList = res['Records']
    # Create the total records entry
    nTrans = len(trList)
    resultDict['TotalRecords'] = nTrans
    # Create the ParameterNames entry
    # As this list is a reference to the list in the DB, we cannot extend it, therefore copy it
    resultDict['ParameterNames'] = list(res['ParameterNames'])
    # Add the job states to the ParameterNames entry
    taskStateNames = TASKS_STATE_NAMES + ops.getValue('Transformations/AdditionalTaskStates', [])
    resultDict['ParameterNames'] += ['Jobs_' + x for x in taskStateNames]
    # Add the file states to the ParameterNames entry
    fileStateNames = FILES_STATE_NAMES + ops.getValue('Transformations/AdditionalFileStates', [])
    resultDict['ParameterNames'] += ['Files_' + x for x in fileStateNames]
    # Get the transformations which are within the selected window
    if nTrans == 0:
        return S_OK(resultDict)
    ini = startItem
    last = ini + maxItems
    if ini >= nTrans:
        return S_ERROR('Item number out of range')
    if last > nTrans:
        last = nTrans
    transList = trList[ini:last]
    statusDict = {}
    # Configuration-driven lists controlling the per-transformation details.
    extendableTranfs = ops.getValue('Transformations/ExtendableTransfTypes',
                                    ['Simulation', 'MCsimulation'])
    givenUpFileStatus = ops.getValue('Transformations/GivenUpFileStatus',
                                     ['MissingInFC'])
    problematicStatuses = ops.getValue('Transformations/ProblematicStatuses',
                                       ['Problematic'])
    # Add specific information for each selected transformation
    for trans in transList:
        transDict = dict(zip(resultDict['ParameterNames'], trans))
        # Update the status counters
        status = transDict['Status']
        statusDict[status] = statusDict.setdefault(status, 0) + 1
        # Get the statistics on the number of jobs for the transformation
        transID = transDict['TransformationID']
        res = database.getTransformationTaskStats(transID)
        taskDict = {}
        if res['OK'] and res['Value']:
            taskDict = res['Value']
        # Extend the record row in place with one counter per task state.
        for state in taskStateNames:
            trans.append(taskDict.get(state, 0))
        # Get the statistics for the number of files for the transformation
        fileDict = {}
        transType = transDict['Type']
        if transType.lower() in extendableTranfs:
            # Extendable (simulation-like) transformations have no fixed input
            # set, so a processed percentage is meaningless.
            fileDict['PercentProcessed'] = '-'
        else:
            res = database.getTransformationStats(transID)
            if res['OK']:
                fileDict = res['Value']
                total = fileDict['Total']
                # Files in "given up" states do not count towards the total.
                for stat in givenUpFileStatus:
                    total -= fileDict.get(stat, 0)
                processed = fileDict.get('Processed', 0)
                fileDict['PercentProcessed'] = "%.1f" % (int(processed * 1000. / total) / 10.) if total else 0.
        # Aggregate all "problematic" file states into a single counter.
        problematic = 0
        for stat in problematicStatuses:
            problematic += fileDict.get(stat, 0)
        fileDict['Problematic'] = problematic
        # Extend the record row in place with one counter per file state.
        for state in fileStateNames:
            trans.append(fileDict.get(state, 0))
    resultDict['Records'] = transList
    resultDict['Extras'] = statusDict
    return S_OK(resultDict)
###########################################################################
def initializeTransformationManagerHandler(serviceInfo):
    """Service initialization: create the module-level TransformationDB instance."""
    global database
    database = TransformationDB('TransformationDB', 'Transformation/TransformationDB')
    return S_OK()
| petricm/DIRAC | TransformationSystem/Service/TransformationManagerHandler.py | Python | gpl-3.0 | 29,763 | [
"DIRAC"
] | 6d2e396248b73504bb668c91f049f819756816d432a6b3c9601b8790e9d9d60d |
# -*- coding: utf-8 -*-
import numpy as np
from pgmpy.factors.distributions import BaseDistribution
from pgmpy.factors.distributions import GaussianDistribution
class CanonicalDistribution(BaseDistribution):
    """
    Canonical-form representation of the intermediate factors in a
    Gaussian network.

    The intermediate factors in a Gaussian network can be described
    compactly using a simple parametric representation called the
    canonical form. This representation is closed under the basic
    operations used in inference: factor product, factor division,
    factor reduction, and marginalization. Thus, this
    CanonicalDistribution class allows the inference process to be
    performed on joint Gaussian networks.

    A canonical form C(X; K, h, g) is defined as

        C(X; K, h, g) = exp( ((-1/2) * X.T * K * X) + (h.T * X) + g )

    References
    ----------
    Probabilistic Graphical Models, Principles and Techniques,
    Daphne Koller and Nir Friedman, Section 14.2, Chapter 14.
    """
def __init__(self, variables, K, h, g):
    """
    Initialize the canonical form C(X; K, h, g) = exp(-0.5 X.T K X + h.T X + g).

    Parameters
    ----------
    variables: list or array-like
        The variables for which the distribution is defined.
    K: n x n, 2-d array-like
        Quadratic (precision-like) coefficient matrix.
    h: n x 1, array-like
        Linear coefficient vector.
    g: int, float
        Scalar term of the exponent.

    Examples
    --------
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X', 'Y'], np.array([[1, -1], [-1, 1]]),
    ...                             np.array([[1], [-1]]), -3)
    >>> phi.variables
    ['X', 'Y']
    >>> phi.g
    -3
    """
    n_vars = len(variables)
    if len(h) != n_vars:
        raise ValueError(
            "Length of h parameter vector must be equal to "
            "the number of variables."
        )
    self.variables = variables
    # Normalize h to an (n, 1) float column vector regardless of input shape.
    self.h = np.asarray(np.reshape(h, (n_vars, 1)), dtype=float)
    self.g = g
    self.K = np.asarray(K, dtype=float)
    if self.K.shape != (n_vars, n_vars):
        raise ValueError(
            f"The K matrix should be a square matrix with order equal to the number of variables. Got: {self.K.shape}, Expected: {(n_vars, n_vars)}"
        )
@property
def pdf(self):
    """Return the (unnormalized) density function exp(g + h.T x - 0.5 x.T K x)."""
    def _density(*point):
        x = np.array(point)
        return np.exp(
            self.g + np.dot(x, self.h)[0] - 0.5 * np.dot(x.T, np.dot(self.K, x))
        )
    return _density
def assignment(self, *x):
    """
    Return the value of the density function at the given point.

    Parameters
    ----------
    *x: values of all variables of this distribution, collectively
        defining the point at which the density is evaluated.

    Returns
    -------
    float: The density value at the point.
    """
    return self.pdf(*x)
def copy(self):
    """
    Return an independent copy of this factor.

    The K and h arrays are copied so that mutating the copy does not
    affect the original.

    Returns
    -------
    CanonicalDistribution object: Copy of the factor
    """
    return CanonicalDistribution(self.variables, self.K.copy(), self.h.copy(), self.g)
def to_joint_gaussian(self):
    """
    Return the equivalent joint Gaussian distribution.

    The moment parameters are obtained from the canonical ones as
    covariance = K^{-1} and mean = K^{-1} h.
    """
    sigma = np.linalg.inv(self.K)
    mu = np.dot(sigma, self.h)
    return GaussianDistribution(self.variables, mu, sigma)
def reduce(self, values, inplace=True):
    """
    Reduce the distribution to the context of the given variable values.

    With the scope split into kept variables X and reduced variables Y,
    setting Y = y yields the canonical form with

        K' = K_XX
        h' = h_X - K_XY y
        g' = g + h_Y.T y - 0.5 y.T K_YY y

    Parameters
    ----------
    values: list, array-like
        A list of (variable name, variable value) tuples.
    inplace: boolean
        If True modify this factor, otherwise return a new one.

    Returns
    -------
    CanonicalDistribution or None:
        None if inplace=True (default), otherwise the reduced factor.
    """
    if not isinstance(values, (list, tuple, np.ndarray)):
        raise TypeError(
            f"variables: Expected type list or array-like, got type {type(values)}"
        )
    if not all(var in self.variables for var, value in values):
        raise ValueError("Variable not in scope.")

    phi = self if inplace else self.copy()

    reduced_vars = [var for var, _ in values]
    # Indices of the variables that stay in scope (j vector) ...
    keep_idx = [
        self.variables.index(var)
        for var in self.variables
        if var not in reduced_vars
    ]
    # ... and of those being fixed (i vector).
    red_idx = [self.variables.index(var) for var in reduced_vars]

    K_kk = self.K[np.ix_(keep_idx, keep_idx)]
    K_kr = self.K[np.ix_(keep_idx, red_idx)]
    K_rr = self.K[np.ix_(red_idx, red_idx)]
    h_k = self.h[keep_idx]
    h_r = self.h[red_idx]

    # Column vector of the assigned values, in the order given.
    y = np.array([value for _, value in values]).reshape(len(red_idx), 1)

    phi.variables = [self.variables[i] for i in keep_idx]
    phi.K = K_kk
    phi.h = h_k - np.dot(K_kr, y)
    phi.g = self.g + (np.dot(h_r.T, y) - 0.5 * np.dot(np.dot(y.T, K_rr), y))[0][0]

    if not inplace:
        return phi
def marginalize(self, variables, inplace=True):
    """
    Modifies the factor with marginalized values.

    Let C(X,Y ; K, h, g) be some canonical form over X,Y where,

    k = [[K_XX, K_XY],  ;  h = [[h_X],
         [K_YX, K_YY]]          [h_Y]]

    In this case, the result of integrating Y out is a canonical
    form C(K', h', g') given by,

    .. math:: K' = K_{XX} - K_{XY} * {K^{-1}}_{YY} * K_YX
    .. math:: h' = h_X - K_{XY} * {K^{-1}}_{YY} * h_Y
    .. math:: g' = g + 0.5 * (|Y| * log(2*pi) - log(|K_{YY}|) + {h^T}_Y * {K^{-1}}_{YY} * h_Y)

    Parameters
    ----------
    variables: list or array-like
        List of variables over which to marginalize.

    inplace: boolean
        If inplace=True it will modify the distribution itself,
        else would return a new distribution.

    Returns
    -------
    CanonicalDistribution or None :
        if inplace=True (default) returns None
        if inplace=False return a new CanonicalDistribution instance

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi = CanonicalDistribution(['X1', 'X2', 'X3'],
    ...                             np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
    ...                             np.array([[1], [4], [-1]]), -2)
    >>> phi.marginalize(['X3'])
    >>> phi.K
    array([[ 1., -1.],
           [-1.,  3.]])
    >>> phi.h
    array([[ 1. ],
           [ 3.5]])
    >>> phi.g
    -1.64920865
    """
    if not isinstance(variables, (list, tuple, np.ndarray)):
        raise TypeError(
            f"variables: Expected type list or array-like, got type {type(variables)}"
        )
    if not all([var in self.variables for var in variables]):
        raise ValueError("Variable not in scope.")

    phi = self if inplace else self.copy()

    # index_to_keep -> i vector
    index_to_keep = [
        self.variables.index(var) for var in self.variables if var not in variables
    ]
    # index_to_marginalize -> j vector
    index_to_marginalize = [self.variables.index(var) for var in variables]

    K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
    K_i_j = self.K[np.ix_(index_to_keep, index_to_marginalize)]
    K_j_i = self.K[np.ix_(index_to_marginalize, index_to_keep)]
    K_j_j = self.K[np.ix_(index_to_marginalize, index_to_marginalize)]
    K_j_j_inv = np.linalg.inv(K_j_j)
    h_i = self.h[index_to_keep]
    h_j = self.h[index_to_marginalize]

    phi.variables = [self.variables[index] for index in index_to_keep]
    phi.K = K_i_i - np.dot(np.dot(K_i_j, K_j_j_inv), K_j_i)
    phi.h = h_i - np.dot(np.dot(K_i_j, K_j_j_inv), h_j)
    # BUG FIX: the quadratic term of g' must use the inverse of K_YY
    # (h_Y^T K_YY^{-1} h_Y), consistent with the Gaussian integral and with
    # Koller & Friedman, Section 14.2. The previous code used K_YY itself.
    phi.g = (
        self.g
        + 0.5
        * (
            len(variables) * np.log(2 * np.pi)
            - np.log(abs(np.linalg.det(K_j_j)))
            + np.dot(np.dot(h_j.T, K_j_j_inv), h_j)
        )[0][0]
    )

    if not inplace:
        return phi
def _operate(self, other, operation, inplace=True):
    """
    Apply a factor product or division with another canonical form.

    Over a common scope the operations are element-wise on the
    parameters:

        C(K1, h1, g1) * C(K2, h2, g2) = C(K1+K2, h1+h2, g1+g2)
        C(K1, h1, g1) / C(K2, h2, g2) = C(K1-K2, h1-h2, g1-g2)

    When the scopes differ, both factors are first extended to the
    union of the scopes by zero-padding K and h.

    Parameters
    ----------
    other: CanonicalFactor
        The CanonicalDistribution to combine with.
    operation: String
        'product' for multiplication, 'divide' for division.

    Returns
    -------
    CanonicalDistribution or None:
        None if inplace=True (default), otherwise the combined factor.
    """
    if not isinstance(other, CanonicalDistribution):
        raise TypeError(
            f"CanonicalDistribution object can only be multiplied or divided with an another CanonicalDistribution object. Got {type(other)}, expected CanonicalDistribution."
        )

    phi = self if inplace else self.copy()

    # Union of the scopes, self's variables first.
    joint_vars = self.variables + [
        v for v in other.variables if v not in self.variables
    ]
    dim = len(joint_vars)
    idx_self = [joint_vars.index(v) for v in self.variables]
    idx_other = [joint_vars.index(v) for v in other.variables]

    def _embed_K(K, idx):
        # Zero-pad K into the joint scope.
        out = np.zeros([dim, dim])
        out[np.ix_(idx, idx)] = K
        return out

    def _embed_h(h, idx):
        # Zero-pad h into the joint scope.
        out = np.zeros(dim).reshape(dim, 1)
        out[idx] = h
        return out

    phi.variables = joint_vars
    if operation == "product":
        phi.K = _embed_K(self.K, idx_self) + _embed_K(other.K, idx_other)
        phi.h = _embed_h(self.h, idx_self) + _embed_h(other.h, idx_other)
        phi.g = self.g + other.g
    else:
        phi.K = _embed_K(self.K, idx_self) - _embed_K(other.K, idx_other)
        phi.h = _embed_h(self.h, idx_self) - _embed_h(other.h, idx_other)
        phi.g = self.g - other.g

    if not inplace:
        return phi
def product(self, other, inplace=True):
    """
    Returns the product of two canonical distributions.

    Parameters
    ----------
    other: CanonicalDistribution
        The CanonicalDistribution to be multiplied with self.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        CanonicalDistribution object.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None.
        if inplace=False returns a new CanonicalDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi1 = CanonicalDistribution(['x1', 'x2', 'x3'],
    ...                              np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
    ...                              np.array([[1], [4], [-1]]), -2)
    >>> phi2 = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
    ...                              np.array([[5], [-1]]), 1)
    >>> phi3 = phi1.product(phi2, inplace=False)
    >>> phi3.K
    array([[ 4., -3.,  0.],
           [-3.,  8., -2.],
           [ 0., -2.,  4.]])
    """
    # Delegate to the shared helper that extends both factors to a common
    # variable scope and adds their K matrices, h vectors and g constants.
    return self._operate(other, operation="product", inplace=inplace)
def divide(self, other, inplace=True):
    """
    Returns the division of two canonical distributions.

    Parameters
    ----------
    other: CanonicalDistribution
        The CanonicalDistribution that self is divided by.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        CanonicalDistribution object.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None.
        if inplace=False returns a new CanonicalDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi1 = CanonicalDistribution(['x1', 'x2', 'x3'],
    ...                              np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
    ...                              np.array([[1], [4], [-1]]), -2)
    >>> phi2 = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
    ...                              np.array([[5], [-1]]), 1)
    >>> phi4 = phi1.divide(phi2, inplace=False)
    >>> phi4.K
    array([[-2.,  1.,  0.],
           [ 1.,  0., -2.],
           [ 0., -2.,  4.]])
    """
    # Delegate to the shared helper that extends both factors to a common
    # variable scope and subtracts K, h and g.
    return self._operate(other, operation="divide", inplace=inplace)
def __mul__(self, other):
    # `phi1 * phi2` is a non-destructive product: operands stay intact.
    return self.product(other, inplace=False)

def __rmul__(self, other):
    # Multiplication of canonical factors is commutative.
    return self.__mul__(other)

def __truediv__(self, other):
    # `phi1 / phi2` is a non-destructive division.
    return self.divide(other, inplace=False)

__div__ = __truediv__  # Python 2 operator-protocol alias.
| pgmpy/pgmpy | pgmpy/factors/distributions/CanonicalDistribution.py | Python | mit | 19,128 | [
"Gaussian"
] | 65d181a7da87469e7179abc773d02f147a8d45e9e61653a6c2daad19450eddfb |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Alexander Yang
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import numpy as np
import mdtraj
from mdtraj import io
from mdtraj.testing import eq
import pytest
def test_read(get_fn):
    """The sample GSD file loads with the expected frames, atoms and bonds."""
    trajectory = mdtraj.load(get_fn('out.gsd'))
    assert len(trajectory) == 1000
    topology = trajectory.top
    assert topology.n_atoms == 80
    assert topology.n_bonds == 70
    assert [topology.atom(i).name for i in (0, 1)] == ['opls_135', 'opls_140']
def test_read_start(get_fn):
    """Loading with start=2 skips the first two frames but keeps the topology."""
    filename = get_fn('out.gsd')
    traj = mdtraj.load(filename)
    other = mdtraj.load(filename, start=2)
    # frame 2 of the full read must be frame 0 of the offset read
    eq(traj[2].xyz, other[0].xyz)
    eq(traj[2].unitcell_lengths, other[0].unitcell_lengths)
    assert traj.top == other.top
def test_read_frame(get_fn):
    """Loading with frame=2 returns exactly the third frame of the file."""
    filename = get_fn('out.gsd')
    full = mdtraj.load(filename)
    single = mdtraj.load(filename, frame=2)
    eq(full[2].xyz, single[0].xyz)
    eq(full[2].unitcell_lengths, single[0].unitcell_lengths)
    assert full.top == single.top
def test_read_stride(get_fn):
    """Loading with stride=10 keeps every tenth frame."""
    filename = get_fn('out.gsd')
    traj = mdtraj.load(filename)
    other = mdtraj.load(filename, stride=10)
    # strided frame i corresponds to full frame 10*i
    eq(traj[0].xyz, other[0].xyz)
    eq(traj[0].unitcell_lengths, other[0].unitcell_lengths)
    eq(traj[10].xyz, other[1].xyz)
    eq(traj[10].unitcell_lengths, other[1].unitcell_lengths)
    assert traj.top == other.top
def test_read_variable_top_error(get_fn):
    """GSD files whose topology changes between frames must raise IOError."""
    filename = get_fn('variable_top.gsd')
    # The load is executed purely for its exception; the original bound the
    # result to an unused local (`traj`), which has been removed.
    with pytest.raises(IOError):
        mdtraj.load(filename)
def test_write(get_fn, tmpdir):
    """A trajectory saved to GSD and reloaded round-trips unchanged."""
    filename = get_fn('out.gsd')
    traj = mdtraj.load(filename)
    fn = '{}/compare.gsd'.format(tmpdir)
    traj.save(fn)
    other = mdtraj.load(fn)
    assert traj.top == other.top
    eq(other.xyz, traj.xyz)
    eq(other.unitcell_lengths, traj.unitcell_lengths)
def test_write_frame(get_fn, tmpdir):
    """Saving a single-frame slice and reloading it round-trips that frame."""
    filename = get_fn('out.gsd')
    traj = mdtraj.load(filename)
    fn = '{}/compare.gsd'.format(tmpdir)
    traj[2].save(fn)
    other = mdtraj.load(fn)
    assert traj.top == other.top
    eq(traj[2].xyz, other.xyz)
    eq(traj[2].unitcell_lengths, other.unitcell_lengths)
| dwhswenson/mdtraj | tests/test_gsd.py | Python | lgpl-2.1 | 3,075 | [
"MDTraj"
] | 563b87a0aec86d87f22f6c5b51ca97a18302c7e94620b118631fb5c48ab30bef |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from trumptweets.tweets import views
# Project URL routing table; `urlpatterns` is the name Django's resolver
# imports from ROOT_URLCONF. MEDIA files are served by Django in development.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),

    # User management
    url(r'^users/', include('trumptweets.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
    url(r'^api/tweetloader$', views.tweet_loader, name='tweet_loader'),

] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]

    if 'debug_toolbar' in settings.INSTALLED_APPS:
        # django-debug-toolbar is an optional dev-only dependency; only route
        # to it when it is actually installed.
        import debug_toolbar

        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
| Patrick-and-Michael/trumptweets | config/urls.py | Python | mit | 1,729 | [
"VisIt"
] | b0452bdd124b7896049da4f03b0856960fc9e0c34da92bbeea2938398d5fde06 |
from collections import defaultdict, deque
import random
import re
from typing import Any, Text, List, Dict, Optional, TYPE_CHECKING
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import Domain
from rasa.core.events import UserUttered, ActionExecuted, Event
from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter
from rasa.core.training.generator import TrainingDataGenerator
from rasa.core.training.structures import StoryGraph, StoryStep
if TYPE_CHECKING:
from rasa_nlu.training_data import TrainingData, Message
import networkx
# Edge key used for edges that carry no intent label.
EDGE_NONE_LABEL = "NONE"

# Fixed ids of the synthetic start / end / temporary graph nodes.
START_NODE_ID = 0
END_NODE_ID = -1
TMP_NODE_ID = -2

# Packaged HTML template the rendered graph gets injected into.
VISUALIZATION_TEMPLATE_PATH = '/visualization.html'
class UserMessageGenerator(object):
    """Replaces parsed intent data with real example messages from NLU data."""

    def __init__(self, nlu_training_data):
        self.nlu_training_data = nlu_training_data
        # intent name -> training examples with that intent (fast lookup)
        self.mapping = self._create_reverse_mapping(self.nlu_training_data)

    @staticmethod
    def _create_reverse_mapping(
        data: "TrainingData"
    ) -> Dict[Text, List["Message"]]:
        """Create a mapping from intent to messages
        This allows a faster intent lookup."""
        # Keys are whatever `example.get("intent")` yields — presumably the
        # intent name string, since `message_for_data` looks entries up by
        # name (original annotation said Dict[Dict[...], ...], which cannot
        # be right: dicts are unhashable).
        d = defaultdict(list)
        for example in data.training_examples:
            if example.get("intent", {}) is not None:
                d[example.get("intent", {})].append(example)
        return d

    @staticmethod
    def _contains_same_entity(entities, e):
        # NOTE(review): despite the name, this returns True when `entities`
        # does NOT contain the same entity/value pair as `e`.
        return (entities.get(e.get("entity")) is None or
                entities.get(e.get("entity")) != e.get("value"))

    def message_for_data(self, structured_info):
        """Find a data sample with the same intent and entities.

        Given the parsed data from a message (intent and entities) finds a
        message in the data that has the same intent and entities."""
        if structured_info.get("intent") is not None:
            intent_name = structured_info.get("intent", {}).get("name")
            # copy before shuffling so the cached mapping stays untouched
            usable_examples = self.mapping.get(intent_name, [])[:]
            random.shuffle(usable_examples)
            for example in usable_examples:
                entities = {e.get("entity"): e.get("value")
                            for e in example.get("entities", [])}
                for e in structured_info.get("entities", []):
                    if self._contains_same_entity(entities, e):
                        break
                else:
                    # all requested entities matched this example
                    return example.text
        # fall back to the raw message text
        return structured_info.get("text")
def _fingerprint_node(graph, node, max_history):
    """Fingerprint a node in a graph.

    Can be used to identify nodes that are similar and can be merged within the
    graph.
    Generates all paths starting at `node` following the directed graph up to
    the length of `max_history`, and returns a set of strings describing the
    found paths. If the fingerprint creation for two nodes results in the same
    sets these nodes are indistinguishable if we walk along the path and only
    remember max history number of nodes we have visited. Hence, if we randomly
    walk on our directed graph, always only remembering the last `max_history`
    nodes we have visited, we can never remember if we have visited node A or
    node B if both have the same fingerprint. """
    # the candidate list contains all node paths that haven't been
    # extended till `max_history` length yet.
    candidates = deque()
    candidates.append([node])
    continuations = []
    while len(candidates) > 0:
        candidate = candidates.pop()
        last = candidate[-1]
        empty = True
        for _, succ_node in graph.out_edges(last):
            next_candidate = candidate[:]
            next_candidate.append(succ_node)
            # if the path is already long enough, we add it to the results,
            # otherwise we add it to the candidates
            # that we still need to visit
            if len(next_candidate) == max_history:
                continuations.append(next_candidate)
            else:
                candidates.append(next_candidate)
            empty = False
        if empty:
            # dead end: the (shorter) path itself is a complete fingerprint
            continuations.append(candidate)
    # each path becomes a " - "-joined string of node labels
    return {" - ".join([graph.node[node]["label"]
                        for node in continuation])
            for continuation in continuations}
def _incoming_edges(graph, node):
return {(prev_node, k)
for prev_node, _, k in graph.in_edges(node, keys=True)}
def _outgoing_edges(graph, node):
return {(succ_node, k)
for _, succ_node, k in graph.out_edges(node, keys=True)}
def _outgoing_edges_are_similar(graph, node_a, node_b):
    """If the outgoing edges from the two nodes are similar enough,
    it doesn't matter if you are in a or b.

    As your path will be the same because the outgoing edges will lead you to
    the same nodes anyways."""
    skip = {node_a, node_b}
    edges_a = {edge for edge in _outgoing_edges(graph, node_a)
               if edge[0] not in skip}
    edges_b = {edge for edge in _outgoing_edges(graph, node_b)
               if edge[0] not in skip}

    # a node without (relevant) outgoing edges is compatible with anything
    if not edges_a or not edges_b:
        return True
    return edges_a == edges_b
def _nodes_are_equivalent(graph, node_a, node_b, max_history):
    """Decides if two nodes are equivalent based on their fingerprints.

    Nodes must carry the same label; they are then mergeable when their
    outgoing edges are similar, their incoming edges match exactly, or their
    `max_history`-bounded path fingerprints coincide.
    """
    return (graph.node[node_a]["label"] == graph.node[node_b]["label"] and
            (_outgoing_edges_are_similar(graph, node_a, node_b) or
             _incoming_edges(graph, node_a) == _incoming_edges(graph, node_b) or
             _fingerprint_node(graph, node_a, max_history) ==
             _fingerprint_node(graph, node_b, max_history)))
def _add_edge(graph, u, v, key, label=None, **kwargs):
    """Adds an edge to the graph if the edge is not already present. Uses the
    label as the key."""
    if key is None:
        key = EDGE_NONE_LABEL

    if key == EDGE_NONE_LABEL:
        # unlabelled edges never get a visible caption
        label = ""

    if not graph.has_edge(u, v, key=EDGE_NONE_LABEL):
        graph.add_edge(u, v, key=key, label=label, **kwargs)
    else:
        # NOTE(review): when an unlabelled edge already connects u and v, no
        # parallel edge is added — only the style classes are merged onto the
        # existing edge. Presumably intentional (keeps the graph small), but
        # confirm the labelled edge should be dropped here.
        d = graph.get_edge_data(u, v, key=EDGE_NONE_LABEL)
        _transfer_style(kwargs, d)
def _transfer_style(source, target):
"""Copy over class names from source to target for all special classes.
Used if a node is highlighted and merged with another node."""
clazzes = source.get("class", "")
special_classes = {"dashed", "active"}
if "class" not in target:
target["class"] = ""
for c in special_classes:
if c in clazzes and c not in target["class"]:
target["class"] += " " + c
target["class"] = target["class"].strip()
return target
def _merge_equivalent_nodes(graph, max_history):
    """Searches for equivalent nodes in the graph and merges them.

    Mutates `graph` in place: for every pair of equivalent positive-id nodes
    the second node's edges are rewired onto the first and the second node is
    removed.
    """
    changed = True
    # every node merge changes the graph and can trigger previously
    # impossible node merges - we need to repeat until
    # the graph doesn't change anymore
    while changed:
        changed = False
        # only user-created nodes (ids > 0); special nodes are never merged
        remaining_node_ids = [n for n in graph.nodes() if n > 0]
        for idx, i in enumerate(remaining_node_ids):
            if graph.has_node(i):
                # assumes node equivalence is cumulative
                for j in remaining_node_ids[idx + 1:]:
                    if (graph.has_node(j) and
                            _nodes_are_equivalent(graph, i, j, max_history)):
                        # make sure we keep special styles
                        _transfer_style(graph.nodes(data=True)[j],
                                        graph.nodes(data=True)[i])

                        changed = True
                        # moves all outgoing edges to the other node
                        j_outgoing_edges = list(graph.out_edges(j, keys=True,
                                                                data=True))
                        for _, succ_node, k, d in j_outgoing_edges:
                            _add_edge(graph, i, succ_node, k, d.get("label"),
                                      **{"class": d.get("class", "")})
                            graph.remove_edge(j, succ_node)
                        # moves all incoming edges to the other node
                        j_incoming_edges = list(graph.in_edges(j, keys=True,
                                                               data=True))
                        for prev_node, _, k, d in j_incoming_edges:
                            _add_edge(graph, prev_node, i, k, d.get("label"),
                                      **{"class": d.get("class", "")})
                            graph.remove_edge(prev_node, j)
                        graph.remove_node(j)
async def _replace_edge_labels_with_nodes(graph,
                                          next_id,
                                          interpreter,
                                          nlu_training_data):
    """User messages are created as edge labels. This removes the labels and
    creates nodes instead.

    The algorithms (e.g. merging) are simpler if the user messages are labels
    on the edges. But it sometimes
    looks better if in the final graphs the user messages are nodes instead
    of edge labels."""
    if nlu_training_data:
        message_generator = UserMessageGenerator(nlu_training_data)
    else:
        message_generator = None

    # snapshot the edges: we mutate the graph while iterating
    edges = list(graph.edges(keys=True, data=True))
    for s, e, k, d in edges:
        if k != EDGE_NONE_LABEL:
            if message_generator and d.get("label", k) is not None:
                # replace the intent label with a real example message
                parsed_info = await interpreter.parse(d.get("label", k))
                label = message_generator.message_for_data(parsed_info)
            else:
                label = d.get("label", k)
            next_id += 1
            graph.remove_edge(s, e, k)
            # insert an intermediate message node s -> new -> e
            graph.add_node(next_id,
                           label=label,
                           shape="rect",
                           style="filled",
                           fillcolor="lightblue",
                           **_transfer_style(d, {"class": "intent"}))
            graph.add_edge(s, next_id, **{"class": d.get("class", "")})
            graph.add_edge(next_id, e, **{"class": d.get("class", "")})
def visualization_html_path():
    """Return the filesystem path of the bundled HTML visualization template."""
    import pkg_resources
    return pkg_resources.resource_filename(__name__,
                                           VISUALIZATION_TEMPLATE_PATH)
def persist_graph(graph, output_file):
    """Plots the graph and persists it into a html file.

    The graph is serialized to DOT via pydot and spliced into the bundled
    HTML template, which is then written to `output_file`.
    """
    import networkx as nx

    expg = nx.nx_pydot.to_pydot(graph)

    with open(visualization_html_path(), 'r') as file:
        template = file.read()

    # customize content of template by replacing tags
    template = template.replace('// { is-client }', 'isClient = true', 1)
    template = template.replace('// { graph-content }', "graph = `{}`"
                                .format(expg.to_string()), 1)

    # NOTE(review): both files use the platform default encoding — confirm
    # the template/output should not be pinned to UTF-8.
    with open(output_file, 'w') as file:
        file.write(template)
def _length_of_common_action_prefix(this: List[Event],
                                    other: List[Event]) -> int:
    """Calculate number of actions that two conversations have in common."""
    shared = 0
    relevant = {"user", "action"}
    # only user and action events participate in the comparison
    these = [e for e in this if e.type_name in relevant]
    those = [e for e in other if e.type_name in relevant]

    for mine, theirs in zip(these, those):
        if mine.type_name == "user" and theirs.type_name == "user":
            # paired user turns never break the prefix, but don't count either
            continue
        if (mine.type_name == "action" and
                theirs.type_name == "action" and
                mine.action_name == theirs.action_name):
            shared += 1
        else:
            break
    return shared
def _add_default_nodes(graph: 'networkx.MultiDiGraph',
                       fontsize: int = 12) -> None:
    """Add the standard nodes we need (START, END and an invisible TMP node)."""
    graph.add_node(START_NODE_ID,
                   label="START",
                   fillcolor="green", style="filled", fontsize=fontsize,
                   **{"class": "start active"})
    graph.add_node(END_NODE_ID,
                   label="END",
                   fillcolor="red", style="filled", fontsize=fontsize,
                   **{"class": "end"})
    # invisible helper used as the edge target for the ongoing conversation
    graph.add_node(TMP_NODE_ID,
                   label="TMP",
                   style="invis",
                   **{"class": "invisible"})
def _create_graph(fontsize: int = 12) -> 'networkx.MultiDiGraph':
    """Create a graph and adds the default nodes."""
    import networkx as nx

    graph = nx.MultiDiGraph()
    _add_default_nodes(graph, fontsize)
    return graph
def sanitize(s):
    """Strip characters that are unsafe for graph labels/keys.

    Removes every character that is not alphanumeric, whitespace, underscore
    or hyphen. Falsy inputs (None, "") are returned unchanged.
    """
    if s:
        # The character class is negated ([^...]) so all *disallowed*
        # characters are removed. The previous pattern (r"^[a-zA-Z0-9\s_-]")
        # instead deleted the first ALLOWED character of the string.
        return re.sub(r"""[^a-zA-Z0-9\s_-]""", "", s)
    else:
        return s
def _add_message_edge(graph: 'networkx.MultiDiGraph',
                      message: Dict[Text, Any],
                      current_node: int,
                      next_node_idx: int,
                      is_current: bool
                      ):
    """Create an edge based on the user message.

    The sanitized intent name becomes the edge key and the sanitized message
    text its label; without a message an unlabelled ("NONE") edge is added.
    """
    if message:
        message_key = sanitize(message.get("intent", {}).get("name", None))
        message_label = sanitize(message.get("text", None))
    else:
        message_key = None
        message_label = None

    _add_edge(graph, current_node, next_node_idx, message_key,
              message_label,
              **{"class": "active" if is_current else ""})
async def visualize_neighborhood(
    current: Optional[List[Event]],
    event_sequences: List[List[Event]],
    output_file: Optional[Text] = None,
    max_history: int = 2,
    interpreter: NaturalLanguageInterpreter = RegexInterpreter(),
    nlu_training_data: Optional["TrainingData"] = None,
    should_merge_nodes: bool = True,
    max_distance: int = 1,
    fontsize: int = 12
):
    """Given a set of event lists, visualizing the flows.

    Each event sequence becomes a path of action nodes connected by message
    edges; `current` (if given) marks the ongoing conversation, which gets
    highlighted and truncated after its common prefix with each sequence.
    Returns the resulting networkx graph and optionally persists it to
    `output_file`.
    """
    graph = _create_graph(fontsize)
    _add_default_nodes(graph)

    next_node_idx = START_NODE_ID
    special_node_idx = -3  # ids below TMP_NODE_ID are used for "..." nodes
    path_ellipsis_ends = set()

    for events in event_sequences:
        if current and max_distance:
            # only draw the part that overlaps with the active conversation
            prefix = _length_of_common_action_prefix(current, events)
        else:
            prefix = len(events)

        message = None
        current_node = START_NODE_ID
        idx = 0
        is_current = events == current

        for idx, el in enumerate(events):
            if not prefix:
                idx -= 1
                break
            if isinstance(el, UserUttered):
                if not el.intent:
                    message = await interpreter.parse(el.text)
                else:
                    message = el.parse_data
            elif (isinstance(el, ActionExecuted) and
                  el.action_name != ACTION_LISTEN_NAME):
                next_node_idx += 1
                graph.add_node(next_node_idx,
                               label=el.action_name,
                               fontsize=fontsize,
                               **{"class": "active" if is_current else ""})

                _add_message_edge(graph, message, current_node, next_node_idx,
                                  is_current)
                current_node = next_node_idx

                message = None
                prefix -= 1

        # determine what the end node of the conversation is going to be
        # this can either be an ellipsis "...", the conversation end node
        # "END" or a "TMP" node if this is the active conversation
        if is_current:
            if (isinstance(events[idx], ActionExecuted) and
                    events[idx].action_name == ACTION_LISTEN_NAME):
                next_node_idx += 1
                # NOTE(review): `sanitize(...)` is applied to the intent
                # *dict* before `.get("name", ...)` — this looks like it
                # should be sanitize(message.get("intent", {}).get(...));
                # confirm against upstream before relying on this branch.
                graph.add_node(next_node_idx,
                               label=" ? " if not message else
                               sanitize(message.get("intent", {}))
                               .get("name", " ? "),
                               shape="rect",
                               **{"class": "intent dashed active"})
                target = next_node_idx
            elif current_node:
                # highlight the last drawn node of the active conversation
                d = graph.nodes(data=True)[current_node]
                d["class"] = "dashed active"
                target = TMP_NODE_ID
            else:
                target = TMP_NODE_ID
        elif idx == len(events) - 1:
            target = END_NODE_ID
        elif current_node and current_node not in path_ellipsis_ends:
            # truncated path: end it with a fresh "..." node
            graph.add_node(special_node_idx, label="...",
                           **{"class": "ellipsis"})
            target = special_node_idx
            path_ellipsis_ends.add(current_node)
            special_node_idx -= 1
        else:
            target = END_NODE_ID

        _add_message_edge(graph, message, current_node, target, is_current)

    if should_merge_nodes:
        _merge_equivalent_nodes(graph, max_history)
    await _replace_edge_labels_with_nodes(graph, next_node_idx, interpreter,
                                          nlu_training_data)

    _remove_auxiliary_nodes(graph, special_node_idx)

    if output_file:
        persist_graph(graph, output_file)
    return graph
def _remove_auxiliary_nodes(graph: 'networkx.MultiDiGraph',
                            special_node_idx: int) -> None:
    """Remove any temporary or unused nodes."""
    graph.remove_node(TMP_NODE_ID)

    # drop END if nothing ever reached it
    if not len(list(graph.predecessors(END_NODE_ID))):
        graph.remove_node(END_NODE_ID)

    # remove duplicated "..." nodes after merging
    # (special ids run from special_node_idx+1 up to, but excluding, TMP)
    ps = set()
    for i in range(special_node_idx + 1, TMP_NODE_ID):
        for pred in list(graph.predecessors(i)):
            if pred in ps:
                # this predecessor already keeps another ellipsis node
                graph.remove_node(i)
            else:
                ps.add(pred)
async def visualize_stories(
    story_steps: List[StoryStep],
    domain: Domain,
    output_file: Optional[Text],
    max_history: int,
    interpreter: NaturalLanguageInterpreter = RegexInterpreter(),
    nlu_training_data: Optional["TrainingData"] = None,
    should_merge_nodes: bool = True,
    fontsize: int = 12,
    silent: bool = False
):
    """Given a set of stories, generates a graph visualizing the flows in the
    stories.

    Visualization is always a trade off between making the graph as small as
    possible while at the same time making sure the meaning doesn't change to
    "much". The algorithm will compress the graph generated from the stories
    to merge nodes that are similar. Hence, the algorithm might create paths
    through the graph that aren't actually specified in the stories, but we
    try to minimize that.

    Output file defines if and where a file containing the plotted graph
    should be stored.

    The history defines how much 'memory' the graph has. This influences in
    which situations the algorithm will merge nodes. Nodes will only be merged
    if they are equal within the history, this means the larger the history is
    we take into account the less likely it is we merge any nodes.

    The training data parameter can be used to pass in a Rasa NLU training
    data instance. It will be used to replace the user messages from the story
    file with actual messages from the training data."""
    story_graph = StoryGraph(story_steps)

    # expand the stories into flat event sequences (no augmentation)
    g = TrainingDataGenerator(story_graph, domain,
                              use_story_concatenation=False,
                              tracker_limit=100,
                              augmentation_factor=0)
    completed_trackers = g.generate(silent)
    event_sequences = [t.events for t in completed_trackers]

    graph = await visualize_neighborhood(None,
                                         event_sequences,
                                         output_file,
                                         max_history,
                                         interpreter,
                                         nlu_training_data,
                                         should_merge_nodes,
                                         max_distance=1,
                                         fontsize=fontsize)
    return graph
| RasaHQ/rasa_core | rasa/core/training/visualization.py | Python | apache-2.0 | 20,128 | [
"VisIt"
] | 73ec8ba6deee9acbd772e0902f7c4a5723c2cc7afb960aa9ceecb7f07850f958 |
# Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2021 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 Eevee (Alex Munroe) <amunroe@yelp.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2017 rr- <rr-@sakuya.pl>
# Copyright (c) 2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2018 Serhiy Storchaka <storchaka@gmail.com>
# Copyright (c) 2018 brendanator <brendan.maginnis@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2019-2021 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2019 Alex Hall <alex.mojaki@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 David Gilman <davidgilman1@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/LICENSE
"""tests for specific behaviour of astroid nodes
"""
import builtins
import copy
import os
import platform
import sys
import textwrap
import unittest
import pytest
import astroid
from astroid import bases, builder
from astroid import context as contextmod
from astroid import exceptions, node_classes, nodes, parse, test_utils, transforms, util
from . import resources
# Shared builder instance used by all test cases below.
abuilder = builder.AstroidBuilder()
BUILTINS = builtins.__name__
PY38 = sys.version_info[:2] >= (3, 8)

try:
    import typed_ast  # pylint: disable=unused-import

    HAS_TYPED_AST = True
except ImportError:
    # typed_ast merged in `ast` in Python 3.8
    HAS_TYPED_AST = PY38
class AsStringTest(resources.SysPathSetup, unittest.TestCase):
    """Round-trip tests for `as_string`: parsed code must render back to source."""

    def test_tuple_as_string(self):
        def build(string):
            return abuilder.string_build(string).body[0].value

        self.assertEqual(build("1,").as_string(), "(1, )")
        self.assertEqual(build("1, 2, 3").as_string(), "(1, 2, 3)")
        self.assertEqual(build("(1, )").as_string(), "(1, )")
        self.assertEqual(build("1, 2, 3").as_string(), "(1, 2, 3)")

    def test_func_signature_issue_185(self):
        code = textwrap.dedent(
            """
            def test(a, b, c=42, *, x=42, **kwargs):
                print(a, b, c, args)
        """
        )
        node = parse(code)
        self.assertEqual(node.as_string().strip(), code.strip())

    def test_as_string_for_list_containing_uninferable(self):
        node = builder.extract_node(
            """
        def foo():
            bar = [arg] * 1
        """
        )
        binop = node.body[0].value
        inferred = next(binop.infer())
        self.assertEqual(inferred.as_string(), "[Uninferable]")
        self.assertEqual(binop.as_string(), "[arg] * 1")

    def test_frozenset_as_string(self):
        ast_nodes = builder.extract_node(
            """
        frozenset((1, 2, 3)) #@
        frozenset({1, 2, 3}) #@
        frozenset([1, 2, 3,]) #@
        frozenset(None) #@
        frozenset(1) #@
        """
        )
        ast_nodes = [next(node.infer()) for node in ast_nodes]

        self.assertEqual(ast_nodes[0].as_string(), "frozenset((1, 2, 3))")
        self.assertEqual(ast_nodes[1].as_string(), "frozenset({1, 2, 3})")
        self.assertEqual(ast_nodes[2].as_string(), "frozenset([1, 2, 3])")
        # frozenset(None) / frozenset(1) are invalid calls, so inference does
        # not yield a frozenset node for them.
        self.assertNotEqual(ast_nodes[3].as_string(), "frozenset(None)")
        self.assertNotEqual(ast_nodes[4].as_string(), "frozenset(1)")

    def test_varargs_kwargs_as_string(self):
        ast = abuilder.string_build("raise_string(*args, **kwargs)").body[0]
        self.assertEqual(ast.as_string(), "raise_string(*args, **kwargs)")

    def test_module_as_string(self):
        """check as_string on a whole module prepared to be returned identically"""
        module = resources.build_file("data/module.py", "data.module")
        with open(resources.find("data/module.py")) as fobj:
            self.assertMultiLineEqual(module.as_string(), fobj.read())

    def test_module2_as_string(self):
        """check as_string on a whole module prepared to be returned identically"""
        module2 = resources.build_file("data/module2.py", "data.module2")
        with open(resources.find("data/module2.py")) as fobj:
            self.assertMultiLineEqual(module2.as_string(), fobj.read())

    def test_as_string(self):
        """check as_string for python syntax >= 2.7"""
        code = """one_two = {1, 2}
b = {v: k for (k, v) in enumerate('string')}
cdd = {k for k in b}\n\n"""
        ast = abuilder.string_build(code)
        self.assertMultiLineEqual(ast.as_string(), code)

    def test_3k_as_string(self):
        """check as_string for python 3k syntax"""
        code = """print()

def function(var):
    nonlocal counter
    try:
        hello
    except NameError as nexc:
        (*hell, o) = b'hello'
        raise AttributeError from nexc
\n"""
        ast = abuilder.string_build(code)
        self.assertEqual(ast.as_string(), code)

    def test_3k_annotations_and_metaclass(self):
        code = '''
            def function(var: int):
                nonlocal counter

            class Language(metaclass=Natural):
                """natural language"""
        '''

        code_annotations = textwrap.dedent(code)
        # expected output follows astroid's rendering: one blank line before
        # function defs is absorbed by strip(), two before class defs remain
        expected = '''\
def function(var: int):
    nonlocal counter


class Language(metaclass=Natural):
    """natural language"""'''
        ast = abuilder.string_build(code_annotations)
        self.assertEqual(ast.as_string().strip(), expected)

    def test_ellipsis(self):
        ast = abuilder.string_build("a[...]").body[0]
        self.assertEqual(ast.as_string(), "a[...]")

    def test_slices(self):
        for code in (
            "a[0]",
            "a[1:3]",
            "a[:-1:step]",
            "a[:, newaxis]",
            "a[newaxis, :]",
            "del L[::2]",
            "del A[1]",
            "del Br[:]",
        ):
            ast = abuilder.string_build(code).body[0]
            self.assertEqual(ast.as_string(), code)

    def test_slice_and_subscripts(self):
        code = """a[:1] = bord[2:]
a[:1] = bord[2:]
del bree[3:d]
bord[2:]
del av[d::f], a[df:]
a[:1] = bord[2:]
del SRC[::1, newaxis, 1:]
tous[vals] = 1010
del thousand[key]
del a[::2], a[:-1:step]
del Fee.form[left:]
aout.vals = miles.of_stuff
del (ccok, (name.thing, foo.attrib.value)), Fee.form[left:]
if all[1] == bord[0:]:
    pass\n\n"""
        ast = abuilder.string_build(code)
        self.assertEqual(ast.as_string(), code)

    def test_int_attribute(self):
        code = """
x = (-3).real
y = (3).imag
"""
        ast = abuilder.string_build(code)
        self.assertEqual(ast.as_string().strip(), code.strip())

    def test_operator_precedence(self):
        with open(resources.find("data/operator_precedence.py")) as f:
            for code in f:
                self.check_as_string_ast_equality(code)

    @staticmethod
    def check_as_string_ast_equality(code):
        """
        Check that as_string produces source code with exactly the same
        semantics as the source it was originally parsed from
        """
        pre = builder.parse(code)
        post = builder.parse(pre.as_string())

        pre_repr = pre.repr_tree()
        post_repr = post.repr_tree()

        assert pre_repr == post_repr
        assert pre.as_string().strip() == code.strip()

    def test_class_def(self):
        code = """
import abc


class A:
    pass


class B(metaclass=A, x=1):
    pass


class C(B):
    pass


class D(metaclass=abc.ABCMeta):
    pass
"""
        ast = abuilder.string_build(code)
        self.assertEqual(ast.as_string().strip(), code.strip())

    # This test is disabled on PyPy because we cannot get a proper release on TravisCI that has
    # proper support for f-strings (we need 7.2 at least)
    @pytest.mark.skipif(
        sys.version_info[:2] < (3, 6) or platform.python_implementation() == "PyPy",
        reason="Needs f-string support.",
    )
    def test_f_strings(self):
        code = r'''
a = f"{'a'}"
b = f'{{b}}'
c = f""" "{'c'}" """
d = f'{d!r} {d!s} {d!a}'
e = f'{e:.3}'
f = f'{f:{x}.{y}}'
n = f'\n'
everything = f""" " \' \r \t \\ {{ }} {'x' + x!r:a} {["'"]!s:{a}}"""
'''
        ast = abuilder.string_build(code)
        self.assertEqual(ast.as_string().strip(), code.strip())
class _NodeTest(unittest.TestCase):
    """Base class: parses `CODE` once per subclass and caches the module."""

    CODE = None

    @property
    def astroid(self):
        # cache the parsed module on the concrete subclass so the (relatively
        # expensive) parse runs only once per test class
        try:
            return self.__class__.__dict__["CODE_Astroid"]
        except KeyError:
            module = builder.parse(self.CODE)
            self.__class__.CODE_Astroid = module
            return module
class IfNodeTest(_NodeTest):
    """test transformation of If Node"""

    # NOTE: the blank lines inside CODE matter — the block_range assertions
    # below reference absolute line numbers in this snippet.
    CODE = """
        if 0:
            print()

        if True:
            print()
        else:
            pass

        if "":
            print()
        elif []:
            raise

        if 1:
            print()
        elif True:
            print()
        elif func():
            pass
        else:
            raise
        """

    def test_if_elif_else_node(self):
        """test transformation for If node"""
        self.assertEqual(len(self.astroid.body), 4)
        for stmt in self.astroid.body:
            self.assertIsInstance(stmt, nodes.If)
        self.assertFalse(self.astroid.body[0].orelse)  # simple If
        self.assertIsInstance(self.astroid.body[1].orelse[0], nodes.Pass)  # If / else
        self.assertIsInstance(self.astroid.body[2].orelse[0], nodes.If)  # If / elif
        self.assertIsInstance(self.astroid.body[3].orelse[0].orelse[0], nodes.If)

    def test_block_range(self):
        # XXX ensure expected values
        self.assertEqual(self.astroid.block_range(1), (0, 22))
        self.assertEqual(self.astroid.block_range(10), (0, 22))  # XXX (10, 22) ?
        self.assertEqual(self.astroid.body[1].block_range(5), (5, 6))
        self.assertEqual(self.astroid.body[1].block_range(6), (6, 6))
        self.assertEqual(self.astroid.body[1].orelse[0].block_range(7), (7, 8))
        self.assertEqual(self.astroid.body[1].orelse[0].block_range(8), (8, 8))
class TryExceptNodeTest(_NodeTest):
    """Block ranges for a try / multiple-except / else statement."""

    CODE = """
        try:
            print ('pouet')
        except IOError:
            pass
        except UnicodeError:
            print()
        else:
            print()
        """

    def test_block_range(self):
        # XXX ensure expected values
        self.assertEqual(self.astroid.body[0].block_range(1), (1, 8))
        self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
        self.assertEqual(self.astroid.body[0].block_range(3), (3, 8))
        self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
        self.assertEqual(self.astroid.body[0].block_range(5), (5, 5))
        self.assertEqual(self.astroid.body[0].block_range(6), (6, 6))
        self.assertEqual(self.astroid.body[0].block_range(7), (7, 7))
        self.assertEqual(self.astroid.body[0].block_range(8), (8, 8))
class TryFinallyNodeTest(_NodeTest):
"""Block ranges of a simple try/finally statement."""
CODE = """
try:
print ('pouet')
finally:
print ('pouet')
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 4))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 4))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
class TryExceptFinallyNodeTest(_NodeTest):
"""Block ranges of a combined try/except/finally statement."""
CODE = """
try:
print('pouet')
except Exception:
print ('oops')
finally:
print ('pouet')
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 6))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 4))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
self.assertEqual(self.astroid.body[0].block_range(5), (5, 5))
self.assertEqual(self.astroid.body[0].block_range(6), (6, 6))
class ImportNodeTest(resources.SysPathSetup, unittest.TestCase):
def setUp(self):
super().setUp()
self.module = resources.build_file("data/module.py", "data.module")
self.module2 = resources.build_file("data/module2.py", "data.module2")
def test_import_self_resolve(self):
myos = next(self.module2.igetattr("myos"))
self.assertTrue(isinstance(myos, nodes.Module), myos)
self.assertEqual(myos.name, "os")
self.assertEqual(myos.qname(), "os")
self.assertEqual(myos.pytype(), "%s.module" % BUILTINS)
def test_from_self_resolve(self):
namenode = next(self.module.igetattr("NameNode"))
self.assertTrue(isinstance(namenode, nodes.ClassDef), namenode)
self.assertEqual(namenode.root().name, "astroid.node_classes")
self.assertEqual(namenode.qname(), "astroid.node_classes.Name")
self.assertEqual(namenode.pytype(), "%s.type" % BUILTINS)
abspath = next(self.module2.igetattr("abspath"))
self.assertTrue(isinstance(abspath, nodes.FunctionDef), abspath)
self.assertEqual(abspath.root().name, "os.path")
self.assertEqual(abspath.pytype(), "%s.function" % BUILTINS)
if sys.platform != "win32":
# Not sure what is causing this check to fail on Windows.
# For some reason the abspath() inference returns a different
# path than expected:
# AssertionError: 'os.path._abspath_fallback' != 'os.path.abspath'
self.assertEqual(abspath.qname(), "os.path.abspath")
def test_real_name(self):
from_ = self.module["NameNode"]
self.assertEqual(from_.real_name("NameNode"), "Name")
imp_ = self.module["os"]
self.assertEqual(imp_.real_name("os"), "os")
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, "os.path")
imp_ = self.module["NameNode"]
self.assertEqual(imp_.real_name("NameNode"), "Name")
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, "Name")
imp_ = self.module2["YO"]
self.assertEqual(imp_.real_name("YO"), "YO")
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, "data")
def test_as_string(self):
ast = self.module["modutils"]
self.assertEqual(ast.as_string(), "from astroid import modutils")
ast = self.module["NameNode"]
self.assertEqual(
ast.as_string(), "from astroid.node_classes import Name as NameNode"
)
ast = self.module["os"]
self.assertEqual(ast.as_string(), "import os.path")
code = """from . import here
from .. import door
from .store import bread
from ..cave import wine\n\n"""
ast = abuilder.string_build(code)
self.assertMultiLineEqual(ast.as_string(), code)
def test_bad_import_inference(self):
# Explication of bug
"""When we import PickleError from nonexistent, a call to the infer
method of this From node will be made by unpack_infer.
inference.infer_from will try to import this module, which will fail and
raise a InferenceException (by mixins.do_import_module). The infer_name
will catch this exception and yield and Uninferable instead.
"""
code = """
try:
from pickle import PickleError
except ImportError:
from nonexistent import PickleError
try:
pass
except PickleError:
pass
"""
module = builder.parse(code)
handler_type = module.body[1].handlers[0].type
excs = list(node_classes.unpack_infer(handler_type))
# The number of returned object can differ on Python 2
# and Python 3. In one version, an additional item will
# be returned, from the _pickle module, which is not
# present in the other version.
self.assertIsInstance(excs[0], nodes.ClassDef)
self.assertEqual(excs[0].name, "PickleError")
self.assertIs(excs[-1], util.Uninferable)
def test_absolute_import(self):
module = resources.build_file("data/absimport.py")
ctx = contextmod.InferenceContext()
# will fail if absolute import failed
ctx.lookupname = "message"
next(module["message"].infer(ctx))
ctx.lookupname = "email"
m = next(module["email"].infer(ctx))
self.assertFalse(m.file.startswith(os.path.join("data", "email.py")))
def test_more_absolute_import(self):
module = resources.build_file("data/module1abs/__init__.py", "data.module1abs")
self.assertIn("sys", module.locals)
class CmpNodeTest(unittest.TestCase):
    """Round-trip test for comparison (Compare) nodes."""

    def test_as_string(self):
        """A parsed comparison must serialize back to its source text."""
        source = "a == 2"
        compare_node = abuilder.string_build(source).body[0]
        self.assertEqual(compare_node.as_string(), source)
class ConstNodeTest(unittest.TestCase):
    """Checks nodes.const_factory over every supported literal type."""

    def _test(self, value):
        """Build a Const for *value* and verify its proxied class metadata."""
        node = nodes.const_factory(value)
        # pylint: disable=no-member; Infers two potential values
        proxied = node._proxied
        self.assertIsInstance(proxied, nodes.ClassDef)
        self.assertEqual(proxied.name, value.__class__.__name__)
        self.assertIs(node.value, value)
        self.assertTrue(proxied.parent)
        self.assertEqual(proxied.root().name, value.__class__.__module__)

    def test_none(self):
        self._test(None)

    def test_bool(self):
        self._test(True)

    def test_int(self):
        self._test(1)

    def test_float(self):
        self._test(1.0)

    def test_complex(self):
        self._test(1.0j)

    def test_str(self):
        self._test("a")

    def test_unicode(self):
        self._test("a")

    def test_copy(self):
        """
        Make sure copying a Const object doesn't result in infinite recursion
        """
        cloned = copy.copy(nodes.Const(1))
        assert cloned.value == 1
class NameNodeTest(unittest.TestCase):
def test_assign_to_True(self):
"""test that True and False assignments don't crash"""
# Assigning to / deleting keyword constants is a syntax error; the
# builder is expected to surface it as AstroidBuildingError rather
# than raise something unexpected.
code = """
True = False
def hello(False):
pass
del True
"""
with self.assertRaises(exceptions.AstroidBuildingError):
builder.parse(code)
class AnnAssignNodeTest(unittest.TestCase):
def test_primitive(self):
code = textwrap.dedent(
"""
test: int = 5
"""
)
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertEqual(assign.annotation.name, "int")
self.assertEqual(assign.value.value, 5)
self.assertEqual(assign.simple, 1)
def test_primitive_without_initial_value(self):
code = textwrap.dedent(
"""
test: str
"""
)
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertEqual(assign.annotation.name, "str")
self.assertEqual(assign.value, None)
def test_complex(self):
code = textwrap.dedent(
"""
test: Dict[List[str]] = {}
"""
)
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertIsInstance(assign.annotation, astroid.Subscript)
self.assertIsInstance(assign.value, astroid.Dict)
def test_as_string(self):
code = textwrap.dedent(
"""
print()
test: int = 5
test2: str
test3: List[Dict[str, str]] = []
"""
)
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string().strip(), code.strip())
class ArgumentsNodeTC(unittest.TestCase):
def test_linenumbering(self):
ast = builder.parse(
"""
def func(a,
b): pass
x = lambda x: None
"""
)
self.assertEqual(ast["func"].args.fromlineno, 2)
self.assertFalse(ast["func"].args.is_statement)
xlambda = next(ast["x"].infer())
self.assertEqual(xlambda.args.fromlineno, 4)
self.assertEqual(xlambda.args.tolineno, 4)
self.assertFalse(xlambda.args.is_statement)
self.skipTest(
"FIXME http://bugs.python.org/issue10445 "
"(no line number on function args)"
)
def test_kwoargs(self):
ast = builder.parse(
"""
def func(*, x):
pass
"""
)
args = ast["func"].args
self.assertTrue(args.is_argument("x"))
@test_utils.require_version(minver="3.8")
def test_positional_only(self):
ast = builder.parse(
"""
def func(x, /, y):
pass
"""
)
args = ast["func"].args
self.assertTrue(args.is_argument("x"))
self.assertTrue(args.is_argument("y"))
index, node = args.find_argname("x")
self.assertEqual(index, 0)
self.assertIsNotNone(node)
class UnboundMethodNodeTest(unittest.TestCase):
def test_no_super_getattr(self):
# This is a test for issue
# https://bitbucket.org/logilab/astroid/issue/91, which tests
# that UnboundMethod doesn't call super when doing .getattr.
ast = builder.parse(
"""
class A(object):
def test(self):
pass
meth = A.test
"""
)
# A.test infers to an UnboundMethod: unknown attributes must raise,
# while __name__ resolves to the wrapped function's name.
node = next(ast["meth"].infer())
with self.assertRaises(exceptions.AttributeInferenceError):
node.getattr("__missssing__")
name = node.getattr("__name__")[0]
self.assertIsInstance(name, nodes.Const)
self.assertEqual(name.value, "test")
class BoundMethodNodeTest(unittest.TestCase):
def test_is_property(self):
ast = builder.parse(
"""
import abc
def cached_property():
# Not a real decorator, but we don't care
pass
def reify():
# Same as cached_property
pass
def lazy_property():
pass
def lazyproperty():
pass
def lazy(): pass
class A(object):
@property
def builtin_property(self):
return 42
@abc.abstractproperty
def abc_property(self):
return 42
@cached_property
def cached_property(self): return 42
@reify
def reified(self): return 42
@lazy_property
def lazy_prop(self): return 42
@lazyproperty
def lazyprop(self): return 42
def not_prop(self): pass
@lazy
def decorated_with_lazy(self): return 42
cls = A()
builtin_property = cls.builtin_property
abc_property = cls.abc_property
cached_p = cls.cached_property
reified = cls.reified
not_prop = cls.not_prop
lazy_prop = cls.lazy_prop
lazyprop = cls.lazyprop
decorated_with_lazy = cls.decorated_with_lazy
"""
)
for prop in (
"builtin_property",
"abc_property",
"cached_p",
"reified",
"lazy_prop",
"lazyprop",
"decorated_with_lazy",
):
inferred = next(ast[prop].infer())
self.assertIsInstance(inferred, nodes.Const, prop)
self.assertEqual(inferred.value, 42, prop)
inferred = next(ast["not_prop"].infer())
self.assertIsInstance(inferred, bases.BoundMethod)
class AliasesTest(unittest.TestCase):
def setUp(self):
self.transformer = transforms.TransformVisitor()
def parse_transform(self, code):
module = parse(code, apply_transforms=False)
return self.transformer.visit(module)
def test_aliases(self):
def test_from(node):
node.names = node.names + [("absolute_import", None)]
return node
def test_class(node):
node.name = "Bar"
return node
def test_function(node):
node.name = "another_test"
return node
def test_callfunc(node):
if node.func.name == "Foo":
node.func.name = "Bar"
return node
return None
def test_assname(node):
if node.name == "foo":
return nodes.AssignName(
"bar", node.lineno, node.col_offset, node.parent
)
return None
def test_assattr(node):
if node.attrname == "a":
node.attrname = "b"
return node
return None
def test_getattr(node):
if node.attrname == "a":
node.attrname = "b"
return node
return None
def test_genexpr(node):
if node.elt.value == 1:
node.elt = nodes.Const(2, node.lineno, node.col_offset, node.parent)
return node
return None
self.transformer.register_transform(nodes.ImportFrom, test_from)
self.transformer.register_transform(nodes.ClassDef, test_class)
self.transformer.register_transform(nodes.FunctionDef, test_function)
self.transformer.register_transform(nodes.Call, test_callfunc)
self.transformer.register_transform(nodes.AssignName, test_assname)
self.transformer.register_transform(nodes.AssignAttr, test_assattr)
self.transformer.register_transform(nodes.Attribute, test_getattr)
self.transformer.register_transform(nodes.GeneratorExp, test_genexpr)
string = """
from __future__ import print_function
class Foo: pass
def test(a): return a
foo = Foo()
foo.a = test(42)
foo.a
(1 for _ in range(0, 42))
"""
module = self.parse_transform(string)
self.assertEqual(len(module.body[0].names), 2)
self.assertIsInstance(module.body[0], nodes.ImportFrom)
self.assertEqual(module.body[1].name, "Bar")
self.assertIsInstance(module.body[1], nodes.ClassDef)
self.assertEqual(module.body[2].name, "another_test")
self.assertIsInstance(module.body[2], nodes.FunctionDef)
self.assertEqual(module.body[3].targets[0].name, "bar")
self.assertIsInstance(module.body[3].targets[0], nodes.AssignName)
self.assertEqual(module.body[3].value.func.name, "Bar")
self.assertIsInstance(module.body[3].value, nodes.Call)
self.assertEqual(module.body[4].targets[0].attrname, "b")
self.assertIsInstance(module.body[4].targets[0], nodes.AssignAttr)
self.assertIsInstance(module.body[5], nodes.Expr)
self.assertEqual(module.body[5].value.attrname, "b")
self.assertIsInstance(module.body[5].value, nodes.Attribute)
self.assertEqual(module.body[6].value.elt.value, 2)
self.assertIsInstance(module.body[6].value, nodes.GeneratorExp)
class Python35AsyncTest(unittest.TestCase):
def test_async_await_keywords(self):
async_def, async_for, async_with, await_node = builder.extract_node(
"""
async def func(): #@
async for i in range(10): #@
f = __(await i)
async with test(): #@
pass
"""
)
self.assertIsInstance(async_def, nodes.AsyncFunctionDef)
self.assertIsInstance(async_for, nodes.AsyncFor)
self.assertIsInstance(async_with, nodes.AsyncWith)
self.assertIsInstance(await_node, nodes.Await)
self.assertIsInstance(await_node.value, nodes.Name)
def _test_await_async_as_string(self, code):
ast_node = parse(code)
self.assertEqual(ast_node.as_string().strip(), code.strip())
def test_await_as_string(self):
code = textwrap.dedent(
"""
async def function():
await 42
await x[0]
(await x)[0]
await (x + y)[0]
"""
)
self._test_await_async_as_string(code)
def test_asyncwith_as_string(self):
code = textwrap.dedent(
"""
async def function():
async with 42:
pass
"""
)
self._test_await_async_as_string(code)
def test_asyncfor_as_string(self):
code = textwrap.dedent(
"""
async def function():
async for i in range(10):
await 42
"""
)
self._test_await_async_as_string(code)
def test_decorated_async_def_as_string(self):
code = textwrap.dedent(
"""
@decorator
async def function():
async for i in range(10):
await 42
"""
)
self._test_await_async_as_string(code)
class ContextTest(unittest.TestCase):
def test_subscript_load(self):
node = builder.extract_node("f[1]")
self.assertIs(node.ctx, astroid.Load)
def test_subscript_del(self):
node = builder.extract_node("del f[1]")
self.assertIs(node.targets[0].ctx, astroid.Del)
def test_subscript_store(self):
node = builder.extract_node("f[1] = 2")
subscript = node.targets[0]
self.assertIs(subscript.ctx, astroid.Store)
def test_list_load(self):
node = builder.extract_node("[]")
self.assertIs(node.ctx, astroid.Load)
def test_list_del(self):
node = builder.extract_node("del []")
self.assertIs(node.targets[0].ctx, astroid.Del)
def test_list_store(self):
with self.assertRaises(exceptions.AstroidSyntaxError):
builder.extract_node("[0] = 2")
def test_tuple_load(self):
node = builder.extract_node("(1, )")
self.assertIs(node.ctx, astroid.Load)
def test_tuple_store(self):
with self.assertRaises(exceptions.AstroidSyntaxError):
builder.extract_node("(1, ) = 3")
def test_starred_load(self):
node = builder.extract_node("a = *b")
starred = node.value
self.assertIs(starred.ctx, astroid.Load)
def test_starred_store(self):
node = builder.extract_node("a, *b = 1, 2")
starred = node.targets[0].elts[1]
self.assertIs(starred.ctx, astroid.Store)
def test_unknown():
    """An Unknown node infers Uninferable and exposes string name/qname."""
    unknown = nodes.Unknown()
    inferred = next(unknown.infer())
    assert isinstance(inferred, type(util.Uninferable))
    assert isinstance(nodes.Unknown().name, str)
    assert isinstance(nodes.Unknown().qname(), str)
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_comments_with():
module = builder.parse(
"""
with a as b: # type: int
pass
with a as b: # type: ignore
pass
"""
)
node = module.body[0]
ignored_node = module.body[1]
assert isinstance(node.type_annotation, astroid.Name)
assert ignored_node.type_annotation is None
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_comments_for():
module = builder.parse(
"""
for a, b in [1, 2, 3]: # type: List[int]
pass
for a, b in [1, 2, 3]: # type: ignore
pass
"""
)
node = module.body[0]
ignored_node = module.body[1]
assert isinstance(node.type_annotation, astroid.Subscript)
assert node.type_annotation.as_string() == "List[int]"
assert ignored_node.type_annotation is None
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_coments_assign():
# NOTE(review): "coments" is a typo for "comments"; pytest collects the
# test regardless, so renaming would only change the reported test id.
# A "# type: X" comment on an assignment populates type_annotation,
# while "# type: ignore" leaves it as None.
module = builder.parse(
"""
a, b = [1, 2, 3] # type: List[int]
a, b = [1, 2, 3] # type: ignore
"""
)
node = module.body[0]
ignored_node = module.body[1]
assert isinstance(node.type_annotation, astroid.Subscript)
assert node.type_annotation.as_string() == "List[int]"
assert ignored_node.type_annotation is None
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_comments_invalid_expression():
module = builder.parse(
"""
a, b = [1, 2, 3] # type: something completely invalid
a, b = [1, 2, 3] # typeee: 2*+4
a, b = [1, 2, 3] # type: List[int
"""
)
for node in module.body:
assert node.type_annotation is None
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_comments_invalid_function_comments():
module = builder.parse(
"""
def func():
# type: something completely invalid
pass
def func1():
# typeee: 2*+4
pass
def func2():
# type: List[int
pass
"""
)
for node in module.body:
assert node.type_comment_returns is None
assert node.type_comment_args is None
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_comments_function():
module = builder.parse(
"""
def func():
# type: (int) -> str
pass
def func1():
# type: (int, int, int) -> (str, str)
pass
def func2():
# type: (int, int, str, List[int]) -> List[int]
pass
"""
)
expected_annotations = [
(["int"], astroid.Name, "str"),
(["int", "int", "int"], astroid.Tuple, "(str, str)"),
(["int", "int", "str", "List[int]"], astroid.Subscript, "List[int]"),
]
for node, (expected_args, expected_returns_type, expected_returns_string) in zip(
module.body, expected_annotations
):
assert node.type_comment_returns is not None
assert node.type_comment_args is not None
for expected_arg, actual_arg in zip(expected_args, node.type_comment_args):
assert actual_arg.as_string() == expected_arg
assert isinstance(node.type_comment_returns, expected_returns_type)
assert node.type_comment_returns.as_string() == expected_returns_string
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_type_comments_arguments():
module = builder.parse(
"""
def func(
a, # type: int
):
# type: (...) -> str
pass
def func1(
a, # type: int
b, # type: int
c, # type: int
):
# type: (...) -> (str, str)
pass
def func2(
a, # type: int
b, # type: int
c, # type: str
d, # type: List[int]
):
# type: (...) -> List[int]
pass
"""
)
expected_annotations = [
["int"],
["int", "int", "int"],
["int", "int", "str", "List[int]"],
]
for node, expected_args in zip(module.body, expected_annotations):
assert len(node.type_comment_args) == 1
if PY38:
assert isinstance(node.type_comment_args[0], astroid.Const)
assert node.type_comment_args[0].value == Ellipsis
else:
assert isinstance(node.type_comment_args[0], astroid.Ellipsis)
assert len(node.args.type_comment_args) == len(expected_args)
for expected_arg, actual_arg in zip(expected_args, node.args.type_comment_args):
assert actual_arg.as_string() == expected_arg
@pytest.mark.skipif(
not PY38, reason="needs to be able to parse positional only arguments"
)
def test_type_comments_posonly_arguments():
module = builder.parse(
"""
def f_arg_comment(
a, # type: int
b, # type: int
/,
c, # type: Optional[int]
d, # type: Optional[int]
*,
e, # type: float
f, # type: float
):
# type: (...) -> None
pass
"""
)
expected_annotations = [
[["int", "int"], ["Optional[int]", "Optional[int]"], ["float", "float"]]
]
for node, expected_types in zip(module.body, expected_annotations):
assert len(node.type_comment_args) == 1
if PY38:
assert isinstance(node.type_comment_args[0], astroid.Const)
assert node.type_comment_args[0].value == Ellipsis
else:
assert isinstance(node.type_comment_args[0], astroid.Ellipsis)
type_comments = [
node.args.type_comment_posonlyargs,
node.args.type_comment_args,
node.args.type_comment_kwonlyargs,
]
for expected_args, actual_args in zip(expected_types, type_comments):
assert len(expected_args) == len(actual_args)
for expected_arg, actual_arg in zip(expected_args, actual_args):
assert actual_arg.as_string() == expected_arg
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_correct_function_type_comment_parent():
data = """
def f(a):
# type: (A) -> A
pass
"""
parsed_data = builder.parse(data)
f = parsed_data.body[0]
assert f.type_comment_args[0].parent is f
assert f.type_comment_returns.parent is f
def test_is_generator_for_yield_assignments():
node = astroid.extract_node(
"""
class A:
def test(self):
a = yield
while True:
print(a)
yield a
a = A()
a.test
"""
)
inferred = next(node.infer())
assert isinstance(inferred, astroid.BoundMethod)
assert bool(inferred.is_generator())
# NOTE(review): this class neither subclasses unittest.TestCase nor matches
# pytest's default "Test*" collection pattern, so these two (mutually
# contradictory) tests are most likely never executed -- confirm against the
# project's test configuration before relying on them.
class AsyncGeneratorTest:
def test_async_generator(self):
# An async def containing `yield` should infer as an AsyncGenerator.
node = astroid.extract_node(
"""
async def a_iter(n):
for i in range(1, n + 1):
yield i
await asyncio.sleep(1)
a_iter(2) #@
"""
)
inferred = next(node.infer())
assert isinstance(inferred, bases.AsyncGenerator)
assert inferred.getattr("__aiter__")
assert inferred.getattr("__anext__")
assert inferred.pytype() == "builtins.async_generator"
assert inferred.display_type() == "AsyncGenerator"
def test_async_generator_is_generator_on_older_python(self):
# NOTE(review): same source as above but expects a plain Generator --
# presumably gated on an older Python version elsewhere; verify.
node = astroid.extract_node(
"""
async def a_iter(n):
for i in range(1, n + 1):
yield i
await asyncio.sleep(1)
a_iter(2) #@
"""
)
inferred = next(node.infer())
assert isinstance(inferred, bases.Generator)
assert inferred.getattr("__iter__")
assert inferred.getattr("__next__")
assert inferred.pytype() == "builtins.generator"
assert inferred.display_type() == "Generator"
@pytest.mark.skipif(sys.version_info[:2] < (3, 6), reason="needs f-string support")
def test_f_string_correct_line_numbering():
"""Test that we generate correct line numbers for f-strings"""
node = astroid.extract_node(
"""
def func_foo(arg_bar, arg_foo):
dict_foo = {}
f'{arg_bar.attr_bar}' #@
"""
)
assert node.lineno == 5
assert node.last_child().lineno == 5
assert node.last_child().last_child().lineno == 5
@pytest.mark.skipif(
sys.version_info[:2] < (3, 8), reason="needs assignment expressions"
)
def test_assignment_expression():
code = """
if __(a := 1):
pass
if __(b := test):
pass
"""
first, second = astroid.extract_node(code)
assert isinstance(first.target, nodes.AssignName)
assert first.target.name == "a"
assert isinstance(first.value, nodes.Const)
assert first.value.value == 1
assert first.as_string() == "a := 1"
assert isinstance(second.target, nodes.AssignName)
assert second.target.name == "b"
assert isinstance(second.value, nodes.Name)
assert second.value.name == "test"
assert second.as_string() == "b := test"
def test_get_doc():
# A leading string statement becomes the function's docstring...
node = astroid.extract_node(
"""
def func():
"Docstring"
return 1
"""
)
assert node.doc == "Docstring"
# ...while a non-string first statement (Ellipsis) leaves doc as None.
node = astroid.extract_node(
"""
def func():
...
return 1
"""
)
assert node.doc is None
@test_utils.require_version(minver="3.8")
def test_parse_fstring_debug_mode():
    """f-string self-documenting expressions (``{x=}``) parse to JoinedStr."""
    joined = astroid.extract_node('f"{3=}"')
    assert isinstance(joined, nodes.JoinedStr)
    # Debug-mode fields expand to "<expr>={<expr>!r}" when serialized back.
    assert joined.as_string() == "f'3={3!r}'"
@pytest.mark.skipif(not HAS_TYPED_AST, reason="requires typed_ast")
def test_parse_type_comments_with_proper_parent():
code = """
class D: #@
@staticmethod
def g(
x # type: np.array
):
pass
"""
node = astroid.extract_node(code)
func = node.getattr("g")[0]
type_comments = func.args.type_comment_args
assert len(type_comments) == 1
type_comment = type_comments[0]
assert isinstance(type_comment, astroid.Attribute)
assert isinstance(type_comment.parent, astroid.Expr)
assert isinstance(type_comment.parent.parent, astroid.Arguments)
def test_const_itered():
    """itered() on a string Const yields one Const per character."""
    const_node = astroid.extract_node('a = "string"').value
    assert isinstance(const_node, astroid.Const)
    elements = const_node.itered()
    assert len(elements) == 6
    assert [element.value for element in elements] == list("string")
def test_is_generator_for_yield_in_while():
# A `yield` buried inside a nested while condition must still mark the
# enclosing function as a generator.
code = """
def paused_iter(iterable):
while True:
# Continue to yield the same item until `next(i)` or `i.send(False)`
while (yield value):
pass
"""
node = astroid.extract_node(code)
assert bool(node.is_generator())
def test_is_generator_for_yield_in_if():
# A `yield from` used inside an if condition must still mark the
# enclosing function as a generator.
code = """
import asyncio
def paused_iter(iterable):
if (yield from asyncio.sleep(0.01)):
pass
return
"""
node = astroid.extract_node(code)
assert bool(node.is_generator())
def test_is_generator_for_yield_in_aug_assign():
# A `yield` on the right-hand side of an augmented assignment must still
# mark the enclosing function as a generator.
code = """
def test():
buf = ''
while True:
buf += yield
"""
node = astroid.extract_node(code)
assert bool(node.is_generator())
if __name__ == "__main__":
unittest.main()
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/astroid/tests/unittest_nodes.py | Python | mit | 42,822 | [
"VisIt"
] | 25b7f8c3435ac0ec4cc3fc6c21989339f3aa5a0c6f0a3c2e694988c555448abf |
""" The input data resolution module is a plugin that
allows to define VO input data policy in a simple way using existing
utilities in DIRAC or extension code supplied by the VO.
The arguments dictionary from the Job Wrapper includes the file catalogue
result and in principle has all the necessary information to resolve input data
for applications.
"""
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.WorkloadManagementSystem.Client.PoolXMLSlice import PoolXMLSlice
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
__RCSID__ = "$Id$"
COMPONENT_NAME = 'InputDataResolution'
CREATE_CATALOG = False
class InputDataResolution( object ):
  """ Defines the Input Data Policy

      Resolves job input data by executing, in order, the policy modules
      configured in the Operations /InputDataPolicy section, or the modules
      specified directly in the job description.
  """
  #############################################################################
  def __init__( self, argumentsDict ):
    """ Standard constructor

        :param dict argumentsDict: arguments prepared by the Job Wrapper,
               including the file catalogue result for the input data
    """
    self.arguments = argumentsDict
    self.name = COMPONENT_NAME
    self.log = gLogger.getSubLogger( self.name )
    op = Operations()
    # Expose the VO-level input data policy options to the resolution modules
    self.arguments.setdefault( 'Configuration', {} )['AllReplicas'] = op.getValue( 'InputDataPolicy/AllReplicas', False )
    self.arguments['Configuration'].setdefault( 'Protocol', op.getValue( 'InputDataPolicy/Protocols/Local', '' ) )
    self.arguments['Configuration'].setdefault( 'RemoteProtocol', op.getValue( 'InputDataPolicy/Protocols/Remote', [] ) )
    # By default put input data into the current directory
    self.arguments.setdefault( 'InputDataDirectory', 'CWD' )

  #############################################################################
  def execute( self ):
    """Given the arguments from the Job Wrapper, this function calls existing
       utilities in DIRAC to resolve input data.

       :returns: S_OK with the resolution dictionary ({'Successful': ...,
                 'Failed': ...}), or S_ERROR when requested files could not
                 be accessed
    """
    resolvedInputData = self.__resolveInputData()
    if not resolvedInputData['OK']:
      self.log.error( 'InputData resolution failed with result:\n%s' % ( resolvedInputData['Message'] ) )
      return resolvedInputData

    # For local running of this module we can expose an option to ignore missing files
    ignoreMissing = self.arguments.get( 'IgnoreMissing', False )

    # Missing some of the input files is a fatal error unless ignoreMissing option is defined
    failedReplicas = resolvedInputData['Value'].get( 'Failed', {} )
    if failedReplicas and not ignoreMissing:
      self.log.error( 'Failed to obtain access to the following files:\n%s'
                      % ( '\n'.join( sorted( failedReplicas ) ) ) )
      return S_ERROR( 'Failed to access some of requested input data' )

    if not resolvedInputData['Value'].get( 'Successful' ):
      return S_ERROR( 'Could not access any requested input data' )

    if CREATE_CATALOG:
      res = self._createCatalog( resolvedInputData )
      if not res['OK']:
        return res

    return resolvedInputData

  #############################################################################
  def _createCatalog( self, resolvedInputData, catalogName = 'pool_xml_catalog.xml', pfnType = 'ROOT_All' ):
    """ By default uses PoolXMLSlice, VO extensions can modify at will

        :param dict resolvedInputData: either the full S_OK structure returned
               by input data resolution, or the bare resolution dictionary
        :param str catalogName: output catalog file name (overridden by the
               'CatalogName' configuration option if present)
        :param str pfnType: PFN file type recorded for every LFN
    """
    # Accept both the S_OK structure (as passed from execute(), where the
    # payload lives under 'Value') and a bare resolution dictionary; the
    # previous code raised a KeyError when handed the S_OK structure.
    resolvedData = resolvedInputData.get( 'Value', resolvedInputData )['Successful']
    tmpDict = {}
    for lfn, mdata in resolvedData.items():
      # NOTE: the metadata dictionaries are annotated in place with 'pfntype'
      tmpDict[lfn] = mdata
      tmpDict[lfn]['pfntype'] = pfnType
      self.log.verbose( 'Adding PFN file type %s for LFN:%s' % ( pfnType, lfn ) )
    catalogName = self.arguments['Configuration'].get( 'CatalogName', catalogName )
    self.log.verbose( 'Catalog name will be: %s' % catalogName )
    appCatalog = PoolXMLSlice( catalogName )
    return appCatalog.execute( tmpDict )

  #############################################################################
  def __resolveInputData( self ):
    """This method controls the execution of the DIRAC input data modules according
       to the VO policy defined in the configuration service.
    """
    site = self.arguments['Configuration'].get( 'SiteName', DIRAC.siteName() )
    self.arguments.setdefault( 'Job', {} )
    policy = self.arguments['Job'].get( 'InputDataPolicy', [] )
    if policy:
      # In principle this can be a list of modules with the first taking
      # precedence. A single module may be given as a plain string in the
      # JDL; wrap it in a list. Checking against (list, tuple) also avoids
      # the Python 2-only `basestring` builtin (NameError on Python 3).
      if not isinstance( policy, ( list, tuple ) ):
        policy = [policy]
      self.log.info( 'Job has a specific policy setting: %s' % ( ', '.join( policy ) ) )
    else:
      self.log.debug( 'Attempting to resolve input data policy for site %s' % site )
      inputDataPolicy = Operations().getOptionsDict( 'InputDataPolicy' )
      if not inputDataPolicy['OK']:
        return S_ERROR( 'Could not resolve InputDataPolicy from Operations InputDataPolicy' )
      options = inputDataPolicy['Value']
      # A site-specific policy entry wins over the 'Default' entry
      policy = options.get( site, options.get( 'Default', [] ) )
      if policy:
        policy = [x.strip() for x in policy.split( ',' )]
        prStr = 'Found specific' if site in options else 'Applying default'
        self.log.info( '%s input data policy for site %s:\n%s' % ( prStr, site, '\n'.join( policy ) ) )

    dataToResolve = []  # if empty, all supplied input data is resolved
    successful = {}
    for modulePath in policy:
      result = self.__runModule( modulePath, dataToResolve )
      if not result['OK']:
        self.log.warn( 'Problem during %s execution' % modulePath )
        return result
      result = result['Value']
      successful.update( result.get( 'Successful', {} ) )
      # Replicas a module failed on are handed over to the next module
      dataToResolve = result.get( 'Failed', [] )
      if dataToResolve:
        self.log.info( '%s failed for the following files:\n%s'
                       % ( modulePath, '\n'.join( dataToResolve ) ) )
      else:
        self.log.info( 'All replicas resolved after %s execution' % ( modulePath ) )
        break

    if successful:
      self.log.verbose( 'Successfully resolved:', str( successful ) )
    return S_OK( {'Successful': successful, 'Failed': dataToResolve} )

  #############################################################################
  def __runModule( self, modulePath, remainingReplicas ):
    """This method provides a way to run the modules specified by the VO that
       govern the input data access policy for the current site. Using the
       InputDataPolicy section from Operations different modules can be defined
       for particular sites or for InputDataPolicy defined in the JDL of jobs.

        :param str modulePath: fully qualified module name to instantiate
        :param list remainingReplicas: LFNs still to resolve ([] means all)
    """
    self.log.info( 'Attempting to run %s' % ( modulePath ) )
    moduleFactory = ModuleFactory()
    moduleInstance = moduleFactory.getModule( modulePath, self.arguments )
    if not moduleInstance['OK']:
      return moduleInstance
    return moduleInstance['Value'].execute( remainingReplicas )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Andrew-McNab-UK/DIRAC | WorkloadManagementSystem/Client/InputDataResolution.py | Python | gpl-3.0 | 6,862 | [
"DIRAC"
] | 0a3286a0dacb306cd8c7441c4209ca3abd813a19e409544efabbd665e01fcafe |
# stdlib imports
import os
import uuid
# django imports
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Max
from django.db.models.signals import post_save
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils import timezone
# third-party imports
from easy_thumbnails.fields import ThumbnailerImageField
from django_countries.fields import CountryField
# ==============================================
# CitationUrl
# ==============================================
class CitationUrl(models.Model):
    """A URL used as a citation reference for user-provided content."""

    url = models.URLField(max_length=500)

    def __str__(self):
        return self.url
    pass
# ==============================================
# Feature
# ==============================================
class Feature(models.Model):
    """A classification dimension that systems can be tagged with."""

    slug = models.SlugField(db_index=True, unique=True)
    label = models.CharField(max_length=100, unique=True)
    # Whether a system may select more than one option for this feature.
    multivalued = models.BooleanField(default=True)

    class Meta:
        ordering = ('label',)

    def __str__(self):
        return self.label
    pass
# ==============================================
# FeatureOption
# ==============================================
class FeatureOption(models.Model):
    """One selectable value belonging to a Feature."""

    feature = models.ForeignKey('Feature', models.CASCADE, related_name='options')
    # The slug is only unique per feature, not globally (see Meta.unique_together).
    slug = models.SlugField(db_index=True, unique=False)
    value = models.CharField(max_length=100)

    class Meta:
        unique_together = ('feature','slug')

    def __str__(self):
        return self.value
    pass
# ==============================================
# License
# ==============================================
class License(models.Model):
    """A software license that a system can be distributed under."""

    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=64)
    url = models.URLField(blank=True, max_length=512)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
    pass
# ==============================================
# OperatingSystem
# ==============================================
class OperatingSystem(models.Model):
    """An operating system that a system can run on."""

    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=100)
    url = models.URLField(blank=True, max_length=500)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
    pass
# ==============================================
# ProgrammingLanguage
# ==============================================
class ProgrammingLanguage(models.Model):
    """A programming language (used both for 'written in' and 'supports')."""

    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=100)
    url = models.URLField(blank=True, max_length=500)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
    pass
# ==============================================
# ProjectType
# ==============================================
class ProjectType(models.Model):
    """A coarse project category (referenced by SystemVersion.project_types)."""

    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=32)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
    pass
# ==============================================
# Publication
# ==============================================
class Publication(models.Model):
    """A paper or article about a system, with optional BibTeX/citation text."""

    title = models.CharField(max_length=250)
    authors = models.CharField(blank=True, max_length=250)
    year = models.PositiveIntegerField(blank=True, null=True)
    # NOTE(review): the purpose of `number` is not evident from this file
    # (issue number? ordering?) -- confirm against its consumers.
    number = models.IntegerField(default=1, null=True)
    url = models.URLField(blank=True, max_length=500)
    bibtex = models.TextField(blank=True)
    cite = models.TextField(blank=True)

    def __str__(self):
        return self.title
    pass
# ==============================================
# SuggestedSystem
# ==============================================
class SuggestedSystem(models.Model):
    """A visitor-submitted system suggestion awaiting approval."""

    name = models.CharField(max_length=100)
    description = models.TextField(default=None, null=True, blank=True)
    email = models.EmailField(max_length=100)
    approved = models.BooleanField()
    # secret_key: presumably a token used to verify/manage the submission --
    # confirm against the views that consume it.
    secret_key = models.UUIDField(max_length=36, default=uuid.uuid4)
    url = models.URLField(blank=True, max_length=500)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
    pass
# ==============================================
# System
# ==============================================
class System(models.Model):
    """A database system. The editable content lives in SystemVersion rows
    (related_name='versions'); this row holds identity and counters."""

    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=64, blank=False)
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)
    # secret_key: presumably an edit-authorization token -- confirm usage.
    secret_key = models.UUIDField(max_length=36, default=uuid.uuid4)
    view_count = models.PositiveIntegerField(default=0)
    # Mirrors the `ver` of the current SystemVersion (kept in sync by the
    # pre_save signal / SystemVersion.update_version).
    ver = models.PositiveIntegerField('Version No.', default=1)

    class Meta:
        ordering = ('slug',)

    def __hash__(self):
        # NOTE(review): __hash__ is overridden without __eq__; Django's
        # default Model.__eq__ still applies -- confirm this is intended.
        return hash((
            self.id,
            self.name,
            self.created,
            self.ver,
            self.secret_key
        ))

    def __str__(self):
        return self.name

    def current(self):
        """Return the current SystemVersion; for an unsaved System, a fresh
        unsaved revision is returned instead."""
        if self.id is None:
            return SystemVersion(system=self)
        return self.versions.get(is_current=True)

    def get_absolute_url(self):
        return reverse('system', args=[self.slug])
    pass
# ==============================================
# SystemACL
# ==============================================
class SystemACL(models.Model):
    """Grants a user permission on a system (labelled "Permission" in admin)."""

    system = models.ForeignKey('System', models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT)
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)

    class Meta:
        unique_together = ('system', 'user')
        verbose_name = "Permission"

    def __str__(self):
        return "(%s, %s)" % (self.system.name, self.user.username)
    pass
# ==============================================
# SystemFeature
# ==============================================
class SystemFeature(models.Model):
    """Join between a SystemVersion and a Feature, carrying the selected
    options, a free-text description, and supporting citations."""

    # Note: `system` points at a SystemVersion (not a System).
    system = models.ForeignKey('SystemVersion', models.CASCADE, related_name='features')
    feature = models.ForeignKey('Feature', models.CASCADE, related_name='system_features')
    citations = models.ManyToManyField('CitationUrl', related_name='system_features')
    options = models.ManyToManyField('FeatureOption', related_name='system_features')
    description = models.TextField(blank=True, help_text='This field supports Markdown Syntax')

    class Meta:
        unique_together = ('system','feature')

    def __str__(self):
        return '{} > {}'.format(self.system.system.name, self.feature.label)

    def values_str(self):
        """Comma-separated, human-readable list of the selected options."""
        return ', '.join([str(l) for l in self.options.all()])
    pass
# ==============================================
# SystemRedirect
# ==============================================
class SystemRedirect(models.Model):
    """Maps a retired/alternate slug to its System so old URLs keep working."""

    system = models.ForeignKey('System', models.CASCADE, related_name='redirects')
    slug = models.SlugField(unique=True)

    def __str__(self):
        # Fix: this model has no `name` field, so the previous
        # `return self.name` always raised AttributeError.
        return self.slug
    pass
# ==============================================
# SystemVisit
# ==============================================
class SystemVisit(models.Model):
    """Log entry for one page visit of a system (IP address + user agent)."""

    system = models.ForeignKey('System', models.CASCADE, related_name='visits')
    ip_address = models.GenericIPAddressField(null=False)
    user_agent = models.CharField(max_length=128, blank=True, null=False)
    created = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return "(%s, %s, %s)" % (self.system.name, self.ip_address, str(self.created))

    class Meta:
        verbose_name = "Visit"
    pass
# ==============================================
# SystemRecommendation
# ==============================================
class SystemRecommendation(models.Model):
    """A scored "related system" edge from one System to another."""

    system = models.ForeignKey('System', models.CASCADE, related_name='recommendation_to')
    recommendation = models.ForeignKey('System', models.CASCADE, related_name='recommendation_from')
    score = models.FloatField(blank=True, null=True)
    created = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return "(%s, %s)" % (self.system.name, self.recommendation.name)

    class Meta:
        verbose_name = "Recommendation"
        unique_together = ('system', 'recommendation')
    pass
# ==============================================
# SystemVersion
# ==============================================
class SystemVersion(models.Model):
    """One revision of a System's curated content.

    Every edit creates a new SystemVersion; exactly one revision per system
    carries is_current=True (maintained by systemversion_pre_save /
    update_version).
    """

    # Internal Version Meta-data
    system = models.ForeignKey('System', models.CASCADE, related_name='versions')
    creator = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT)
    meta = models.ForeignKey('SystemVersionMetadata', models.SET_NULL, blank=True, null=True)
    ver = models.PositiveIntegerField('Version No.', default=1)
    is_current = models.BooleanField(default=True)
    comment = models.TextField(blank=True)
    created = models.DateTimeField(default=timezone.now)

    # Fields with citations
    description = models.TextField(
        blank=True,
        help_text="This field support Markdown Syntax")
    description_citations = models.ManyToManyField(
        'CitationUrl', blank=True,
        related_name='version_descriptions')
    start_year = models.PositiveIntegerField(
        blank=True, null=True)
    start_year_citations = models.ManyToManyField(
        'CitationUrl', blank=True,
        related_name='version_start_years')
    end_year = models.PositiveIntegerField(
        blank=True, null=True)
    end_year_citations = models.ManyToManyField(
        'CitationUrl', blank=True,
        related_name='version_end_years')
    history = models.TextField(
        blank=True,
        help_text="This field support Markdown Syntax")
    history_citations = models.ManyToManyField(
        'CitationUrl', blank=True,
        related_name='version_histories')
    acquired_by = models.CharField(
        blank=True, max_length=32,
        help_text="Name of the company that first acquired the DBMS")
    acquired_by_citations = models.ManyToManyField(
        'CitationUrl', blank=True,
        related_name='version_acquired_bys')

    # General Information Fields
    project_types = models.ManyToManyField(
        'ProjectType', blank=True,
        related_name='project_types',
        verbose_name='Project Type')
    developer = models.CharField(
        blank=True, max_length=500,
        help_text="The original organization that developed the DBMS.")
    logo = ThumbnailerImageField(
        blank=True, upload_to='logos/')
    countries = CountryField(
        blank=True, multiple=True,
        verbose_name="Countries of Origin",
        help_text="Country of where the DBMS company or project started")
    former_names = models.CharField(
        blank=True, max_length=100,
        help_text="Previous names of the system")

    # URLs
    url = models.URLField(
        blank=True, max_length=500,
        help_text="URL of the DBMS company or project")
    tech_docs = models.URLField(
        blank=True, max_length=500,
        help_text="URL of the where to find technical documentation about the DBMS")
    source_url = models.URLField(
        blank=True, max_length=500,
        verbose_name="Source Code URL",
        help_text="URL of where to download source code (if available)")
    wikipedia_url = models.URLField(
        blank=True, max_length=500,
        verbose_name="Wikipedia URL",
        help_text="URL of Wikipedia article about this system (if available)")
    twitter_handle = models.CharField(
        blank=True, max_length=100,
        help_text="Twitter account for the database (avoid company account if possible)")

    class Meta:
        ordering = ('-ver',)
        unique_together = ('system','ver')

    def __hash__(self):
        return hash((
            self.created,
            self.id,
            self.system,
            self.ver,
        ))

    def __str__(self):
        return '{} - Ver#{}'.format(self.system.name, self.ver)

    def get_absolute_url(self):
        return reverse('system_revision_view', args=[self.system.slug, self.ver])

    def project_types_str(self):
        """Comma-separated names of the selected project types."""
        return ', '.join( self.project_types.values_list('name', flat=True) )

    def update_version(self):
        """Assign the next version number to a not-yet-saved revision, demote
        the previous current revision, and sync the parent System's `ver`.

        Fix: the body previously referenced the undefined name `instance`
        (copy-paste from the systemversion_pre_save signal handler), which
        raised NameError on every call; it now operates on `self`.
        """
        created = self.id is None
        if created:
            aggregates = SystemVersion.objects.filter(system=self.system).aggregate(max_ver=Max('ver'))
            max_ver = aggregates['max_ver']
            if max_ver is None:
                # Very first revision of this system.
                self.ver = 1
            else:
                # Demote all older revisions before taking the next number.
                SystemVersion.objects.filter(system=self.system).update(is_current=False)
                self.ver = max_ver + 1
            self.system.ver = self.ver
            self.system.save()
        return

    def twitter_handle_url(self):
        """URL of the system's Twitter account (leading '@' stripped)."""
        return settings.TWITTER_URL + self.twitter_handle.replace('@', '')

    def twitter_card_url(self):
        return settings.TWITTER_CARD_URL + self.get_twitter_card_image()

    def get_twitter_card_image(self):
        """File name of the per-system Twitter card image."""
        return self.system.slug + ".png"

    def create_twitter_card(self):
        """Render and save the Twitter card PNG for this system.

        Pastes the (resized) logo -- or, when no logo exists, the system name
        rendered as text -- onto the card template, and writes the result to
        settings.TWITTER_CARD_ROOT. Returns the output path.
        """
        from PIL import Image, ImageDraw, ImageFont

        # Create a nicely formatted version of the logo for the twitter card
        template = os.path.join(settings.BASE_DIR, "static", settings.TWITTER_CARD_TEMPLATE)
        im1 = Image.open(template).convert("RGBA")
        new_im = Image.new('RGBA', (im1.width, im1.height))
        new_im.paste(im1, (0, 0))

        # If there is no logo, then we will create an image of just the name
        if not self.logo:
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 128)
            name = self.system.name
            text_size = font.getsize(name)
            if name.find(" ") != -1:
                # Break multi-word names onto separate lines.
                name = name.replace(" ", "\n")
                # Compute dimension of each line
                text_size = [0, 0]
                for line in name.split("\n"):
                    line_size = font.getsize(line)
                    text_size[0] = max(text_size[0], line_size[0])
                    text_size[1] += line_size[1] + 5
            # Fix: Image.new requires a tuple; text_size may be a list here.
            logo = Image.new('RGBA', tuple(text_size))
            text_draw = ImageDraw.Draw(logo)
            text_draw.text((0, 0), name, font=font, fill=(70, 70, 70, 255))
        else:
            logo = Image.open(self.logo).convert("RGBA")

        # Scale the logo to fit inside the card's max dimensions,
        # preserving aspect ratio.
        new_size = (0, 0)
        if logo.width > logo.height:
            ratio = (settings.TWITTER_CARD_MAX_WIDTH / float(logo.size[0]))
            new_size = (settings.TWITTER_CARD_MAX_WIDTH, int((float(logo.size[1]) * float(ratio))))
        else:
            ratio = (settings.TWITTER_CARD_MAX_HEIGHT / float(logo.size[1]))
            new_size = (int((float(logo.size[0]) * float(ratio))), settings.TWITTER_CARD_MAX_HEIGHT)

        # Check if either the new width or height exceed the max dimensions
        # We have to do this because the dimensions are not square
        if new_size[0] > settings.TWITTER_CARD_MAX_WIDTH:
            ratio = (settings.TWITTER_CARD_MAX_WIDTH / float(new_size[0]))
            new_size = (settings.TWITTER_CARD_MAX_WIDTH, int((float(new_size[1]) * float(ratio))))
        elif new_size[1] > settings.TWITTER_CARD_MAX_HEIGHT:
            ratio = (settings.TWITTER_CARD_MAX_HEIGHT / float(new_size[1]))
            new_size = (int((float(new_size[0]) * float(ratio))), settings.TWITTER_CARD_MAX_HEIGHT)

        # Resize the logo. NOTE: Image.ANTIALIAS was removed in Pillow 10
        # (use Image.LANCZOS when upgrading).
        logo = logo.resize(new_size, Image.ANTIALIAS)

        # Center the logo inside the white area of the card template.
        offset = (settings.TWITTER_CARD_BASE_OFFSET_X + settings.TWITTER_CARD_MARGIN + (settings.TWITTER_CARD_MAX_WIDTH - logo.width) // 2, \
                  settings.TWITTER_CARD_MARGIN + (settings.TWITTER_CARD_MAX_HEIGHT - logo.height) // 2)
        new_im.paste(logo, offset, logo)

        card_img = os.path.join(settings.TWITTER_CARD_ROOT, self.get_twitter_card_image())
        new_im.save(card_img)
        return card_img
# ==============================================
# SystemVersionMetadata
# ==============================================
class SystemVersionMetadata(models.Model):
    """Bundle of many-to-many relations attached to a SystemVersion
    (linked from SystemVersion.meta)."""

    derived_from = models.ManyToManyField(
        'System', blank=True,
        related_name='derived_from_systems',
        verbose_name='Systems Derived From',
        help_text="Systems that this system's source code is based on")
    embedded = models.ManyToManyField(
        'System', blank=True,
        related_name='embedded_systems',
        verbose_name='Systems Embedded',
        help_text="Systems that this system uses on the inside (e.g., storage manager)")
    inspired_by = models.ManyToManyField(
        'System', blank=True,
        related_name='inspired_by_systems',
        verbose_name='Systems Inspired By',
        help_text="Systems used for inspiration in its design but did not rely on source code")
    compatible_with = models.ManyToManyField(
        'System', blank=True,
        related_name='compatible_with_systems',
        verbose_name='Systems Compatible With',
        help_text="Other systems that this system is compatible with (e.g., wire protocol, file formats).")
    licenses = models.ManyToManyField(
        'License', blank=True,
        related_name='systems_licenses')
    oses = models.ManyToManyField(
        'OperatingSystem', blank=True,
        related_name='systems_oses',
        verbose_name='Operating Systems')
    publications = models.ManyToManyField(
        'Publication', blank=True,
        related_name='systems_publications')
    supported_languages = models.ManyToManyField(
        'ProgrammingLanguage', blank=True,
        related_name='systems_supported',
        verbose_name='Supported Languages')
    written_in = models.ManyToManyField(
        'ProgrammingLanguage', blank=True,
        related_name='systems_written')

    def __str__(self):
        # NOTE(review): systemversion_set.first() returns None for an
        # orphaned metadata row, which would raise AttributeError here.
        system = self.systemversion_set.first()
        return '{} - {} Meta'.format(system.system.name, system.ver)

    # The *_str helpers below render each relation as a comma-separated
    # string for display.
    def derived_from_str(self):
        return ', '.join([str(l) for l in self.derived_from.all()])

    def embedded_str(self):
        return ', '.join([str(l) for l in self.embedded.all()])

    def compatible_with_str(self):
        return ', '.join([str(l) for l in self.compatible_with.all()])

    def inspired_by_str(self):
        return ', '.join([str(l) for l in self.inspired_by.all()])

    def licenses_str(self):
        return ', '.join([str(l) for l in self.licenses.all()])

    def oses_str(self):
        return ', '.join([str(l) for l in self.oses.all()])

    def publications_str(self):
        return ', '.join([str(l) for l in self.publications.all()])

    def supported_languages_str(self):
        return ', '.join([str(l) for l in self.supported_languages.all()])

    def written_in_str(self):
        return ', '.join([str(l) for l in self.written_in.all()])
    pass
# Public API for `from <module> import *`.
# Fix: CitationUrl and SystemRedirect were missing even though they are
# concrete models defined in this module (backward-compatible addition).
__all__ = (
    'CitationUrl',
    'Feature',
    'FeatureOption',
    'License',
    'OperatingSystem',
    'ProgrammingLanguage',
    'ProjectType',
    'Publication',
    'SuggestedSystem',
    'System',
    'SystemFeature',
    'SystemRedirect',
    'SystemVersion',
    'SystemACL',
    'SystemRecommendation',
    'SystemVisit',
    'SystemVersionMetadata',
)
# signal handlers
@receiver(pre_save, sender=SystemVersion)
def systemversion_pre_save(sender, **kwargs):
    """Before the first save of a SystemVersion: demote older revisions,
    assign the next version number, and sync the parent System's `ver`."""
    instance = kwargs['instance']
    update_fields = kwargs['update_fields']  # unused; kept for reference
    if instance.id is not None:
        # Updates to an existing revision keep their version number.
        return
    siblings = SystemVersion.objects.filter(system=instance.system)
    max_ver = siblings.aggregate(max_ver=Max('ver'))['max_ver']
    if max_ver is None:
        # First revision ever recorded for this system.
        instance.ver = 1
    else:
        siblings.update(is_current=False)
        instance.ver = max_ver + 1
    instance.system.ver = instance.ver
    instance.system.save()
| cmu-db/dbdb.io | dbdb/core/models.py | Python | apache-2.0 | 20,273 | [
"VisIt"
] | a3146a40640abd361d4bb7b081a48fd99b076d22358c3a47dd12b7a42f43cf2c |
""" LAMMPS Process
This module provides a way to run the lammps or liggghts process
"""
import os
import subprocess
class LammpsProcess(object):
    """Runs the lammps/liggghts executable, feeding it commands on stdin.

    Parameters
    ----------
    lammps_name : str
        name of LAMMPS executable
    log_directory : str, optional
        directory where LAMMPS writes its 'log.lammps' file. Defaults to
        the current working directory.

    Raises
    ------
    RuntimeError
        if Lammps did not run correctly
    """

    def __init__(self, lammps_name="lammps", log_directory=None):
        self._lammps_name = lammps_name
        self._returncode = 0
        self._stderr = ""
        self._stdout = ""
        self._log = (os.path.join(log_directory, 'log.lammps')
                     if log_directory else 'log.lammps')

        # Probe the executable once so a bad name fails fast.
        try:
            self.run(" ")
        except Exception:
            msg = "LAMMPS could not be started."
            if self._returncode == 127:
                msg += " executable '{}' was not found.".format(lammps_name)
            else:
                msg += " stdout/err: " + self._stdout + " " + self._stderr
            raise RuntimeError(msg)

    def run(self, commands):
        """Launch a fresh lammps process, feed it `commands`, and wait.

        Parameters
        ----------
        commands : str
            set of commands to run

        Raises
        ------
        RuntimeError
            if Lammps did not run correctly
        """
        child = subprocess.Popen(
            [self._lammps_name, '-log', self._log],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        self._stdout, self._stderr = child.communicate(commands)
        self._returncode = child.returncode

        # Any stderr output is treated as a failure, even with exit code 0.
        if self._returncode != 0 or self._stderr:
            msg = "LAMMPS ('{}') did not run correctly. ".format(
                self._lammps_name)
            msg += "Error code: {} ".format(child.returncode)
            if self._stderr:
                msg += "stderr: \'{}\n\' ".format(self._stderr)
            if self._stdout:
                msg += "stdout: \'{}\n\'".format(self._stdout)
            raise RuntimeError(msg)
| simphony/simphony-lammps-md | simlammps/io/lammps_process.py | Python | bsd-2-clause | 2,260 | [
"LAMMPS"
] | 63f3c066ee69c4c063d60450fbea9723b2b8a26f7d5e6b087456027e4f4729fe |
#!/usr/bin/env python
"""
extract_lammps_data.py is a simple script which extracts sections of text from
a LAMMPS data file.
Typical usage:
extract_lammps_data.py SECTION_NAME < FILE.DATA > EXCERPT.TXT
This extracts a section from a LAMMPS data file and saves it in EXCERPT.TXT.
More general usage:
extract_lammps_data.py [-n] SECTION_LIST < FILE.DATA > EXCERPT.TXT
For more details, see "doc/utils/docs_extract_lammps_data.txt"
"""
import sys
# Section headers recognized in a LAMMPS data file.
# Fix: 'Angles By Type' was listed twice (a duplicate is a no-op in a set);
# the second occurrence was clearly meant to be 'Impropers By Type',
# completing the angle/dihedral/improper "By Type" triple.
lammps_data_sections = set(['Atoms',
                            'Masses',
                            'Bonds',
                            'Bond Coeffs',
                            'Angles',
                            'Angle Coeffs',
                            'Dihedrals',
                            'Dihedral Coeffs',
                            'Impropers',
                            'Improper Coeffs',
                            'BondBond Coeffs',           # class2 angles
                            'BondAngle Coeffs',          # class2 angles
                            'MiddleBondTorsion Coeffs',  # class2 dihedrals
                            'EndBondTorsion Coeffs',     # class2 dihedrals
                            'AngleTorsion Coeffs',       # class2 dihedrals
                            'AngleAngleTorsion Coeffs',  # class2 dihedrals
                            'BondBond13 Coeffs',         # class2 dihedrals
                            'AngleAngle Coeffs',         # class2 impropers
                            'Angles By Type',            # new. not standard LAMMPS
                            'Dihedrals By Type',         # new. not standard LAMMPS
                            'Impropers By Type'])        # new. not standard LAMMPS
def DeleteComments(string,
                   escape='\\',
                   comment_char='#'):
    """Return `string` truncated at the first unescaped comment character.

    A character in `escape` (default: backslash) escapes the character that
    follows it, so an escaped comment character does not start a comment.

    Fix: the escape state is now cleared after the escaped character is
    consumed. Previously a single backslash left the "escaped" flag set for
    the rest of the line, so every later (real) comment character was
    silently ignored.
    """
    escaped_state = False
    for i, ch in enumerate(string):
        if ch in escape:
            # Toggle so two escapes in a row cancel each other.
            escaped_state = not escaped_state
        elif ch == comment_char and not escaped_state:
            return string[0:i]
        else:
            # An ordinary character consumes any pending escape.
            escaped_state = False
    return string
def ExtractDataSection(f,
                       section_name,
                       comment_char='#',
                       include_section_name=False,
                       return_line_nums=False):
    """Yield the lines (or, with return_line_nums, the 0-based line numbers)
    of `f` that belong to the named LAMMPS data-file section.

    f: iterable of lines.
    section_name: a section header (e.g. 'Atoms'), or 'header'/'Header' for
        everything before the first recognized section.
    include_section_name: also yield the section-header line and the blank
        lines terminating the section.
    """
    inside_section = False
    if section_name in ('header', 'Header'):  # "Header" section includes beginning
        inside_section = True
    nonblank_encountered = False
    nonheader_encountered = False
    i = 0  # running 0-based line number
    for line_orig in f:
        return_this_line = False
        # Compare against the comment-stripped, whitespace-trimmed line.
        line = DeleteComments(line_orig).strip()
        if line in lammps_data_sections:
            nonheader_encountered = True
        if section_name in ('header', 'Header'):
            # The "header" section includes all lines at the beginning of the
            # before any other section is encountered.
            if nonheader_encountered:
                return_this_line = False
            else:
                return_this_line = True
        elif line == section_name:
            inside_section = True
            nonblank_encountered = False
            if include_section_name:
                return_this_line = True
        # A block of blank lines (which dont immediately follow
        # the section_name) signal the end of a section:
        elif len(line) == 0:
            if inside_section and include_section_name:
                return_this_line = True
            if nonblank_encountered:
                inside_section = False
        elif line[0] != comment_char:
            if inside_section:
                nonblank_encountered = True
                return_this_line = True
        if return_this_line:
            if return_line_nums:
                yield i
            else:
                yield line_orig
        i += 1
def main():
    """Command-line entry point.

    Usage: extract_lammps_data.py [-n] SECTION...   (data file on stdin)
    Prints the named sections to stdout, or with -n everything EXCEPT
    those sections.
    """
    content = sys.stdin.readlines()
    invert = (sys.argv[1] == '-n')
    if invert:
        # Drop the flag so the remaining arguments are all section names.
        del sys.argv[1]
        excluded = set()
        for section_name in sys.argv[1:]:
            for line_num in ExtractDataSection(content,
                                               section_name,
                                               include_section_name=True,
                                               return_line_nums=True):
                excluded.add(line_num)
        for idx, text_line in enumerate(content):
            if idx not in excluded:
                sys.stdout.write(text_line)
    else:
        for section_name in sys.argv[1:]:
            for text_line in ExtractDataSection(content, section_name):
                sys.stdout.write(text_line)
    return


if __name__ == "__main__":
    main()
| quang-ha/lammps | tools/moltemplate/moltemplate/extract_lammps_data.py | Python | gpl-2.0 | 4,790 | [
"LAMMPS"
] | b1718ac0c640e92416e5c0706cf01a58bdb449cef286be17f9fe2316fb3cb728 |
# Copyright (C) 2015 Hydriz Scholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA, or visit
# <http://www.gnu.org/copyleft/gpl.html>
import DBRCore
class DBRuserprefs:
    """Database report generator for user-preference statistics.

    Builds four wikitables (gender, language, skin, gadgets) from the
    MediaWiki user_properties table and publishes them to the wiki.
    Python 2 module; run via dbr.py, not directly.
    """

    def __init__( self, db='' ):
        # Query helper and wiki publisher, both bound to the given database.
        self.dbquery = DBRCore.DBQuery( db )
        self.Wiki = DBRCore.Wiki( db )

    def execute( self ):
        """Run the four aggregation queries, render the wikitext page, and
        write it to the wiki page "User preferences"."""
        title = "User preferences"
        # Page template: %s slots are (data-as-of timestamp, gender rows,
        # language rows, skin rows, gadget rows); %% escapes literal '%'.
        template = '''User preferences statistics; data as of <onlyinclude>%s</onlyinclude>.
== Gender ==
{| class="wikitable sortable plainlinks" style="width:80%%;"
|- style="white-space:nowrap;"
! Gender
! Users
|-
%s
|}
== Language ==
{| class="wikitable sortable plainlinks" style="width:80%%;"
|- style="white-space:nowrap;"
! Language code
! Language name
! Users
|-
%s
|}
== Skin ==
{| class="wikitable sortable plainlinks" style="width:80%%;"
|- style="white-space:nowrap;"
! Skin
! Users
|-
%s
|}
== Gadgets ==
{| class="wikitable sortable plainlinks" style="width:80%%;"
|- style="white-space:nowrap;"
! Gadget
! Users
|-
%s
|}
[[Category:{{subst:SITENAME}} database reports|{{SUBPAGENAME}}]]
'''
        # One aggregation per preference kind. The gadgets query uses the
        # anonymized view and counts only enabled (up_value = 1) gadgets.
        genderquery = "SELECT up_value, COUNT(*) FROM user_properties WHERE up_property = 'gender' GROUP BY up_value;"
        languagequery = "SELECT up_value, COUNT(*) FROM user_properties WHERE up_property = 'language' GROUP BY up_value;"
        skinquery = "SELECT up_value, COUNT(*) FROM user_properties WHERE up_property = 'skin' GROUP BY up_value;"
        gadgetsquery = "SELECT up_property, COUNT(*) FROM user_properties_anon WHERE up_property LIKE 'gadget-%' AND up_value = 1 GROUP BY up_property;"

        gender = self.dbquery.execute( genderquery )
        gender_output = []
        for genderrow in gender:
            # Localize the gender label through the wiki's system messages.
            up_value = '{{MediaWiki:gender-%s}}' % genderrow[0]
            count = genderrow[1]
            table_row = '''\
| %s
| %s
|-''' % (up_value, count)
            gender_output.append(table_row)

        language = self.dbquery.execute( languagequery )
        language_output = []
        for languagerow in language:
            lang_code = languagerow[0]
            # {{#language:}} renders the language's autonym on the wiki.
            lang_name = '{{#language:%s}}' % languagerow[0]
            count = languagerow[1]
            table_row = u'''\
| %s
| %s
| %s
|-''' % (lang_code, lang_name, count)
            language_output.append(table_row)

        skin = self.dbquery.execute( skinquery )
        skin_output = []
        for skinrow in skin:
            up_value = '{{MediaWiki:skinname-%s}}' % skinrow[0]
            count = skinrow[1]
            table_row = u'''\
| %s
| %s
|-''' % (up_value, count)
            skin_output.append(table_row)

        gadgets = self.dbquery.execute( gadgetsquery )
        gadgets_output = []
        for gadgetsrow in gadgets:
            # Link each gadget to its MediaWiki definition page, showing the
            # name without the 'gadget-' prefix.
            up_property = '[[MediaWiki:%s|%s]]' % (gadgetsrow[0], gadgetsrow[0].split('gadget-', 1)[1])
            count = gadgetsrow[1]
            table_row = u'''\
| %s
| %s
|-''' % (up_property, count)
            gadgets_output.append(table_row)

        contents = template % ( self.Wiki.getDataAsOf(), '\n'.join(gender_output), '\n'.join(language_output), '\n'.join(skin_output), '\n'.join(gadgets_output) )
        self.Wiki.outputToWiki( title, contents )
if __name__ == "__main__":
    # Reports are launched through the dbr.py driver, never standalone.
    print "This module should not be called directly! Please use dbr.py to run the database reports."
"VisIt"
] | 0366ee20325afceed420ea5f3d3f9f92a565cc36fd8e3d4ae859bd7bd3b6e9e5 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.electrostatics
@utx.skipIfMissingFeatures(["ELECTROSTATICS"])
class ElectrostaticInteractionsTests(ut.TestCase):
    """Checks ESPResSo's electrostatic solvers (P3M, Debye-Hueckel,
    Reaction Field) against analytic potentials/forces for a +1/-1
    charge pair."""

    # Handle to espresso system
    system = espressomd.System(box_l=[20., 20., 20.])

    def setUp(self):
        # Two opposite unit charges 2 sigma apart along x.
        self.system.time_step = 0.01
        self.system.part.add(pos=(9.0, 2.0, 2.0), q=1)
        self.system.part.add(pos=(11.0, 2.0, 2.0), q=-1)

    def tearDown(self):
        self.system.part.clear()
        self.system.actors.clear()

    def calc_dh_potential(self, r, dh_params):
        """Analytic Debye-Hueckel pair potential on the distances `r`
        (zero beyond the cutoff)."""
        kT = 1.0
        q1, q2 = self.system.part[:].q
        u = np.zeros_like(r)
        # r<r_cut
        i = np.where(r < dh_params['r_cut'])[0]
        u[i] = dh_params['prefactor'] * kT * q1 * \
            q2 * np.exp(-dh_params['kappa'] * r[i]) / r[i]
        return u

    def calc_rf_potential(self, r, rf_params):
        """Calculates the potential of the ReactionField coulomb method"""
        kT = 1.0
        q1, q2 = self.system.part[:].q
        epsilon1 = rf_params['epsilon1']
        epsilon2 = rf_params['epsilon2']
        kappa = rf_params['kappa']
        r_cut = rf_params['r_cut']
        # prefactor calculation
        B = (2 * (epsilon1 - epsilon2) * (1 + kappa * r_cut) -
             epsilon2 * kappa * kappa * r_cut * r_cut) / \
            ((epsilon1 + 2 * epsilon2) * (1 + kappa * r_cut) +
             epsilon2 * kappa * kappa * r_cut * r_cut)
        # Shift so the potential is zero at the cutoff.
        offset = (1. - B / 2.) / r_cut
        u = np.zeros_like(r)
        # r<r_cut
        i = np.where(r < rf_params['r_cut'])[0]
        u[i] = rf_params['prefactor'] * kT * q1 * q2 * \
            ((1. / r[i] - B * np.square(r[i]) / (2. * r_cut**3)) - offset)
        return u

    @utx.skipIfMissingFeatures(["P3M"])
    def test_p3m(self):
        """P3M energy/forces vs. hand-computed references, for metallic and
        a range of finite epsilon boundary conditions."""
        prefactor = 1.1
        box_vol = self.system.volume()
        p1, p2 = self.system.part[:]
        # Total dipole moment of the two charges (for the dipole correction).
        dip = np.copy(p1.q * p1.pos + p2.q * p2.pos)
        p3m_params = {'accuracy': 1e-7,
                      'mesh': [22, 22, 22],
                      'cao': 7,
                      'r_cut': 8.906249999999998,
                      'alpha': 0.387611049779351}
        # reference values for energy and force calculated for prefactor = 1
        ref_energy = -0.501062398379 * prefactor
        ref_force1 = [0.248921612 * prefactor, 0, 0]
        ref_force2 = [-ref_force1[0], 0, 0]
        # check metallic case
        p3m = espressomd.electrostatics.P3M(
            prefactor=prefactor, epsilon='metallic', tune=False, **p3m_params)
        self.system.actors.add(p3m)
        self.system.integrator.run(0, recalc_forces=True)
        p3m_energy = self.system.analysis.energy()['coulomb']
        tol = 1e-5
        np.testing.assert_allclose(p3m_energy, ref_energy, atol=tol)
        np.testing.assert_allclose(np.copy(p1.f), ref_force1, atol=tol)
        np.testing.assert_allclose(np.copy(p2.f), ref_force2, atol=tol)
        # keep current values as reference to check for P3M dipole correction
        ref_energy_metallic = self.system.analysis.energy()['coulomb']
        ref_forces_metallic = np.copy(self.system.part[:].f)
        self.system.actors.remove(p3m)
        # check non-metallic case
        tol = 1e-10
        for epsilon in np.power(10., np.arange(-4, 5)):
            # Analytic dipole-layer correction relative to the metallic case.
            dipole_correction = 4 * np.pi / box_vol / (1 + 2 * epsilon)
            energy_correction = dipole_correction * np.linalg.norm(dip)**2
            forces_correction = np.outer([p1.q, p2.q], dipole_correction * dip)
            ref_energy = ref_energy_metallic + prefactor * energy_correction
            ref_forces = ref_forces_metallic - prefactor * forces_correction
            p3m = espressomd.electrostatics.P3M(
                prefactor=prefactor, epsilon=epsilon, tune=False, **p3m_params)
            self.system.actors.add(p3m)
            self.system.integrator.run(0, recalc_forces=True)
            p3m_forces = np.array([p1.f, p2.f])
            p3m_energy = self.system.analysis.energy()['coulomb']
            np.testing.assert_allclose(p3m_energy, ref_energy, atol=tol)
            np.testing.assert_allclose(p3m_forces, ref_forces, atol=tol)
            self.system.actors.remove(p3m)

    def test_dh(self):
        """Debye-Hueckel (kappa > 0) energy and force over a range of
        separations, against the analytic expression."""
        dh_params = dict(prefactor=1.2, kappa=0.8, r_cut=2.0)
        dh = espressomd.electrostatics.DH(
            prefactor=dh_params['prefactor'],
            kappa=dh_params['kappa'],
            r_cut=dh_params['r_cut'])
        self.system.actors.add(dh)
        dr = 0.001
        r = np.arange(.5, 1.01 * dh_params['r_cut'], dr)
        u_dh = self.calc_dh_potential(r, dh_params)
        # Reference force from the numerical derivative of the potential.
        f_dh = -np.gradient(u_dh, dr)
        # zero the discontinuity, and re-evaluate the derivative as a backwards
        # difference
        i_cut = np.argmin((dh_params['r_cut'] - r)**2)
        f_dh[i_cut] = 0
        f_dh[i_cut - 1] = (u_dh[i_cut - 2] - u_dh[i_cut - 1]) / dr
        u_dh_core = np.zeros_like(r)
        f_dh_core = np.zeros_like(r)
        p1, p2 = self.system.part[:]
        for i, ri in enumerate(r):
            p2.pos = p1.pos + [ri, 0, 0]
            self.system.integrator.run(0)
            u_dh_core[i] = self.system.analysis.energy()['coulomb']
            f_dh_core[i] = p1.f[0]
        np.testing.assert_allclose(u_dh_core, u_dh, atol=1e-7)
        np.testing.assert_allclose(f_dh_core, -f_dh, atol=1e-2)

    def test_dh_pure_coulomb(self):
        """Debye-Hueckel with kappa = 0 must reduce to the bare Coulomb
        potential (force = u/r exactly)."""
        dh_params = dict(prefactor=1.2, kappa=0.0, r_cut=2.0)
        dh = espressomd.electrostatics.DH(
            prefactor=dh_params['prefactor'],
            kappa=dh_params['kappa'],
            r_cut=dh_params['r_cut'])
        self.system.actors.add(dh)
        dr = 0.001
        r = np.arange(.5, 1.01 * dh_params['r_cut'], dr)
        u_dh = self.calc_dh_potential(r, dh_params)
        f_dh = u_dh / r
        u_dh_core = np.zeros_like(r)
        f_dh_core = np.zeros_like(r)
        for i, ri in enumerate(r):
            self.system.part[1].pos = self.system.part[0].pos + [ri, 0, 0]
            self.system.integrator.run(0)
            u_dh_core[i] = self.system.analysis.energy()['coulomb']
            f_dh_core[i] = self.system.part[0].f[0]
        np.testing.assert_allclose(u_dh_core, u_dh, atol=1e-7)
        np.testing.assert_allclose(f_dh_core, -f_dh, atol=1e-7)

    def test_rf(self):
        """Tests the ReactionField coulomb interaction by comparing the
        potential and force against the analytic values"""
        rf_params = dict(prefactor=1.0,
                         kappa=2.0,
                         epsilon1=1.0,
                         epsilon2=2.0,
                         r_cut=2.0)
        rf = espressomd.electrostatics.ReactionField(
            prefactor=rf_params['prefactor'],
            kappa=rf_params['kappa'],
            epsilon1=rf_params['epsilon1'],
            epsilon2=rf_params['epsilon2'],
            r_cut=rf_params['r_cut'])
        self.system.actors.add(rf)
        dr = 0.001
        r = np.arange(.5, 1.01 * rf_params['r_cut'], dr)
        u_rf = self.calc_rf_potential(r, rf_params)
        # Reference force from the numerical derivative of the potential.
        f_rf = -np.gradient(u_rf, dr)
        # zero the discontinuity, and re-evaluate the derivative as a backwards
        # difference
        i_cut = np.argmin((rf_params['r_cut'] - r)**2)
        f_rf[i_cut] = 0
        f_rf[i_cut - 1] = (u_rf[i_cut - 2] - u_rf[i_cut - 1]) / dr
        u_rf_core = np.zeros_like(r)
        f_rf_core = np.zeros_like(r)
        p1, p2 = self.system.part[:]
        for i, ri in enumerate(r):
            p2.pos = p1.pos + [ri, 0, 0]
            self.system.integrator.run(0)
            u_rf_core[i] = self.system.analysis.energy()['coulomb']
            f_rf_core[i] = p1.f[0]
        np.testing.assert_allclose(u_rf_core, u_rf, atol=1e-7)
        np.testing.assert_allclose(f_rf_core, -f_rf, atol=1e-2)
if __name__ == "__main__":
ut.main()
| fweik/espresso | testsuite/python/electrostaticInteractions.py | Python | gpl-3.0 | 8,644 | [
"ESPResSo"
] | acce82ccffe1a436cf7bce377ec97386315ad0139d0f053ff1a9fd4582d96b9e |
#!C:\Python33\python.exe -u
# -*- coding: UTF-8 -*-
# enable debugging
import cgi
import cgitb
cgitb.enable()
import struct
import array
import uuid
import mysql.connector
from mysql.connector import errorcode
import configparser
import re
import http.cookies
import os
def printRowsForChart(counts):
for row in counts[:-1]:
print("['{option}', {count}],".format(option=row[0], count=row[1]))
print("['{option}', {count}]".format(option=counts[-1][0], count=counts[-1][1]))
def printRowsForChartNoQuotes(counts):
for row in counts[:-1]:
print("[{option}, {count}],".format(option=row[0], count=row[1]))
print("[{option}, {count}]".format(option=counts[-1][0], count=counts[-1][1]))
print("Content-Type: text/html")
print("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>Dark Souls death counter - Stats</title>
<link rel="stylesheet" type="text/css" href="../styles.css">
<style type="text/css">
</style>
</head>""")
#lists of various info to use in charts
countsOfDeaths = []
countsOfAdpp = []
countsOfPlaythroughs = []
countsOfProgress = []
countsOfSmornstein = []
countsOfOptionals = []
#default player info
name = ""
deaths = 0
progress = 0
playthrough = 0
adpp = 0
averageDeaths = 0
averageAdpp = 0
#get DB connection info
config = configparser.ConfigParser()
config.read('../conf/settings.ini')
dbInfo = config['db']
#connect to DB
try:
dbConn = mysql.connector.connect(user=dbInfo['user'], password=dbInfo['password'], host=dbInfo['host'], database=dbInfo['database'])
cursor = dbConn.cursor()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Error: could not connect to the database")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Error: Database does not exists")
else:
print("Error: " + str(err))
else:
# get data from form
form = cgi.FieldStorage()
if "name" in form:
playerId = uuid.uuid4()
name = form["name"].value
deaths = form["deaths"].value
shitholes = 1 if "optional-shitholes" in form else 0
dragonbros = 1 if "optional-dragonbros" in form else 0
asylum = 1 if "optional-asylum" in form else 0
paintedworld = 1 if "optional-paintedworld" in form else 0
manus = 1 if "optional-manus" in form else 0
playthrough = form["playthrough"].value
progress = form["progress"].value
smornstein = form["smornstein"].value
totalProgress = float(playthrough) + float(progress)
if (totalProgress == 0):
#treat as progress = .02 but don't want to mess up charts by explicitly setting progress as such
adpp = int(deaths) * 50
else:
adpp = round(float(deaths) / totalProgress)
#input validation against patterns
dataIsValid = False
if re.match(r"^.{1,13}$", name) and \
re.match(r"^\d{1,6}$", deaths) and \
re.match(r"^\d$", str(playthrough)) and \
re.match(r"^0(\.\d\d)?$", str(progress)) and \
re.match(r"^\d\d?$", str(smornstein)):
dataIsValid = True
if dataIsValid:
#check if character already exists and set playerId
characterIsNew = True
cookieEnvVar = 'HTTP_COOKIE'
if cookieEnvVar in os.environ:
cookieString = os.environ.get(cookieEnvVar)
c = http.cookies.SimpleCookie(cookieString)
utmaName = '__utma'
if utmaName in c:
utma = c[utmaName].value
#just get the first 3 parts of utma: domain hash, unique ID, initial visit
match = re.match(r"^\d+\.\d+\.\d+", utma)
if match:
playerId = match.string[match.start():match.end()]
doesCharacterExistInDbQuery = ("select * from characters where playerid = %s and charactername = %s")
params = (playerId, name)
cursor.execute(doesCharacterExistInDbQuery, params)
if cursor.fetchall():
characterIsNew = False
#insert data from form into DB
statement = ""
if characterIsNew:
statement = ("INSERT INTO characters "
"(playerid, charactername, deaths, playthrough, progress, shitholes, dragonbros, asylum, paintedworld, manus, smornstein, adpp) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
statementData = (str(playerId), name, deaths, playthrough, progress, shitholes, dragonbros, asylum, paintedworld, manus, smornstein, adpp)
else:
statement = ("UPDATE characters "
"SET deaths = %s, playthrough = %s, progress = %s, shitholes = %s, dragonbros = %s, asylum = %s, paintedworld = %s, manus = %s, smornstein = %s, adpp = %s "
"WHERE playerid = %s and charactername = %s")
statementData = (deaths, playthrough, progress, shitholes, dragonbros, asylum, paintedworld, manus, smornstein, adpp, str(playerId), name)
cursor.execute(statement, statementData)
dbConn.commit()
else:
name = "";
print("<h1 align=\"center\">BAD CHARACTER DATA</h1>")
#get data from DB
#averages
queryAverages = ("select * from deathandadppaverages")
cursor.execute(queryAverages)
averages = cursor.fetchall()
averageDeaths = averages[0][0]
averageAdpp = averages[0][1]
#ADPP
queryAdpp = ("select * from adppcounts")
cursor.execute(queryAdpp)
countsOfAdpp = cursor.fetchall()
#deaths
queryDeaths = ("select * from deathcounts")
cursor.execute(queryDeaths)
countsOfDeaths = cursor.fetchall()
#playthrough
queryPlaythrough = ("select * from playthroughcounts")
cursor.execute(queryPlaythrough)
countsOfPlaythroughs = cursor.fetchall()
#progress
queryProgress = ("select * from progresscounts")
cursor.execute(queryProgress)
countsOfProgress = cursor.fetchall()
#smornstein
querySmornstein = ("select * from smornsteincounts")
cursor.execute(querySmornstein)
countsOfSmornstein = cursor.fetchall()
#optional areas
queryForOptionalValues = ("SELECT * FROM optionalcounts")
cursor.execute(queryForOptionalValues)
countsOfOptionals = cursor.fetchall()
cursor.close()
dbConn.close()
print("""
<body>
<div class="header">
</div>
<div class="wrapper">
<div class="container">
<div class="navbar">
<ul>
<li><a href="../index.html">Home</a></li>
<li><a href="stats.py">Stats</a></li>
<li><a href="../about.html">About</a></li>
</ul>
</div>
<div class="content">
<table id="table-char" style="display:none;">
<tr>
<td>Character: <span id="span-char" class="span-stat"></span></td>
</tr>
</table>
<h2 class="your-stats" style="display:none;">Your average deaths per playthrough: <span id="span-ADPP" class="span-stat"></span></h2>
<div id="chart-ADPP"></div>
<h2 class="your-stats" style="display:none;">Your total deaths across all playthroughs: <span id="span-deaths" class="span-stat"></span></h2>
<div id="chart-deaths"></div>
<h2 class="your-stats" style="display:none;">Your current playthrough: <span id="span-playthrough" class="span-stat"></span></h2>
<div id="chart-playthrough"></div>
<h2 class="your-stats" style="display:none;">Your current progress in this playthrough: <span id="span-progress" class="span-stat"></span> (approximate)</h2>
<div id="chart-progress"></div>
<h2 class="all-stats">Percent of players who completed each optional area:</h2>
<div id="chart-optional"></div>
<h2 class="all-stats">Top 5 favorite nicknames for Ornstein & Smough:</h2>
<div id="chart-smornstein"></div>
</div>
</div>
</div>
</body>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">""")
print("""
var character = "{name}";
var deaths = {deaths};
var ADPP = {adpp};
var averageDeaths = {averageDeaths};
var averageADPP = {averageAdpp};
var playthrough = {playthrough};
var progress = {progress};
var fsize = '25'; // Larger font size if the player-specific titles don't show up.
var fname = 'Marcellus SC' // Fancier font, same deal.
var fcolor = '#EEEEEE'
""".format(name=name, deaths=deaths, progress=progress, playthrough=playthrough, adpp=adpp, averageAdpp=averageAdpp, averageDeaths=averageDeaths))
print("""
if(character !== "") // Show how the submitted character compares with global stats.
// Users can still browse global stats without submitting a character,
// in which case player-specific elements won't display.
{
document.getElementById('span-char').innerHTML = character;
document.getElementById('table-char').style.display = "table";
yourStats = document.getElementsByClassName("your-stats");
for (var i = 0; i < yourStats.length; i++)
{
yourStats[i].style.display = "inline";
}
fname = 'Arial';
fsize = '15';
fcolor = '#CCBB00';
}
else // Change the remaining h2 elements to match the google chart headers
{
allstats = document.getElementsByClassName('all-stats');
for(var i=allstats.length; i>0; i--)
{
allstats[i-1].className = 'nochar-stats';
}
}
document.getElementById('span-ADPP').innerHTML = ADPP;
document.getElementById('span-deaths').innerHTML = deaths;
switch(playthrough)
{
case 0:
document.getElementById('span-playthrough').innerHTML = "New Game";
break;
case 1:
document.getElementById('span-playthrough').innerHTML = "New Game +";
break;
case 2:
document.getElementById('span-playthrough').innerHTML = "New Game +2";
break;
case 3:
document.getElementById('span-playthrough').innerHTML = "New Game +3";
break;
case 4:
document.getElementById('span-playthrough').innerHTML = "New Game +4";
break;
case 5:
document.getElementById('span-playthrough').innerHTML = "New Game +5";
break;
case 6:
document.getElementById('span-playthrough').innerHTML = "New Game +6";
break;
case 7:
document.getElementById('span-playthrough').innerHTML = "New Game +7";
break;
}
if(progress > 1)
{
progress = 1;
}
document.getElementById('span-progress').innerHTML = progress * 100 + '%';
// Load the Visualization API and the piechart package.
google.load('visualization', '1.0', {'packages':['corechart']});
// Set a callback to run when the Google Visualization API is loaded.
google.setOnLoadCallback(drawChartADPP);
google.setOnLoadCallback(drawChartDeaths);
google.setOnLoadCallback(drawChartPlaythrough);
google.setOnLoadCallback(drawChartProgress);
google.setOnLoadCallback(drawChartOptional);
google.setOnLoadCallback(drawChartSmornstein);
// Callback that creates and populates a data table,
// instantiates the pie chart, passes in the data and
// draws it.
function drawChartADPP() {
// Create and populate the data table.
var data = new google.visualization.DataTable();
data.addColumn('number', 'Deaths');
data.addColumn('number', 'Players');
data.addRows([""")
if len(countsOfAdpp) > 0:
printRowsForChartNoQuotes(countsOfAdpp)
print("""]);
var options = {
title:"Average Deaths per Playthrough for All Players:",
titleTextStyle: {color: fcolor, fontSize: fsize, fontName: fname},
width:1000, height:400,
hAxis: {title: 'Deaths', titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}, viewWindow:{max:300},
ticks: [0,25,50,75,100,125,150,175,200,225,250,275,300]},
vAxis: {title: 'Players', titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}},
curveType: 'function',
enableInteractivity:'false',
legend: {position: 'none'},
backgroundColor: 'none',
colors: ['#CCBB00']
};
// Create and draw the visualization.
new google.visualization.LineChart(document.getElementById('chart-ADPP')).
draw(data, options);
}
function drawChartDeaths() {
// Create and populate the data table.
var data = new google.visualization.DataTable();
data.addColumn('number', 'Deaths');
data.addColumn('number', 'Players');
data.addRows([""")
if len(countsOfDeaths) > 0:
printRowsForChartNoQuotes(countsOfDeaths)
print("""]);
var options = {
title:"Total Death Counts for All Players:",
titleTextStyle: {color: fcolor, fontSize: fsize, fontName: fname},
width:1000, height:400,
hAxis: {title: 'Deaths', titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'},
ticks: [0,50,100,150,200,250,300,350,400,450,500]},
vAxis: {title: 'Players', titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}},
curveType:'function',
enableInteractivity:'false',
legend: {position: 'none'},
backgroundColor: 'none',
colors: ['#CCBB00']
};
// Create and draw the visualization.
new google.visualization.LineChart(document.getElementById('chart-deaths')).
draw(data, options);
}
function drawChartPlaythrough() {
// Create and populate the data table.
var data = new google.visualization.DataTable();
data.addColumn('number', 'Playthrough');
data.addColumn('number', 'Survivors');
data.addRows([""")
if len(countsOfPlaythroughs) > 0:
printRowsForChartNoQuotes(countsOfPlaythroughs)
print("""]);
var options = {
title:"Global Completion Rate:",
titleTextStyle: {color: fcolor, fontSize: fsize, fontName: fname},
curveType: "function",
width: 1000, height: 400,
vAxis: {title:"% of players completed", titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}},
hAxis: {baselineColor:'#CCCCCC', gridlines: {color:'#666666'}, ticks: [{v:0, f:'NG'},{v:1, f:'NG+'},{v:2, f:'NG+2'},{v:3, f:'NG+3'},{v:4, f:'NG+4'},{v:5, f:'NG+5'},{v:6, f:'NG+6'},{v:7, f:'NG+7'}], textStyle: {color: '#CCCCCC'}},
legend: {position: 'none'},
enableInteractivity:'false',
backgroundColor: 'none',
colors: ['#CCBB00']
}
// Create and draw the visualization.
new google.visualization.LineChart(document.getElementById('chart-playthrough')).
draw(data, options);
}
function drawChartProgress() {
// Create and populate the data table.
var data = new google.visualization.DataTable();
data.addColumn('number', 'Playthrough');
data.addColumn('number', 'Survivors');
data.addRows([""")
if len(countsOfProgress) > 0:
printRowsForChartNoQuotes(countsOfProgress)
print("""]);
var options = {
title:"Global Completion Rate (within current playthrough):",
titleTextStyle: {color: fcolor, fontSize: fsize, fontName: fname},
curveType: "none",
width: 1000, height: 400,
vAxis: {title:"% of players completed", titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}},
hAxis: {baselineColor:'#CCCCCC', gridlines: {color:'#666666'}, ticks: [{v:0, f:' '},{v:1, f:'Asylum Demon'},{v:2, f:'1st Bell'},{v:3, f:'2nd Bell'},{v:4, f:"Sen\'s Fortress"},{v:5, f:'Anor Londo'},{v:6, f:'1/4 Lord Souls'},{v:7, f:'2/4 Lord Souls'}, {v:8, f:'3/4 Lord Souls'}, {v:9, f:'4/4 Lord Souls'}], textStyle: {color: '#CCCCCC'}},
legend: {position: 'none'},
enableInteractivity:'false',
backgroundColor: 'none',
colors: ['#CCBB00']
}
// Create and draw the visualization.
new google.visualization.LineChart(document.getElementById('chart-progress')).
draw(data, options);
}
function drawChartOptional() {
// Create and populate the data table.
var data = new google.visualization.DataTable();
data.addColumn('string', 'Deaths');
data.addColumn('number', 'Frequency');
data.addRows([""")
if len(countsOfOptionals) > 0:
printRowsForChart(countsOfOptionals)
print("""]);
var options = {
width:1000, height:400,
hAxis: {textStyle: {color: '#CCCCCC'}},
vAxis: {title: '% of players completed', titleTextStyle: {color: '#CCCCCC'},textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}},
bar: {groupWidth: '90%'},
enableInteractivity:'false',
legend: {position: 'none'},
backgroundColor: 'none',
colors: ['#CCBB00']
};
// Create and draw the visualization.
new google.visualization.ColumnChart(document.getElementById('chart-optional')).
draw(data, options);
}
function drawChartSmornstein() {
// Create and populate the data table.
var data = new google.visualization.DataTable();
data.addColumn('string', 'Deaths');
data.addColumn('number', 'Frequency');
data.addRows([""")
if len(countsOfSmornstein) > 0:
printRowsForChart(countsOfSmornstein)
print("""]);
var options = {
width:1000, height:400,
hAxis: {textStyle: {color: '#CCCCCC'}},
vAxis: {title: 'Votes', titleTextStyle: {color: '#CCCCCC'}, textStyle: {color: '#CCCCCC'}, baselineColor:'#CCCCCC', gridlines: {color:'#666666'}},
bar: {groupWidth: '90%'},
enableInteractivity:'false',
legend: {position: 'none'},
backgroundColor: 'none',
colors: ['#CCBB00']
};
// Create and draw the visualization.
new google.visualization.ColumnChart(document.getElementById('chart-smornstein')).
draw(data, options);
}
</script>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-47846181-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</html>
""")
| RKYates/Dark-Souls-Death-Count-cgi-page | cgi-bin/stats.py | Python | gpl-3.0 | 17,316 | [
"VisIt"
] | 2d2b499a1fcf91cedf18eab7bbe92f1259d64d749e2f26f416926b08ed2cbee1 |
import ast
from unittest import mock
import _ast
from nimoy.specification import Specification
from nimoy.ast_tools.feature_blocks import FeatureBlockTransformer
from nimoy.ast_tools.feature_blocks import FeatureBlockRuleEnforcer
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.runner.exceptions import InvalidFeatureBlockException
class FeatureBlockRuleEnforcerSpec(Specification):
def add_setup_and_given_as_first_blocks(self):
with setup:
spec_metadata = get_basic_spec_metadata()
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with expect:
enforcer.enforce_addition_rules('given')
enforcer.enforce_addition_rules('setup')
def only_one_given_is_allowed(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'given')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules(block_to_add)
with then:
thrown(InvalidFeatureBlockException)
with where:
block_to_add = ['given', 'setup']
def only_one_setup_is_allowed(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'setup')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules(block_to_add)
with then:
thrown(InvalidFeatureBlockException)
with where:
block_to_add = ['given', 'setup']
def setup_and_given_are_allowed_only_in_the_beginning(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'expect')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules(block_to_add)
with then:
thrown(InvalidFeatureBlockException)
with where:
block_to_add = ['given', 'setup']
def test_setup_and_given_cant_dangle(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', dangling_block)
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_tail_end_rules()
with then:
thrown(InvalidFeatureBlockException)
with where:
dangling_block = ['given', 'setup']
def then_cant_precede_when(self):
with setup:
spec_metadata = get_basic_spec_metadata()
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules('then')
with then:
thrown(InvalidFeatureBlockException)
def when_cant_dangle(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'when')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_tail_end_rules()
with then:
thrown(InvalidFeatureBlockException)
def when_cant_be_followed_by_expect(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'when')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules('expect')
with then:
thrown(InvalidFeatureBlockException)
def then_after_when(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'when')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with expect:
enforcer.enforce_addition_rules('then')
def block_cant_succeed_where(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'where')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules('expect')
with then:
thrown(InvalidFeatureBlockException)
def cant_add_more_than_one_where(self):
with setup:
spec_metadata = get_basic_spec_metadata()
spec_metadata.add_feature_block('test_it', 'where')
enforcer = FeatureBlockRuleEnforcer(spec_metadata, 'test_it', {})
with when:
enforcer.enforce_addition_rules('where')
with then:
thrown(InvalidFeatureBlockException)
class FeatureBlockTransformerSpec(Specification):
@mock.patch('nimoy.ast_tools.feature_blocks.ComparisonExpressionTransformer')
def feature_was_added(self, comparison_expression_transformer):
with setup:
module_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def test_it(self):
with setup:
pass
with when:
pass
with then:
pass
with expect:
pass
with where:
pass
"""
node = ast.parse(module_definition, mode='exec')
spec_metadata = get_basic_spec_metadata()
with when:
FeatureBlockTransformer(spec_metadata, 'test_it').visit(node)
with then:
spec_feature_body = node.body[1].body[0].body
block_types = ['setup', 'when', 'then', 'expect', 'where']
for index, block_type in enumerate(block_types[:-1]):
spec_feature_body[index].items[0].context_expr.func.attr == '_feature_block_context'
spec_feature_body[index].items[0].context_expr.args[0].s == block_type
type(spec_feature_body[4]) == _ast.FunctionDef
spec_feature_body[4].name == 'test_it_where'
comparison_expression_transformer.call_count == 2
comparison_expression_transformer.return_value.visit.call_count == 2
spec_metadata.feature_blocks['test_it'] == block_types
def get_basic_spec_metadata():
spec_metadata = SpecMetadata('spec_name')
spec_metadata.set_owning_module('JimbobSpec')
spec_metadata.add_feature('test_it')
return spec_metadata
| Luftzig/nimoy | specs/nimoy/ast_tools/feature_blocks_spec.py | Python | apache-2.0 | 6,616 | [
"VisIt"
] | 8d7ca1e5df6b2f1f947eba69ec4eb024edfb865f2b86bac0d5641357a6f44cb6 |
"""
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on a ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerable faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerable longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using kernel ridge
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| RPGOne/Skynet | scikit-learn-0.18.1/examples/gaussian_process/plot_compare_gpr_krr.py | Python | bsd-3-clause | 5,191 | [
"Gaussian"
] | c236967ce1d035f60a3633a3a5576b935e7433285ae5e8ce4f1ff9197dc0dded |
#! /usr/bin/env python3
from random import randrange, choice
description = '''
Monopoly odds
Problem 84
In the game, Monopoly, the standard board is set up in the following way:
GO A1 2=CC1 A2 T1 R1 B1 7=CH1 B2 B3 JAIL
H2 C1
T2 U1
H1 C2
36=CH3 C3
R4 R2
G3 D1
33=CC3 CC2=17
G2 D2
G1 D3
G2J F3 U2 F2 F1 R3 E3 E2 22=CH2 E1 FP
A player starts on the GO square and adds the scores on two 6-sided dice to determine the number of squares they advance in a clockwise direction. Without any further rules we would expect to visit each square with equal probability: 2.5%. However, landing on G2J (Go To Jail), CC (community chest), and CH (chance) changes this distribution.
In addition to G2J, and one card from each of CC and CH, that orders the player to go directly to jail, if a player rolls three consecutive doubles, they do not advance the result of their 3rd roll. Instead they proceed directly to jail.
At the beginning of the game, the CC and CH cards are shuffled. When a player lands on CC or CH they take a card from the top of the respective pile and, after following the instructions, it is returned to the bottom of the pile. There are sixteen cards in each pile, but for the purpose of this problem we are only concerned with cards that order a movement; any instruction not concerned with movement will be ignored and the player will remain on the CC/CH square.
Community Chest (2/16 cards):
Advance to GO
Go to JAIL
Chance (10/16 cards):
Advance to GO
Go to JAIL
Go to C1
Go to E3
Go to H2
Go to R1
Go to next R (railway company)
Go to next R
Go to next U (utility company)
Go back 3 squares.
The heart of this problem concerns the likelihood of visiting a particular square. That is, the probability of finishing at that square after a roll. For this reason it should be clear that, with the exception of G2J for which the probability of finishing on it is zero, the CH squares will have the lowest probabilities, as 5/8 request a movement to another square, and it is the final square that the player finishes at on each roll that we are interested in. We shall make no distinction between "Just Visiting" and being sent to JAIL, and we shall also ignore the rule about requiring a double to "get out of jail", assuming that they pay to get out on their next turn.
By starting at GO and numbering the squares sequentially from 00 to 39 we can concatenate these two-digit numbers to produce strings that correspond with sets of squares.
Statistically it can be shown that the three most popular squares, in order, are JAIL (6.24%) = Square 10, E3 (3.18%) = Square 24, and GO (3.09%) = Square 00. So these three most popular squares can be listed with the six-digit modal string: 102400.
If, instead of using two 6-sided dice, two 4-sided dice are used, find the six-digit modal string.
'''
GO = 0
COMMUNITY_CHEST = [2, 17, 33]
CHANCE = [7, 22, 36]
JAIL = 10
GO_TO_JAIL = 30
UTILITY1 = 12
UTILITY2 = 28
def community_chest(pos):
return choice([JAIL, GO] + [pos]*14)
def next_utility(pos):
return (UTILITY2 if pos < UTILITY2 and pos > UTILITY1 else UTILITY1)
def next_rail(pos):
if pos < 5 or pos > 35: return 5
if pos < 15: return 15
if pos < 25: return 25
return 35
def chance(pos):
return choice([GO, JAIL, 11, 24, 39, 5, next_utility(pos), pos-3, next_rail(pos), next_rail(pos)] + [pos]*6)
def simulation(N):
    """Simulate N rolls of two 4-sided dice on the Monopoly board.

    Returns a list of 40 counts: how many times each square was the
    final resting square of a roll.  Three consecutive doubles send
    the player directly to JAIL.
    """
    counts = [0] * 40
    pos = 0
    # Bug fix: `doubles` was never initialized, so the first double rolled
    # raised UnboundLocalError.
    doubles = 0
    for turn in range(0, N):
        d1 = randrange(1, 5)
        d2 = randrange(1, 5)
        if d1 == d2:
            doubles += 1
        else:
            doubles = 0
        if doubles == 3:
            # Third consecutive double: go straight to jail (previously this
            # check was unreachable when the rolled square was CC/CH) and
            # reset the consecutive-doubles count.
            pos = JAIL
            doubles = 0
        else:
            pos = (pos + d1 + d2) % len(counts)
            if pos in COMMUNITY_CHEST:
                pos = community_chest(pos)
            elif pos in CHANCE:
                pos = chance(pos)
            elif pos == GO_TO_JAIL:
                pos = JAIL
        counts[pos] += 1
    return counts
# Run the Monte Carlo simulation and report the six-digit modal string:
# the three most-visited squares, each as a two-digit number, concatenated
# (the answer format the problem asks for, rather than a raw Python list).
indexedCounts = sorted(enumerate(simulation(500000)), key=lambda pr: pr[1], reverse=True)
top3 = [pr[0] for pr in indexedCounts[:3]]
print(''.join('%02d' % square for square in top3))
| mbuhot/mbuhot-euler-solutions | python/problem-084.py | Python | mit | 3,941 | [
"VisIt"
] | 0aa375bd5473a29ac09e8128f50b4ccd69373f0aebb0d515b183d31240a9df94 |
import numpy as np
from ase.tasks.main import run
# --- molecule tests for the ase.tasks command-line interface ---
# fit
atoms, task = run('molecule H2 -F 5,2 --atomize -t fitfail')
try:
    atoms, task = run('molecule H2 H -t fitfail -s')
except ValueError:
    pass
# fitting outside of range must fail!
assert task.data == {}
# fit in range
# when only fitting, the number of fit points must be odd;
# in this case data['energy'] is the energy of the middle fit point
#
# test trailing space
atoms, task = run('molecule H2 -F 5,7 --atomize -t fit ')
atoms, task = run('molecule H2 H -t fit -s')
data = task.data['H2']
assert abs(data['energy'] - 1.1589) < 0.0001
assert abs(data['relaxed energy'] - 1.0705) < 0.0001
# note slightly different bondlength from fitting
assert abs(data['distance'] - 0.77900) < 0.00001
assert abs(data['frequency'] - 0.8676) < 0.0001
assert abs(data['atomic energy'] - data['relaxed energy'] - 5.3495) < 0.0001
# opt then fit
# when fitting after optimization the number of points need not be odd;
# in this case data['energy'] is the original energy before optimization
#
# test leading space
atoms, task = run(' molecule H2 -R 0.001,FIRE -F 6,2 --atomize -t optfit')
atoms, task = run('molecule H2 H -t optfit -s')
data = task.data['H2']
assert abs(data['energy'] - 1.1589) < 0.0001
assert abs(data['relaxed energy'] - 1.0705) < 0.0001
# note slightly different bondlength from the fitting above!
assert abs(data['distance'] - 0.77905) < 0.00001
assert abs(data['frequency'] - 0.8628) < 0.0001
assert abs(data['atomic energy'] - data['relaxed energy'] - 5.3495) < 0.0001
# opt
atoms, task = run('molecule H2 -R 0.001,BFGSLineSearch --atomize -t opt')
atoms, task = run('molecule H2 H -t opt -s')
data = task.data['H2']
assert data['optimizer steps'] == 4
assert data['optimizer force calls'] == 5
assert abs(data['relaxed energy'] - 1.0705) < 0.0001
assert abs(data['distance'] - 0.77905) < 0.00001
assert abs(data['atomic energy'] - data['relaxed energy'] - 5.3495) < 0.0001
# optimization with insufficient number of steps (ASE does not fail, simply stops)
atoms, task = run('molecule H2 -R 0.001 --relaxsteps 1 --atomize -t opt')
atoms, task = run('molecule H2 H -t opt -s')
data = task.data['H2']
assert data['optimizer steps'] == 1
assert abs(data['relaxed energy'] - 1.1294) < 0.0001
assert abs(data['distance'] - 0.81717) < 0.00001
assert abs(data['atomic energy'] - data['relaxed energy'] - 5.2906) < 0.0001
# --- bulk tests ---
# fit (the system slightly distorted)
# when only fitting, the number of fit points must be odd;
# in this case data['energy'] is the energy of the middle fit point
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -F 5,-0.5 --modify=system.positions[0,2]+=0.1 -t fit')
# non-default lattice constant must be passed to the analysis part
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -t fit -s')
data = task.data['NiO']
assert abs(data['fitted energy'] - 1.1455) < 0.0001
assert abs(data['volume'] - 20.2594) < 0.0001
assert abs(data['B'] - 0.9317) < 0.0001
# fit sensitivity to sampling (same initial structure)
#
# test mid space
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -F 5,1 --modify=system.positions[0,2]+=0.1 -t fit')
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -t fit -s')
data = task.data['NiO']
assert abs(data['fitted energy'] - 1.1455) < 0.0001
assert abs(data['volume'] - 20.2595) < 0.0001
assert abs(data['B'] - 0.9303) < 0.0001
# fit sensitivity to equation of state (same data);
# the murnaghan EOS needs scipy, so skip this check when it is absent
try:
    import scipy
    atoms, task = run('bulk NiO -x rocksalt -a 4.32 --eos murnaghan -t fit -s')
    data = task.data['NiO']
    assert abs(data['fitted energy'] - 1.1455) < 0.0001
    assert abs(data['volume'] - 20.2595) < 0.0001
    assert abs(data['B'] - 0.9301) < 0.0001
except ImportError:
    pass
# opt and fit (same initial structure)
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -R 0.01,BFGS -F 5,-0.5 --modify=system.positions[0,2]+=0.1 -t optfit5')
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -t optfit5 -s')
data = task.data['NiO']
assert data['optimizer force calls'] == data['optimizer steps'] == 3
assert abs(data['energy'] - 1.1458) < 0.0001
assert abs(data['fitted energy'] - 1.1254) < 0.0001
assert abs(data['volume'] - 20.1513) < 0.0001
assert abs(data['B'] - 0.9407) < 0.0001
# opt and fit (different initial structure)
# when fitting after optimization the number of points need not be odd;
# in this case data['energy'] is the original energy before optimization
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -R 0.01 -F 6,-0.5 -t optfit6')
atoms, task = run('bulk NiO -x rocksalt -a 4.32 -t optfit6 -s')
data = task.data['NiO']
assert abs(data['energy'] - 1.1254) < 0.0001
assert abs(data['fitted energy'] - 1.1254) < 0.0001
assert abs(data['volume'] - 20.1513) < 0.0001
assert abs(data['B'] - 0.9407) < 0.0001
| grhawk/ASE | tools/ase/test/tasks/optandfit.py | Python | gpl-2.0 | 4,766 | [
"ASE"
] | 2dcd510a25f83ff86b756e1bacb8e54c53076bd77dd1c0f374d37cd48954d40a |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import os
import sys
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from hopfield import *
import numpy as np
# The neural network will learn these patterns.
PATTERN = [[
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O"],
[ "OO OO OO",
"OO OO OO",
" OO OO ",
" OO OO ",
"OO OO OO",
"OO OO OO",
" OO OO ",
" OO OO ",
"OO OO OO",
"OO OO OO" ],
[ "OOOOO ",
"OOOOO ",
"OOOOO ",
"OOOOO ",
"OOOOO ",
" OOOOO",
" OOOOO",
" OOOOO",
" OOOOO",
" OOOOO" ],
[ "O O O O",
" O O O ",
" O O O ",
"O O O O",
" O O O ",
" O O O ",
"O O O O",
" O O O ",
" O O O ",
"O O O O" ],
[ "OOOOOOOOOO",
"O O",
"O OOOOOO O",
"O O O O",
"O O OO O O",
"O O OO O O",
"O O O O",
"O OOOOOO O",
"O O",
"OOOOOOOOOO" ]]
# The neural network will be tested on these patterns, to see
# which of the last set they are the closest to.
PATTERN2 = [[
" ",
" ",
" ",
" ",
" ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O"],
["OOO O O",
" O OOO OO",
" O O OO O",
" OOO O ",
"OO O OOO",
" O OOO O",
"O OO O O",
" O OOO ",
"OO OOO O ",
" O O OOO"],
["OOOOO ",
"O O OOO ",
"O O OOO ",
"O O OOO ",
"OOOOO ",
" OOOOO",
" OOO O O",
" OOO O O",
" OOO O O",
" OOOOO"],
["O OOOO O",
"OO OOOO ",
"OOO OOOO ",
"OOOO OOOO",
" OOOO OOO",
" OOOO OO",
"O OOOO O",
"OO OOOO ",
"OOO OOOO ",
"OOOO OOOO"],
["OOOOOOOOOO",
"O O",
"O O",
"O O",
"O OO O",
"O OO O",
"O O",
"O O",
"O O",
"OOOOOOOOOO"]]
# Each pattern above is a 10x10 grid of characters.
HEIGHT = 10
WIDTH = 10
def convert_pattern(data, index):
    """Flatten pattern *index* of *data* into a bipolar (+1/-1) vector.

    'O' (any non-space character) maps to +1, a space maps to -1.
    """
    flat = np.zeros([WIDTH * HEIGHT])
    cell = 0
    for row in range(HEIGHT):
        for col in range(WIDTH):
            flat[cell] = -1 if data[index][row][col] == ' ' else 1
            cell += 1
    return flat
def display(pattern1, pattern2):
    """Print two flattened bipolar patterns side by side, row by row."""
    for row in range(HEIGHT):
        # Positive cells render as 'O', non-positive as a blank.
        left = "".join("O" if pattern1[row * WIDTH + col] > 0 else " "
                       for col in range(WIDTH))
        right = "".join("O" if pattern2[row * WIDTH + col] > 0 else " "
                        for col in range(WIDTH))
        print(left + " -> " + right)
def evaluate(hopfield, pattern):
    """Run each pattern in *pattern* through the network and display the
    input next to the state the network settles into."""
    for i in range(len(pattern)):
        pattern1 = convert_pattern(pattern, i)
        hopfield.current_state = pattern1
        # Iterate the network until its state stops changing (at most 100 cycles).
        cycles = hopfield.run_until_stable(100)
        pattern2 = hopfield.current_state
        print("Cycles until stable(max 100): {}, result=".format(cycles))
        display(pattern1, pattern2)
        print("----------------------")
# Build the Hopfield network and train it on the reference patterns
# using Hebbian learning.
hopfield = HopfieldNetwork(WIDTH*HEIGHT)
train = TrainHopfieldHebbian(hopfield)
for i in range(len(PATTERN)):
    train.add_pattern(convert_pattern(PATTERN, i))
train.learn()
# The training patterns should be recalled exactly; each distorted
# pattern should settle to the closest stored pattern.
evaluate(hopfield, PATTERN)
evaluate(hopfield, PATTERN2)
"VisIt"
] | 2cf48dab49f35bd5bc04e9eadd363aaef845f6bdeacac40f66a8cf75cadcafde |
"""This is the state that handles battles against
monsters"""
import random, sys
from itertools import izip
import pygame as pg
from .. import tools, battlegui, observer, setup
from .. components import person, attack, attackitems
from .. import constants as c
#Python 2/3 compatibility.
if sys.version_info[0] == 2:
range = xrange
class Battle(tools._State):
def __init__(self):
super(Battle, self).__init__()
self.name = 'battle'
self.music = setup.MUSIC['high_action']
self.volume = 0.4
def startup(self, current_time, game_data):
"""
Initialize state attributes.
"""
self.current_time = current_time
self.timer = current_time
self.allow_input = False
self.game_data = game_data
self.inventory = game_data['player inventory']
self.state = 'transition in'
self.next = game_data['last state']
self.run_away = False
self.player = self.make_player()
self.attack_animations = pg.sprite.Group()
self.sword = attackitems.Sword(self.player)
self.enemy_group, self.enemy_pos_list, self.enemy_list = self.make_enemies()
self.experience_points = self.get_experience_points()
self.new_gold = self.get_new_gold()
self.background = self.make_background()
self.info_box = battlegui.InfoBox(game_data,
self.experience_points,
self.new_gold)
self.arrow = battlegui.SelectArrow(self.enemy_pos_list,
self.info_box)
self.select_box = battlegui.SelectBox()
self.player_health_box = battlegui.PlayerHealth(self.select_box.rect,
self.game_data)
self.select_action_state_dict = self.make_selection_state_dict()
self.observers = [observer.Battle(self),
observer.MusicChange()]
self.player.observers.extend(self.observers)
self.observers.append(observer.SoundEffects())
self.damage_points = pg.sprite.Group()
self.player_actions = []
self.player_action_dict = self.make_player_action_dict()
self.player_level = self.game_data['player stats']['Level']
self.enemies_to_attack = []
self.action_selected = False
self.just_leveled_up = False
self.transition_rect = setup.SCREEN.get_rect()
self.transition_alpha = 255
self.temp_magic = self.game_data['player stats']['magic']['current']
def make_player_action_dict(self):
"""
Make the dict to execute player actions.
"""
action_dict = {c.PLAYER_ATTACK: self.enter_player_attack_state,
c.CURE_SPELL: self.cast_cure,
c.FIRE_SPELL: self.cast_fire_blast,
c.DRINK_HEALING_POTION: self.enter_drink_healing_potion_state,
c.DRINK_ETHER_POTION: self.enter_drink_ether_potion_state}
return action_dict
def make_enemy_level_dict(self):
new_dict = {c.OVERWORLD: 1,
c.DUNGEON: 2,
c.DUNGEON2: 2,
c.DUNGEON3: 2,
c.DUNGEON4: 2,
c.DUNGEON5: 4}
return new_dict
def set_enemy_level(self, enemy_list):
dungeon_level_dict = self.make_enemy_level_dict()
for enemy in enemy_list:
enemy.level = dungeon_level_dict[self.previous]
    def get_experience_points(self):
        """
        Calculate experience points awarded for the battle.

        Each enemy is worth a random 5-10 points.  NOTE(review): unlike
        the gold reward, the enemy's level is not factored in here.
        """
        experience_total = 0
        for enemy in self.enemy_list:
            experience_total += (random.randint(5,10))
        return experience_total
def get_new_gold(self):
"""
Calculate the gold collected at the end of the battle.
"""
gold = 0
for enemy in self.enemy_list:
max_gold = enemy.level * 20
gold += (random.randint(1, max_gold))
return gold
def make_background(self):
"""
Make the blue/black background.
"""
background = pg.sprite.Sprite()
surface = pg.Surface(c.SCREEN_SIZE).convert()
surface.fill(c.BLACK_BLUE)
background.image = surface
background.rect = background.image.get_rect()
background_group = pg.sprite.Group(background)
return background_group
def make_enemies(self):
"""
Make the enemies for the battle. Return sprite group.
"""
pos_list = []
for column in range(3):
for row in range(3):
x = (column * 100) + 100
y = (row * 100) + 100
pos_list.append([x, y])
enemy_group = pg.sprite.Group()
if self.game_data['battle type']:
enemy = person.Enemy('evilwizard', 0, 0,
'down', 'battle resting')
enemy_group.add(enemy)
else:
if self.game_data['start of game']:
for enemy in range(3):
enemy_group.add(person.Enemy('devil', 0, 0,
'down', 'battle resting'))
self.game_data['start of game'] = False
else:
for enemy in range(random.randint(1, 6)):
enemy_group.add(person.Enemy('devil', 0, 0,
'down', 'battle resting'))
for i, enemy in enumerate(enemy_group):
enemy.rect.topleft = pos_list[i]
enemy.image = pg.transform.scale2x(enemy.image)
enemy.index = i
enemy.level = self.make_enemy_level_dict()[self.previous]
if enemy.name == 'evilwizard':
enemy.health = 100
else:
enemy.health = enemy.level * 4
enemy_list = [enemy for enemy in enemy_group]
return enemy_group, pos_list[0:len(enemy_group)], enemy_list
def make_player(self):
"""
Make the sprite for the player's character.
"""
player = person.Player('left', self.game_data, 630, 220, 'battle resting', 1)
player.image = pg.transform.scale2x(player.image)
return player
def make_selection_state_dict(self):
"""
Make a dictionary of states with arrow coordinates as keys.
"""
pos_list = self.arrow.make_select_action_pos_list()
state_list = [self.enter_select_enemy_state, self.enter_select_item_state,
self.enter_select_magic_state, self.try_to_run_away]
return dict(izip(pos_list, state_list))
def update(self, surface, keys, current_time):
"""
Update the battle state.
"""
self.current_time = current_time
self.check_input(keys)
self.check_timed_events()
self.check_if_battle_won()
self.enemy_group.update(current_time)
self.player.update(keys, current_time)
self.attack_animations.update()
self.info_box.update()
self.arrow.update(keys)
self.sword.update(current_time)
self.damage_points.update()
self.execute_player_actions()
self.draw_battle(surface)
def check_input(self, keys):
"""
Check user input to navigate GUI.
"""
if self.allow_input:
if keys[pg.K_SPACE]:
if self.state == c.SELECT_ACTION:
self.notify(c.CLICK2)
enter_state_function = self.select_action_state_dict[
self.arrow.rect.topleft]
enter_state_function()
elif self.state == c.SELECT_ENEMY:
self.notify(c.CLICK2)
self.player_actions.append(c.PLAYER_ATTACK)
self.enemies_to_attack.append(self.get_enemy_to_attack())
self.action_selected = True
elif self.state == c.SELECT_ITEM:
self.notify(c.CLICK2)
if self.arrow.index == (len(self.arrow.pos_list) - 1):
self.enter_select_action_state()
elif self.info_box.item_text_list[self.arrow.index][:14] == 'Healing Potion':
if 'Healing Potion' in self.game_data['player inventory']:
self.player_actions.append(c.DRINK_HEALING_POTION)
self.action_selected = True
elif self.info_box.item_text_list[self.arrow.index][:5] == 'Ether':
if 'Ether Potion' in self.game_data['player inventory']:
self.player_actions.append(c.DRINK_ETHER_POTION)
self.action_selected = True
elif self.state == c.SELECT_MAGIC:
self.notify(c.CLICK2)
if self.arrow.index == (len(self.arrow.pos_list) - 1):
self.enter_select_action_state()
elif self.info_box.magic_text_list[self.arrow.index] == 'Cure':
magic_points = self.game_data['player inventory']['Cure']['magic points']
if self.temp_magic >= magic_points:
self.temp_magic -= magic_points
self.player_actions.append(c.CURE_SPELL)
self.action_selected = True
elif self.info_box.magic_text_list[self.arrow.index] == 'Fire Blast':
magic_points = self.game_data['player inventory']['Fire Blast']['magic points']
if self.temp_magic >= magic_points:
self.temp_magic -= magic_points
self.player_actions.append(c.FIRE_SPELL)
self.action_selected = True
self.allow_input = False
if keys[pg.K_RETURN] == False and keys[pg.K_SPACE] == False:
self.allow_input = True
def check_timed_events(self):
"""
Check if amount of time has passed for timed events.
"""
timed_states = [c.PLAYER_DAMAGED,
c.ENEMY_DAMAGED,
c.ENEMY_DEAD,
c.DRINK_HEALING_POTION,
c.DRINK_ETHER_POTION]
long_delay = timed_states[1:]
if self.state in long_delay:
if (self.current_time - self.timer) > 1000:
if self.state == c.ENEMY_DAMAGED:
if self.player_actions:
self.player_action_dict[self.player_actions[0]]()
self.player_actions.pop(0)
else:
if len(self.enemy_list):
self.enter_enemy_attack_state()
else:
self.enter_battle_won_state()
elif (self.state == c.DRINK_HEALING_POTION or
self.state == c.CURE_SPELL or
self.state == c.DRINK_ETHER_POTION):
if self.player_actions:
self.player_action_dict[self.player_actions[0]]()
self.player_actions.pop(0)
else:
if len(self.enemy_list):
self.enter_enemy_attack_state()
else:
self.enter_battle_won_state()
self.timer = self.current_time
elif self.state == c.FIRE_SPELL or self.state == c.CURE_SPELL:
if (self.current_time - self.timer) > 1500:
if self.player_actions:
if not len(self.enemy_list):
self.enter_battle_won_state()
else:
self.player_action_dict[self.player_actions[0]]()
self.player_actions.pop(0)
else:
if len(self.enemy_list):
self.enter_enemy_attack_state()
else:
self.enter_battle_won_state()
self.timer = self.current_time
elif self.state == c.RUN_AWAY:
if (self.current_time - self.timer) > 1500:
self.end_battle()
elif self.state == c.BATTLE_WON:
if (self.current_time - self.timer) > 1800:
self.enter_show_gold_state()
elif self.state == c.SHOW_GOLD:
if (self.current_time - self.timer) > 1800:
self.enter_show_experience_state()
elif self.state == c.LEVEL_UP:
if (self.current_time - self.timer) > 2200:
if self.game_data['player stats']['Level'] == 3:
self.enter_two_actions_per_turn_state()
else:
self.end_battle()
elif self.state == c.TWO_ACTIONS:
if (self.current_time - self.timer) > 3000:
self.end_battle()
elif self.state == c.SHOW_EXPERIENCE:
if (self.current_time - self.timer) > 2200:
player_stats = self.game_data['player stats']
player_stats['experience to next level'] -= self.experience_points
if player_stats['experience to next level'] <= 0:
extra_experience = player_stats['experience to next level'] * -1
player_stats['Level'] += 1
player_stats['health']['maximum'] += int(player_stats['health']['maximum']*.25)
player_stats['magic']['maximum'] += int(player_stats['magic']['maximum']*.20)
new_experience = int((player_stats['Level'] * 50) * .75)
player_stats['experience to next level'] = new_experience - extra_experience
self.enter_level_up_state()
self.just_leveled_up = True
else:
self.end_battle()
elif self.state == c.PLAYER_DAMAGED:
if (self.current_time - self.timer) > 600:
if self.enemy_index == (len(self.enemy_list) - 1):
if self.run_away:
self.enter_run_away_state()
else:
self.enter_select_action_state()
else:
self.switch_enemy()
self.timer = self.current_time
def check_if_battle_won(self):
"""
Check if state is SELECT_ACTION and there are no enemies left.
"""
if self.state == c.SELECT_ACTION:
if len(self.enemy_group) == 0:
self.enter_battle_won_state()
def notify(self, event):
"""
Notify observer of event.
"""
for new_observer in self.observers:
new_observer.on_notify(event)
def end_battle(self):
"""
End battle and flip back to previous state.
"""
if self.game_data['battle type'] == 'evilwizard':
self.game_data['crown quest'] = True
self.game_data['talked to king'] = True
self.game_data['last state'] = self.name
self.game_data['battle counter'] = random.randint(50, 255)
self.game_data['battle type'] = None
self.state = 'transition out'
def attack_enemy(self, enemy_damage):
enemy = self.player.attacked_enemy
enemy.health -= enemy_damage
self.set_enemy_indices()
if enemy:
enemy.enter_knock_back_state()
if enemy.health <= 0:
self.enemy_list.pop(enemy.index)
enemy.state = c.FADE_DEATH
self.arrow.remove_pos(self.player.attacked_enemy)
self.enemy_index = 0
def set_enemy_indices(self):
for i, enemy in enumerate(self.enemy_list):
enemy.index = i
def draw_battle(self, surface):
"""Draw all elements of battle state"""
self.background.draw(surface)
self.enemy_group.draw(surface)
self.attack_animations.draw(surface)
self.sword.draw(surface)
surface.blit(self.player.image, self.player.rect)
surface.blit(self.info_box.image, self.info_box.rect)
surface.blit(self.select_box.image, self.select_box.rect)
surface.blit(self.arrow.image, self.arrow.rect)
self.player_health_box.draw(surface)
self.damage_points.draw(surface)
self.draw_transition(surface)
def draw_transition(self, surface):
"""
Fade in and out of state.
"""
if self.state == 'transition in':
transition_image = pg.Surface(self.transition_rect.size)
transition_image.fill(c.TRANSITION_COLOR)
transition_image.set_alpha(self.transition_alpha)
surface.blit(transition_image, self.transition_rect)
self.transition_alpha -= c.TRANSITION_SPEED
if self.transition_alpha <= 0:
self.state = c.SELECT_ACTION
self.transition_alpha = 0
elif self.state == 'transition out':
transition_image = pg.Surface(self.transition_rect.size)
transition_image.fill(c.TRANSITION_COLOR)
transition_image.set_alpha(self.transition_alpha)
surface.blit(transition_image, self.transition_rect)
self.transition_alpha += c.TRANSITION_SPEED
if self.transition_alpha >= 255:
self.done = True
elif self.state == c.DEATH_FADE:
transition_image = pg.Surface(self.transition_rect.size)
transition_image.fill(c.TRANSITION_COLOR)
transition_image.set_alpha(self.transition_alpha)
surface.blit(transition_image, self.transition_rect)
self.transition_alpha += c.DEATH_TRANSITION_SPEED
if self.transition_alpha >= 255:
self.done = True
self.next = c.DEATH_SCENE
def player_damaged(self, damage):
self.game_data['player stats']['health']['current'] -= damage
if self.game_data['player stats']['health']['current'] <= 0:
self.game_data['player stats']['health']['current'] = 0
self.state = c.DEATH_FADE
def player_healed(self, heal, magic_points=0):
"""
Add health from potion to game data.
"""
health = self.game_data['player stats']['health']
health['current'] += heal
if health['current'] > health['maximum']:
health['current'] = health['maximum']
if self.state == c.DRINK_HEALING_POTION:
self.game_data['player inventory']['Healing Potion']['quantity'] -= 1
if self.game_data['player inventory']['Healing Potion']['quantity'] == 0:
del self.game_data['player inventory']['Healing Potion']
elif self.state == c.CURE_SPELL:
self.game_data['player stats']['magic']['current'] -= magic_points
def magic_boost(self, magic_points):
"""
Add magic from ether to game data.
"""
magic = self.game_data['player stats']['magic']
magic['current'] += magic_points
self.temp_magic += magic_points
if magic['current'] > magic['maximum']:
magic['current'] = magic['maximum']
self.game_data['player inventory']['Ether Potion']['quantity'] -= 1
if not self.game_data['player inventory']['Ether Potion']['quantity']:
del self.game_data['player inventory']['Ether Potion']
def set_timer_to_current_time(self):
"""Set the timer to the current time."""
self.timer = self.current_time
def cast_fire_blast(self):
"""
Cast fire blast on all enemies.
"""
self.notify(c.FIRE)
self.state = self.info_box.state = c.FIRE_SPELL
POWER = self.inventory['Fire Blast']['power']
MAGIC_POINTS = self.inventory['Fire Blast']['magic points']
self.game_data['player stats']['magic']['current'] -= MAGIC_POINTS
for enemy in self.enemy_list:
DAMAGE = random.randint(POWER//2, POWER)
self.damage_points.add(
attackitems.HealthPoints(DAMAGE, enemy.rect.topright))
enemy.health -= DAMAGE
posx = enemy.rect.x - 32
posy = enemy.rect.y - 64
fire_sprite = attack.Fire(posx, posy)
self.attack_animations.add(fire_sprite)
if enemy.health <= 0:
enemy.kill()
self.arrow.remove_pos(enemy)
else:
enemy.enter_knock_back_state()
self.enemy_list = [enemy for enemy in self.enemy_list if enemy.health > 0]
self.enemy_index = 0
self.arrow.index = 0
self.arrow.state = 'invisible'
self.set_timer_to_current_time()
def cast_cure(self):
"""
Cast cure spell on player.
"""
self.state = c.CURE_SPELL
HEAL_AMOUNT = self.inventory['Cure']['power']
MAGIC_POINTS = self.inventory['Cure']['magic points']
self.player.healing = True
self.set_timer_to_current_time()
self.arrow.state = 'invisible'
self.enemy_index = 0
self.damage_points.add(
attackitems.HealthPoints(HEAL_AMOUNT, self.player.rect.topright, False))
self.player_healed(HEAL_AMOUNT, MAGIC_POINTS)
self.info_box.state = c.DRINK_HEALING_POTION
self.notify(c.POWERUP)
def enter_select_enemy_state(self):
"""
Transition battle into the select enemy state.
"""
self.state = self.arrow.state = c.SELECT_ENEMY
self.arrow.index = 0
def enter_select_item_state(self):
"""
Transition battle into the select item state.
"""
self.state = self.info_box.state = c.SELECT_ITEM
self.arrow.become_select_item_state()
def enter_select_magic_state(self):
"""
Transition battle into the select magic state.
"""
self.state = self.info_box.state = c.SELECT_MAGIC
self.arrow.become_select_magic_state()
def try_to_run_away(self):
"""
Transition battle into the run away state.
"""
self.run_away = True
self.arrow.state = 'invisible'
self.enemy_index = 0
self.enter_enemy_attack_state()
def enter_enemy_attack_state(self):
"""
Transition battle into the Enemy attack state.
"""
self.state = self.info_box.state = c.ENEMY_ATTACK
enemy = self.enemy_list[self.enemy_index]
enemy.enter_enemy_attack_state()
def enter_player_attack_state(self):
"""
Transition battle into the Player attack state.
"""
self.state = self.info_box.state = c.PLAYER_ATTACK
enemy_to_attack = self.enemies_to_attack.pop(0)
if enemy_to_attack in self.enemy_list:
self.player.enter_attack_state(enemy_to_attack)
else:
if self.enemy_list:
self.player.enter_attack_state(self.enemy_list[0])
else:
self.enter_battle_won_state()
self.arrow.state = 'invisible'
def get_enemy_to_attack(self):
"""
Get enemy for player to attack by arrow position.
"""
enemy_posx = self.arrow.rect.x + 60
enemy_posy = self.arrow.rect.y - 20
enemy_pos = (enemy_posx, enemy_posy)
enemy_to_attack = None
for enemy in self.enemy_list:
if enemy.rect.topleft == enemy_pos:
enemy_to_attack = enemy
return enemy_to_attack
def enter_drink_healing_potion_state(self):
"""
Transition battle into the Drink Healing Potion state.
"""
self.state = self.info_box.state = c.DRINK_HEALING_POTION
self.player.healing = True
self.set_timer_to_current_time()
self.arrow.state = 'invisible'
self.enemy_index = 0
self.damage_points.add(
attackitems.HealthPoints(30,
self.player.rect.topright,
False))
self.player_healed(30)
self.notify(c.POWERUP)
def enter_drink_ether_potion_state(self):
"""
Transition battle into the Drink Ether Potion state.
"""
self.state = self.info_box.state = c.DRINK_ETHER_POTION
self.player.healing = True
self.arrow.state = 'invisible'
self.enemy_index = 0
self.damage_points.add(
attackitems.HealthPoints(30,
self.player.rect.topright,
False,
True))
self.magic_boost(30)
self.set_timer_to_current_time()
self.notify(c.POWERUP)
def enter_select_action_state(self):
"""
Transition battle into the select action state
"""
self.state = self.info_box.state = c.SELECT_ACTION
self.arrow.index = 0
self.arrow.state = self.state
def enter_player_damaged_state(self):
"""
Transition battle into the player damaged state.
"""
self.state = self.info_box.state = c.PLAYER_DAMAGED
if self.enemy_index > len(self.enemy_list) - 1:
self.enemy_index = 0
enemy = self.enemy_list[self.enemy_index]
player_damage = enemy.calculate_hit(self.inventory['equipped armor'],
self.inventory)
self.damage_points.add(
attackitems.HealthPoints(player_damage,
self.player.rect.topright))
self.info_box.set_player_damage(player_damage)
self.set_timer_to_current_time()
self.player_damaged(player_damage)
if player_damage:
sfx_num = random.randint(1,3)
self.notify('punch{}'.format(sfx_num))
self.player.damaged = True
self.player.enter_knock_back_state()
else:
self.notify(c.MISS)
def enter_enemy_damaged_state(self):
"""
Transition battle into the enemy damaged state.
"""
self.state = self.info_box.state = c.ENEMY_DAMAGED
enemy_damage = self.player.calculate_hit()
self.damage_points.add(
attackitems.HealthPoints(enemy_damage,
self.player.attacked_enemy.rect.topright))
self.info_box.set_enemy_damage(enemy_damage)
self.arrow.index = 0
self.attack_enemy(enemy_damage)
self.set_timer_to_current_time()
def switch_enemy(self):
"""
Switch which enemy the player is attacking.
"""
if self.enemy_index < len(self.enemy_list) - 1:
self.enemy_index += 1
self.enter_enemy_attack_state()
def enter_run_away_state(self):
"""
Transition battle into the run away state.
"""
self.state = self.info_box.state = c.RUN_AWAY
self.arrow.state = 'invisible'
self.player.state = c.RUN_AWAY
self.set_timer_to_current_time()
self.notify(c.RUN_AWAY)
def enter_battle_won_state(self):
"""
Transition battle into the battle won state.
"""
self.notify(c.BATTLE_WON)
self.state = self.info_box.state = c.BATTLE_WON
self.player.state = c.VICTORY_DANCE
self.set_timer_to_current_time()
def enter_show_gold_state(self):
"""
Transition battle into the show gold state.
"""
self.inventory['GOLD']['quantity'] += self.new_gold
self.state = self.info_box.state = c.SHOW_GOLD
self.set_timer_to_current_time()
def enter_show_experience_state(self):
"""
Transition battle into the show experience state.
"""
self.state = self.info_box.state = c.SHOW_EXPERIENCE
self.set_timer_to_current_time()
def enter_level_up_state(self):
"""
Transition battle into the LEVEL UP state.
"""
self.state = self.info_box.state = c.LEVEL_UP
self.info_box.reset_level_up_message()
self.set_timer_to_current_time()
def enter_two_actions_per_turn_state(self):
self.state = self.info_box.state = c.TWO_ACTIONS
self.set_timer_to_current_time()
def execute_player_actions(self):
"""
Execute the player actions.
"""
if self.player_level < 3:
if self.player_actions:
enter_state = self.player_action_dict[self.player_actions[0]]
enter_state()
self.player_actions.pop(0)
else:
if len(self.player_actions) == 2:
enter_state = self.player_action_dict[self.player_actions[0]]
enter_state()
self.player_actions.pop(0)
self.action_selected = False
else:
if self.action_selected:
self.enter_select_action_state()
self.action_selected = False
| justinmeister/The-Stolen-Crown-RPG | data/states/battle.py | Python | mit | 29,392 | [
"BLAST"
] | 4a57c56754584952e8e550a53c3b5fb502a10596b60a2ec1f2ba8cdd6b48583b |
#!/usr/bin/env python
#
# This example demonstrates the creation of multiple actors and the
# manipulation of their properties and transformations. It is a
# derivative of Cone.py, see that example for more information.
#
import vtk
import time

#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource()
cone.SetHeight(3.0)
cone.SetRadius(1.0)
cone.SetResolution(10)

#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())

#
# Create an actor to represent the first cone. The actor's properties are
# modified to give it different surface properties. By default, an actor
# is created with a property so the GetProperty() method can be used.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(0.2, 0.63, 0.79)
coneActor.GetProperty().SetDiffuse(0.7)
coneActor.GetProperty().SetSpecular(0.4)
coneActor.GetProperty().SetSpecularPower(20)

#
# Create a property and directly manipulate it. Assign it to the
# second actor.  (Named "cone_property" rather than "property" so the
# Python builtin of the same name is not shadowed.)
#
cone_property = vtk.vtkProperty()
cone_property.SetColor(1.0, 0.3882, 0.2784)
cone_property.SetDiffuse(0.7)
cone_property.SetSpecular(0.4)
cone_property.SetSpecularPower(20)

#
# Create a second actor and a property. The property is directly
# manipulated and then assigned to the actor. In this way, a single
# property can be shared among many actors. Note also that we use the
# same mapper as the first actor did. This way we avoid duplicating
# geometry, which may save lots of memory if the geometry is large.
coneActor2 = vtk.vtkActor()
coneActor2.SetMapper(coneMapper)
# coneActor2.GetProperty().SetColor(0.2, 0.63, 0.79)
coneActor2.SetProperty(cone_property)
coneActor2.SetPosition(0, 2, 0)

#
# Create the Renderer and assign actors to it. A renderer is like a
# viewport. It is part or all of a window on the screen and it is responsible
# for drawing the actors it has. We also set the background color here.
#
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.AddActor(coneActor2)
ren1.SetBackground(0.1, 0.2, 0.4)

#
# Finally we create the render window which will show up on the screen
# We put our renderer into the render window using AddRenderer. We also
# set the size to be 300 pixels by 300.
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300, 300)

#
# Now we loop over 360 degrees and render the cone each time.
#
# for i in range(0, 360):
#     time.sleep(0.03)
#     renWin.Render()
#     ren1.GetActiveCamera().Azimuth(1)
| CMUSV-VisTrails/WorkflowRecommendation | examples/vtk_examples/Tutorial/Step4/Cone4.py | Python | bsd-3-clause | 3,069 | [
"VTK"
] | b37d09f44b78a0716e6026a249beb711f37e28f9316bb815668dc45725bb7808 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module contains the main object used to identify the coordination environments in a given structure.
If you use this module, please cite the following:
David Waroquiers, Xavier Gonze, Gian-Marco Rignanese, Cathrin Welker-Nieuwoudt, Frank Rosowski,
Michael Goebel, Stephan Schenk, Peter Degelmann, Rute Andre, Robert Glaum, and Geoffroy Hautier,
"Statistical analysis of coordination environments in oxides",
Chem. Mater., 2017, 29 (19), pp 8346–8360,
DOI: 10.1021/acs.chemmater.7b02766
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import itertools
import logging
import time
from collections import OrderedDict
from numpy.linalg import svd
from numpy.linalg import norm
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.bond_valence import BVAnalyzer
import numpy as np
from random import shuffle
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import \
collinear, separation_in_list
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import \
sort_separation, sort_separation_tuple
from pymatgen.analysis.chemenv.utils.defs_utils import chemenv_citations
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import \
AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import \
EXPLICIT_PERMUTATIONS
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import \
SEPARATION_PLANE
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import \
ChemicalEnvironments
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import \
StructureEnvironments
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import \
LightStructureEnvironments
from pymatgen.analysis.chemenv.coordination_environments.voronoi import \
DetailedVoronoiContainer
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import \
MultiWeightsChemenvStrategy
# Module-wide debug flag (extra diagnostic behaviour when True).
debug = False
# Candidate distance tolerances used when analysing neighbor distances.
# NOTE(review): exact consumer of this list is defined elsewhere in the
# module — verify before relying on the semantics of these values.
DIST_TOLERANCES = [0.02, 0.05, 0.1, 0.2, 0.3]
class AbstractGeometry(object):
    """
    Class used to describe a geometry (perfect or distorted).

    The geometry is defined by a central site and the coordinates of its
    neighbors.  On construction, the neighbor points are expressed with
    respect to a reference point ("centre") which, depending on
    ``centering_type``, is the central site itself, the centroid of the
    neighbors, or — in the 'standard' scheme — the central site for
    coordination numbers < 5 and the centroid otherwise.
    """

    def __init__(self, central_site=None, bare_coords=None,
                 centering_type='standard',
                 include_central_site_in_centroid=False,
                 optimization=None):
        """
        Constructor for the abstract geometry.

        :param central_site: Coordinates of the central site
        :param bare_coords: Coordinates of the neighbors of the central site
        :param centering_type: How to center the abstract geometry
            ('standard', 'central_site' or 'centroid')
        :param include_central_site_in_centroid: When the centering is on the
            centroid, the central site is included if this parameter is set
            to True.
        :param optimization: Kept for interface compatibility (not used in
            the constructor itself).
        :raise ValueError: if the parameters are not consistent
        """
        bcoords = np.array(bare_coords)
        self.bare_centre = np.array(central_site)
        self.bare_points_without_centre = bcoords
        self.bare_points_with_centre = np.array(central_site)
        self.bare_points_with_centre = np.concatenate(([self.bare_points_with_centre], bcoords))
        # Centroids of the neighbor points, with and without the central site.
        self.centroid_without_centre = np.mean(self.bare_points_without_centre,
                                               axis=0)
        self.centroid_with_centre = np.mean(self.bare_points_with_centre,
                                            axis=0)
        # Pre-computed point sets for every combination of
        # with/without central site (wcs/wocs) and reference point:
        # central site (csc), centroid with centre (ctwcc),
        # centroid without centre (ctwocc).
        self._points_wcs_csc = self.bare_points_with_centre - self.bare_centre
        self._points_wocs_csc = self.bare_points_without_centre - self.bare_centre
        self._points_wcs_ctwcc = self.bare_points_with_centre - self.centroid_with_centre
        self._points_wocs_ctwcc = self.bare_points_without_centre - self.centroid_with_centre
        self._points_wcs_ctwocc = self.bare_points_with_centre - self.centroid_without_centre
        self._points_wocs_ctwocc = self.bare_points_without_centre - self.centroid_without_centre

        self.centering_type = centering_type
        self.include_central_site_in_centroid = include_central_site_in_centroid
        self.bare_central_site = np.array(central_site)
        # The 'standard' scheme centers on the central site for coordination
        # numbers < 5 and on the centroid for coordination numbers >= 5.
        if centering_type == 'standard':
            effective_centering = ('central_site' if len(bare_coords) < 5
                                   else 'centroid')
        else:
            effective_centering = centering_type
        if effective_centering == 'central_site':
            if include_central_site_in_centroid:
                raise ValueError(
                    "The center is the central site, no calculation of the centroid, "
                    "variable include_central_site_in_centroid should be set to False")
            if central_site is None:
                raise ValueError(
                    "Centering_type is central_site, the central site should be given")
            self.centre = np.array(central_site)
        elif effective_centering == 'centroid':
            total = np.sum(bcoords, axis=0)
            if include_central_site_in_centroid:
                if central_site is None:
                    raise ValueError(
                        "The centroid includes the central site but no central site is given")
                total += self.bare_centre
                # np.float (a deprecated alias of the builtin float, removed
                # in NumPy >= 1.24) replaced by the builtin float.
                self.centre = total / (float(len(bare_coords)) + 1.0)
            else:
                self.centre = total / float(len(bare_coords))
        self._bare_coords = self.bare_points_without_centre
        self._coords = self._bare_coords - self.centre
        self.central_site = self.bare_central_site - self.centre
        self.coords = self._coords
        self.bare_coords = self._bare_coords

    def __str__(self):
        """
        String representation of the AbstractGeometry.

        :return: String representation of the AbstractGeometry
        """
        outs = [
            '\nAbstract Geometry with {n} points :'.format(n=len(self.coords))]
        for pp in self.coords:
            outs.append('  {pp}'.format(pp=pp))
        if self.centering_type == 'standard':
            if self.include_central_site_in_centroid:
                outs.append(
                    'Points are referenced to the central site for coordination numbers < 5'
                    ' and to the centroid (calculated with the central site) for coordination'
                    ' numbers >= 5 : {c}\n'.format(c=self.centre))
            else:
                outs.append(
                    'Points are referenced to the central site for coordination numbers < 5'
                    ' and to the centroid (calculated without the central site) for coordination'
                    ' numbers >= 5 : {c}\n'.format(c=self.centre))
        elif self.centering_type == 'central_site':
            outs.append(
                'Points are referenced to the central site : {c}\n'.format(
                    c=self.centre))
        elif self.centering_type == 'centroid':
            if self.include_central_site_in_centroid:
                outs.append('Points are referenced to the centroid'
                            ' (calculated with the central site) :\n  {c}\n'.format(
                    c=self.centre))
            else:
                outs.append('Points are referenced to the centroid'
                            ' (calculated without the central site) :\n  {c}\n'.format(
                    c=self.centre))
        return '\n'.join(outs)

    @classmethod
    def from_cg(cls, cg, centering_type='standard',
                include_central_site_in_centroid=False):
        """
        Build an AbstractGeometry from a model coordination geometry.

        :param cg: CoordinationGeometry providing the central site and the
            model points.
        :param centering_type: How to center the abstract geometry
        :param include_central_site_in_centroid: Whether the central site is
            included in the centroid (only relevant for centroid centering).
        :return: An AbstractGeometry built on the model points of ``cg``.
        """
        central_site = cg.get_central_site()
        # builtin float used as the dtype (np.float is removed in new NumPy).
        bare_coords = [np.array(pt, float) for pt in cg.points]
        return cls(central_site=central_site, bare_coords=bare_coords,
                   centering_type=centering_type,
                   include_central_site_in_centroid=include_central_site_in_centroid)

    def points_wcs_csc(self, permutation=None):
        """Points with central site, centered on the central site."""
        if permutation is None:
            return self._points_wcs_csc
        # The central site (row 0) is kept first; only neighbor rows permute.
        return np.concatenate((self._points_wcs_csc[0:1], self._points_wocs_csc.take(permutation, axis=0)))

    def points_wocs_csc(self, permutation=None):
        """Points without central site, centered on the central site."""
        if permutation is None:
            return self._points_wocs_csc
        return self._points_wocs_csc.take(permutation, axis=0)

    def points_wcs_ctwcc(self, permutation=None):
        """Points with central site, centered on the centroid (with centre)."""
        if permutation is None:
            return self._points_wcs_ctwcc
        return np.concatenate((self._points_wcs_ctwcc[0:1], self._points_wocs_ctwcc.take(permutation, axis=0)))

    def points_wocs_ctwcc(self, permutation=None):
        """Points without central site, centered on the centroid (with centre)."""
        if permutation is None:
            return self._points_wocs_ctwcc
        return self._points_wocs_ctwcc.take(permutation, axis=0)

    def points_wcs_ctwocc(self, permutation=None):
        """Points with central site, centered on the centroid (without centre)."""
        if permutation is None:
            return self._points_wcs_ctwocc
        return np.concatenate((self._points_wcs_ctwocc[0:1], self._points_wocs_ctwocc.take(permutation, axis=0)))

    def points_wocs_ctwocc(self, permutation=None):
        """Points without central site, centered on the centroid (without centre)."""
        if permutation is None:
            return self._points_wocs_ctwocc
        return self._points_wocs_ctwocc.take(permutation, axis=0)

    @property
    def cn(self):
        """Coordination number (number of neighbor points)."""
        return len(self.coords)

    @property
    def coordination_number(self):
        """Coordination number (number of neighbor points)."""
        return len(self.coords)
def symmetry_measure(points_distorted, points_perfect):
    """
    Compute the continuous symmetry measure (CSM) of the (distorted) set of
    points ``points_distorted`` with respect to the (perfect) set of points
    ``points_perfect``.

    :param points_distorted: List of points describing a given (distorted)
        polyhedron for which the symmetry measure has to be computed with
        respect to the model polyhedron described by ``points_perfect``.
    :param points_perfect: List of "perfect" points describing a given model
        polyhedron.
    :return: Dict with keys 'symmetry_measure', 'scaling_factor' and
        'rotation_matrix'.
    """
    # A single point is trivially symmetric: CSM is 0.0 by definition.
    if len(points_distorted) == 1:
        return {'symmetry_measure': 0.0,
                'scaling_factor': None,
                'rotation_matrix': None}
    # Least-squares rotation aligning the distorted points on the perfect ones.
    rotation = find_rotation(points_distorted=points_distorted,
                             points_perfect=points_perfect)
    # Least-squares scaling between the rotated distorted points and the
    # perfect points.
    scaling, rotated, points_perfect = find_scaling_factor(
        points_distorted=points_distorted,
        points_perfect=points_perfect,
        rot=rotation)
    # CSM as in Eq. 1 of Pinsky et al., Inorganic Chemistry 37, 5575 (1998).
    rotated = scaling * rotated
    delta = points_perfect - rotated
    csm = np.tensordot(delta, delta) / np.tensordot(points_perfect, points_perfect) * 100.0
    return {'symmetry_measure': csm,
            'scaling_factor': scaling,
            'rotation_matrix': rotation}
def find_rotation(points_distorted, points_perfect):
    """
    Find the rotation matrix that aligns the (distorted) set of points
    ``points_distorted`` with respect to the (perfect) set of points
    ``points_perfect`` in a least-square sense.

    :param points_distorted: List of points describing a given (distorted)
        polyhedron for which the rotation that aligns these points in a
        least-square sense to the set of perfect points ``points_perfect``
        is looked for.
    :param points_perfect: List of "perfect" points describing a given model
        polyhedron.
    :return: The rotation matrix
    """
    # Cross-correlation of the two point sets; its SVD yields the optimal
    # least-squares rotation (orthogonal Procrustes problem).
    correlation = np.dot(np.transpose(points_distorted), points_perfect)
    u_mat, _, vt_mat = svd(correlation)
    return np.dot(np.transpose(vt_mat), np.transpose(u_mat))
def find_scaling_factor(points_distorted, points_perfect, rot):
    """
    Find the scaling factor between the (distorted) set of points
    ``points_distorted`` and the (perfect) set of points ``points_perfect``
    in a least-square sense.

    :param points_distorted: List of points describing a given (distorted)
        polyhedron for which the scaling factor has to be obtained.
    :param points_perfect: List of "perfect" points describing a given model
        polyhedron.
    :param rot: The rotation matrix
    :return: The scaling factor between the two structures, the rotated set
        of (distorted) points and the perfect points.
    """
    # Apply the rotation to the distorted points first.
    rotated = np.transpose(np.matmul(rot, np.transpose(points_distorted)))
    # Least-squares scaling: <rotated, perfect> / <rotated, rotated>.
    numerator = np.tensordot(rotated, points_perfect)
    denominator = np.tensordot(rotated, rotated)
    return numerator / denominator, rotated, points_perfect
class LocalGeometryFinder(object):
"""
Main class used to find the local environments in a structure
"""
DEFAULT_BVA_DISTANCE_SCALE_FACTOR = 1.0
BVA_DISTANCE_SCALE_FACTORS = {'experimental': 1.0, 'GGA_relaxed': 1.015,
'LDA_relaxed': 0.995}
DEFAULT_SPG_ANALYZER_OPTIONS = {'symprec': 1e-3, 'angle_tolerance': 5}
STRUCTURE_REFINEMENT_NONE = 'none'
STRUCTURE_REFINEMENT_REFINED = 'refined'
STRUCTURE_REFINEMENT_SYMMETRIZED = 'symmetrized'
DEFAULT_STRATEGY = MultiWeightsChemenvStrategy.stats_article_weights_parameters()
PRESETS = {'DEFAULT': {'maximum_distance_factor': 2.0,
'minimum_angle_factor': 0.05,
'voronoi_normalized_distance_tolerance': 0.05,
'voronoi_normalized_angle_tolerance': 0.03,
'optimization': 2}}
def __init__(self, permutations_safe_override=False,
plane_ordering_override=True, debug_level=None,
plane_safe_permutations=False,
only_symbols=None):
"""
Constructor for the LocalGeometryFinder, initializes the list of coordination geometries
:param permutations_safe_override: If set to True, all permutations are tested (very time-consuming for large
coordination numbers!)
:param plane_ordering_override: If set to False, the ordering of the points in the plane is disabled
"""
self.allcg = AllCoordinationGeometries(
permutations_safe_override=permutations_safe_override,
only_symbols=only_symbols)
self.permutations_safe_override = permutations_safe_override
self.plane_ordering_override = plane_ordering_override
self.plane_safe_permutations = plane_safe_permutations
self.setup_parameters(centering_type='centroid',
include_central_site_in_centroid=True,
bva_distance_scale_factor=None,
structure_refinement=self.STRUCTURE_REFINEMENT_NONE)
print(chemenv_citations())
def setup_parameters(self, centering_type='standard',
include_central_site_in_centroid=False,
bva_distance_scale_factor=None,
structure_refinement=STRUCTURE_REFINEMENT_REFINED,
spg_analyzer_options=None):
"""
Setup of the parameters for the coordination geometry finder. A reference point for the geometries has to be
chosen. This can be the centroid of the structure (including or excluding the atom for which the coordination
geometry is looked for) or the atom itself. In the 'standard' centering_type, the reference point is the central
atom for coordination numbers 1, 2, 3 and 4 and the centroid for coordination numbers > 4.
:param centering_type: Type of the reference point (centering) 'standard', 'centroid' or 'central_site'
:param include_central_site_in_centroid: In case centering_type is 'centroid', the central site is included if
this value is set to True.
:param bva_distance_scale_factor: Scaling factor for the bond valence analyzer (this might be different whether
the structure is an experimental one, an LDA or a GGA relaxed one, or any
other relaxation scheme (where under- or over-estimation of bond lengths
is known).
:param structure_refinement: Refinement of the structure. Can be "none", "refined" or "symmetrized".
:param spg_analyzer_options: Options for the SpaceGroupAnalyzer (dictionary specifying "symprec"
and "angle_tolerance". See pymatgen's SpaceGroupAnalyzer for more information.
"""
self.centering_type = centering_type
self.include_central_site_in_centroid = include_central_site_in_centroid
if bva_distance_scale_factor is not None:
self.bva_distance_scale_factor = bva_distance_scale_factor
else:
self.bva_distance_scale_factor = self.DEFAULT_BVA_DISTANCE_SCALE_FACTOR
self.structure_refinement = structure_refinement
if spg_analyzer_options is None:
self.spg_analyzer_options = self.DEFAULT_SPG_ANALYZER_OPTIONS
else:
self.spg_analyzer_options = spg_analyzer_options
def setup_parameter(self, parameter, value):
"""
Setup of one specific parameter to the given value. The other parameters are unchanged. See setup_parameters
method for the list of possible parameters
:param parameter: Parameter to setup/update
:param value: Value of the parameter
"""
self.__dict__[parameter] = value
def setup_structure(self, structure):
"""
Sets up the structure for which the coordination geometries have to be identified. The structure is analyzed
with the space group analyzer and a refined structure is used
:param structure: A pymatgen Structure
:param
"""
self.initial_structure = structure.copy()
if self.structure_refinement == self.STRUCTURE_REFINEMENT_NONE:
self.structure = structure.copy()
self.spg_analyzer = None
self.symmetrized_structure = None
else:
self.spg_analyzer = SpacegroupAnalyzer(self.initial_structure,
symprec=
self.spg_analyzer_options[
'symprec'],
angle_tolerance=
self.spg_analyzer_options[
'angle_tolerance'])
if self.structure_refinement == self.STRUCTURE_REFINEMENT_REFINED:
self.structure = self.spg_analyzer.get_refined_structure()
self.symmetrized_structure = None
elif self.structure_refinement == self.STRUCTURE_REFINEMENT_SYMMETRIZED:
self.structure = self.spg_analyzer.get_refined_structure()
self.spg_analyzer_refined = SpacegroupAnalyzer(self.structure,
symprec=
self.spg_analyzer_options[
'symprec'],
angle_tolerance=
self.spg_analyzer_options
[
'angle_tolerance'])
self.symmetrized_structure = self.spg_analyzer_refined.get_symmetrized_structure()
    def get_structure(self):
        """
        Return the pymatgen Structure that has been set up for the
        identification of geometries.

        Note that this may differ from the structure originally passed to
        :meth:`setup_structure` if refinement/symmetrization was requested
        (see ``structure_refinement``).

        :return: The pymatgen Structure used for the identification of
            geometries.
        """
        return self.structure
def set_structure(self, lattice, species, coords, coords_are_cartesian):
"""
Sets up the pymatgen structure for which the coordination geometries have to be identified starting from the
lattice, the species and the coordinates
:param lattice: The lattice of the structure
:param species: The species on the sites
:param coords: The coordinates of the sites
:param coords_are_cartesian: If set to True, the coordinates are given in cartesian coordinates
"""
self.setup_structure(
Structure(lattice, species, coords, coords_are_cartesian))
def compute_coordination_environments(self, structure, indices=None, only_cations=True, strategy=DEFAULT_STRATEGY,
valences='bond-valence-analysis', initial_structure_environments=None):
self.setup_structure(structure=structure)
if valences == 'bond-valence-analysis':
bva = BVAnalyzer()
try:
vals = bva.get_valences(structure=structure)
except ValueError:
vals = 'undefined'
else:
if valences == 'undefined':
vals = valences
else:
if len(valences) != len(structure):
raise ValueError('Valences do not match the number of sites in the structure')
vals = valences
# TODO: add something to compute only the neighbors sets needed for the strategy.
se = self.compute_structure_environments(only_cations=only_cations, only_indices=indices, valences=vals,
initial_structure_environments=initial_structure_environments)
lse = LightStructureEnvironments.from_structure_environments(strategy=strategy, structure_environments=se)
return lse.coordination_environments
def compute_structure_environments(self,
excluded_atoms=None,
only_atoms=None,
only_cations=True,
only_indices=None,
maximum_distance_factor=PRESETS['DEFAULT']['maximum_distance_factor'],
minimum_angle_factor=PRESETS['DEFAULT']['minimum_angle_factor'],
max_cn=None,
min_cn=None,
only_symbols=None,
valences='undefined',
additional_conditions=None,
info=None,
timelimit=None,
initial_structure_environments=None,
get_from_hints=False,
voronoi_normalized_distance_tolerance=PRESETS['DEFAULT']
['voronoi_normalized_distance_tolerance'],
voronoi_normalized_angle_tolerance=PRESETS['DEFAULT']
['voronoi_normalized_angle_tolerance'],
recompute=None,
optimization=PRESETS['DEFAULT']['optimization']):
"""
Computes and returns the StructureEnvironments object containing all the information about the coordination
environments in the structure
:param excluded_atoms: Atoms for which the coordination geometries does not have to be identified
:param only_atoms: If not set to None, atoms for which the coordination geometries have to be identified
:param only_cations: If set to True, will only compute environments for cations
:param only_indices: If not set to None, will only compute environments the atoms of the given indices
:param maximum_distance_factor: If not set to None, neighbors beyond
maximum_distance_factor*closest_neighbor_distance are not considered
:param minimum_angle_factor: If not set to None, neighbors for which the angle is lower than
minimum_angle_factor*largest_angle_neighbor are not considered
:param max_cn: maximum coordination number to be considered
:param min_cn: minimum coordination number to be considered
:param only_symbols: if not set to None, consider only coordination environments with the given symbols
:param valences: valences of the atoms
:param additional_conditions: additional conditions to be considered in the bonds (example : only bonds
between cation and anion
:param info: additional info about the calculation
:param timelimit: time limit (in secs) after which the calculation of the StructureEnvironments object stops
:param initial_structure_environments: initial StructureEnvironments object (most probably incomplete)
:param get_from_hints: whether to add neighbors sets from "hints" (e.g. capped environment => test the
neighbors without the cap)
:param voronoi_normalized_distance_tolerance: tolerance for the normalized distance used to distinguish
neighbors sets
:param voronoi_normalized_angle_tolerance: tolerance for the normalized angle used to distinguish
neighbors sets
:param recompute: whether to recompute the sites already computed (when initial_structure_environments
is not None)
:param optimization: optimization algorithm
:return: The StructureEnvironments object containing all the information about the coordination
environments in the structure
"""
time_init = time.clock()
if info is None:
info = {}
info.update({'local_geometry_finder':
{'parameters':
{'centering_type': self.centering_type,
'include_central_site_in_centroid': self.include_central_site_in_centroid,
'structure_refinement': self.structure_refinement,
'spg_analyzer_options': self.spg_analyzer_options
}
}
})
if only_symbols is not None:
self.allcg = AllCoordinationGeometries(
permutations_safe_override=self.permutations_safe_override,
only_symbols=only_symbols)
self.valences = valences
# Get a list of indices of unequivalent sites from the initial structure
self.equivalent_sites = [[site] for site in self.structure]
self.struct_sites_to_irreducible_site_list_map = list(
range(len(self.structure)))
self.sites_map = list(range(len(self.structure)))
indices = list(range(len(self.structure)))
# Get list of unequivalent sites with valence >= 0
if only_cations and self.valences != 'undefined':
sites_indices = [isite for isite in indices if
self.valences[isite] >= 0]
else:
sites_indices = [isite for isite in indices]
# Include atoms that are in the list of "only_atoms" if it is provided
if only_atoms is not None:
sites_indices = [isite for isite in sites_indices
if any([at in [sp.symbol for sp in self.structure[
isite].species_and_occu]
for at in only_atoms])]
# Exclude atoms that are in the list of excluded atoms
if excluded_atoms:
sites_indices = [isite for isite in sites_indices
if not any([at in [sp.symbol for sp in
self.structure[
isite].species_and_occu]
for at in excluded_atoms])]
if only_indices is not None:
sites_indices = [isite for isite in indices if
isite in only_indices]
# Get the VoronoiContainer for the sites defined by their indices (sites_indices)
logging.info('Getting DetailedVoronoiContainer')
if voronoi_normalized_distance_tolerance is None:
normalized_distance_tolerance = DetailedVoronoiContainer.default_normalized_distance_tolerance
else:
normalized_distance_tolerance = voronoi_normalized_distance_tolerance
if voronoi_normalized_angle_tolerance is None:
normalized_angle_tolerance = DetailedVoronoiContainer.default_normalized_angle_tolerance
else:
normalized_angle_tolerance = voronoi_normalized_angle_tolerance
self.detailed_voronoi = DetailedVoronoiContainer(self.structure,
isites=sites_indices,
valences=self.valences,
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor,
additional_conditions=additional_conditions,
normalized_distance_tolerance=normalized_distance_tolerance,
normalized_angle_tolerance=normalized_angle_tolerance)
logging.info('DetailedVoronoiContainer has been set up')
# Initialize the StructureEnvironments object (either from initial_structure_environments or from scratch)
if initial_structure_environments is not None:
se = initial_structure_environments
if se.structure != self.structure:
raise ValueError('Structure is not the same in initial_structure_environments')
if se.voronoi != self.detailed_voronoi:
if self.detailed_voronoi.is_close_to(se.voronoi):
self.detailed_voronoi = se.voronoi
else:
raise ValueError('Detailed Voronoi is not the same in initial_structure_environments')
se.info = info
else:
se = StructureEnvironments(voronoi=self.detailed_voronoi, valences=self.valences,
sites_map=self.sites_map, equivalent_sites=self.equivalent_sites,
ce_list=[None]*len(self.structure), structure=self.structure,
info=info)
# Set up the coordination numbers that have to be computed based on min_cn, max_cn and possibly the settings
# for an update (argument "recompute") of an existing StructureEnvironments
if min_cn is None:
min_cn = 1
if max_cn is None:
max_cn = 13
all_cns = range(min_cn, max_cn+1)
do_recompute = False
if recompute is not None:
if 'cns' in recompute:
cns_to_recompute = recompute['cns']
all_cns = list(set(all_cns).intersection(cns_to_recompute))
do_recompute = True
# Variables used for checking timelimit
max_time_one_site = 0.0
breakit = False
if optimization > 0:
self.detailed_voronoi.local_planes = [None]*len(self.structure)
self.detailed_voronoi.separations = [None]*len(self.structure)
# Loop on all the sites
for isite in range(len(self.structure)):
if isite not in sites_indices:
logging.info(' ... in site #{:d}/{:d} ({}) : '
'skipped'.format(isite, len(self.structure),
self.structure[isite].species_string))
continue
if breakit:
logging.info(' ... in site #{:d}/{:d} ({}) : '
'skipped (timelimit)'.format(isite, len(self.structure),
self.structure[isite].species_string))
continue
logging.info(' ... in site #{:d}/{:d} ({})'.format(isite, len(self.structure),
self.structure[isite].species_string))
t1 = time.clock()
if optimization > 0:
self.detailed_voronoi.local_planes[isite] = OrderedDict()
self.detailed_voronoi.separations[isite] = {}
se.init_neighbors_sets(isite=isite, additional_conditions=additional_conditions, valences=valences)
to_add_from_hints = []
nb_sets_info = {}
for cn, nb_sets in se.neighbors_sets[isite].items():
if cn not in all_cns:
continue
for inb_set, nb_set in enumerate(nb_sets):
logging.debug(' ... getting environments for nb_set ({:d}, {:d})'.format(cn, inb_set))
tnbset1 = time.clock()
ce = self.update_nb_set_environments(se=se, isite=isite, cn=cn, inb_set=inb_set, nb_set=nb_set,
recompute=do_recompute, optimization=optimization)
tnbset2 = time.clock()
if cn not in nb_sets_info:
nb_sets_info[cn] = {}
nb_sets_info[cn][inb_set] = {'time': tnbset2 - tnbset1}
if get_from_hints:
for cg_symbol, cg_dict in ce:
cg = self.allcg[cg_symbol]
# Get possibly missing neighbors sets
if cg.neighbors_sets_hints is None:
continue
logging.debug(' ... getting hints from cg with mp_symbol "{}" ...'.format(cg_symbol))
hints_info = {'csm': cg_dict['symmetry_measure'],
'nb_set': nb_set,
'permutation': cg_dict['permutation']}
for nb_sets_hints in cg.neighbors_sets_hints:
suggested_nb_set_voronoi_indices = nb_sets_hints.hints(hints_info)
for inew, new_nb_set_voronoi_indices in enumerate(suggested_nb_set_voronoi_indices):
logging.debug(' hint # {:d}'.format(inew))
new_nb_set = se.NeighborsSet(structure=se.structure, isite=isite,
detailed_voronoi=se.voronoi,
site_voronoi_indices=new_nb_set_voronoi_indices,
sources={'origin': 'nb_set_hints',
'hints_type': nb_sets_hints.hints_type,
'suggestion_index': inew,
'cn_map_source': [cn, inb_set],
'cg_source_symbol': cg_symbol})
cn_new_nb_set = len(new_nb_set)
if max_cn is not None and cn_new_nb_set > max_cn:
continue
if min_cn is not None and cn_new_nb_set < min_cn:
continue
if new_nb_set in [ta['new_nb_set'] for ta in to_add_from_hints]:
has_nb_set = True
elif not cn_new_nb_set in se.neighbors_sets[isite]:
has_nb_set = False
else:
has_nb_set = new_nb_set in se.neighbors_sets[isite][cn_new_nb_set]
if not has_nb_set:
to_add_from_hints.append({'isite': isite,
'new_nb_set': new_nb_set,
'cn_new_nb_set': cn_new_nb_set})
logging.debug(' => to be computed'.format(inew))
else:
logging.debug(' => already present'.format(inew))
logging.debug(' ... getting environments for nb_sets added from hints')
for missing_nb_set_to_add in to_add_from_hints:
se.add_neighbors_set(isite=isite, nb_set=missing_nb_set_to_add['new_nb_set'])
for missing_nb_set_to_add in to_add_from_hints:
isite_new_nb_set = missing_nb_set_to_add['isite']
cn_new_nb_set = missing_nb_set_to_add['cn_new_nb_set']
new_nb_set = missing_nb_set_to_add['new_nb_set']
inew_nb_set = se.neighbors_sets[isite_new_nb_set][cn_new_nb_set].index(new_nb_set)
logging.debug(' ... getting environments for nb_set ({:d}, {:d}) - '
'from hints'.format(cn_new_nb_set, inew_nb_set))
tnbset1 = time.clock()
self.update_nb_set_environments(se=se,
isite=isite_new_nb_set,
cn=cn_new_nb_set,
inb_set=inew_nb_set,
nb_set=new_nb_set,
optimization=optimization)
tnbset2 = time.clock()
if cn not in nb_sets_info:
nb_sets_info[cn] = {}
nb_sets_info[cn][inew_nb_set] = {'time': tnbset2 - tnbset1}
t2 = time.clock()
se.update_site_info(isite=isite, info_dict={'time': t2 - t1, 'nb_sets_info': nb_sets_info})
if timelimit is not None:
time_elapsed = t2 - time_init
time_left = timelimit - time_elapsed
if time_left < 2.0 * max_time_one_site:
breakit = True
max_time_one_site = max(max_time_one_site, t2 - t1)
logging.info(' ... computed in {:.2f} seconds'.format(t2 - t1))
time_end = time.clock()
logging.info(' ... compute_structure_environments ended in {:.2f} seconds'.format(time_end-time_init))
return se
def update_nb_set_environments(self, se, isite, cn, inb_set, nb_set, recompute=False, optimization=None):
ce = se.get_coordination_environments(isite=isite, cn=cn, nb_set=nb_set)
if ce is not None and not recompute:
return ce
ce = ChemicalEnvironments()
if optimization == 2:
neighb_coords = nb_set.neighb_coordsOpt
else:
neighb_coords = nb_set.neighb_coords
self.setup_local_geometry(isite, coords=neighb_coords, optimization=optimization)
if optimization > 0:
nb_set.local_planes = OrderedDict()
nb_set.separations = {}
cncgsm = self.get_coordination_symmetry_measures_optim(nb_set=nb_set, optimization=optimization)
else:
cncgsm = self.get_coordination_symmetry_measures()
for cg in cncgsm:
other_csms = {
'csm_wocs_ctwocc': cncgsm[cg]['csm_wocs_ctwocc'],
'csm_wocs_ctwcc': cncgsm[cg]['csm_wocs_ctwcc'],
'csm_wocs_csc': cncgsm[cg]['csm_wocs_csc'],
'csm_wcs_ctwocc': cncgsm[cg]['csm_wcs_ctwocc'],
'csm_wcs_ctwcc': cncgsm[cg]['csm_wcs_ctwcc'],
'csm_wcs_csc': cncgsm[cg]['csm_wcs_csc'],
'rotation_matrix_wocs_ctwocc': cncgsm[cg]['rotation_matrix_wocs_ctwocc'],
'rotation_matrix_wocs_ctwcc': cncgsm[cg]['rotation_matrix_wocs_ctwcc'],
'rotation_matrix_wocs_csc': cncgsm[cg]['rotation_matrix_wocs_csc'],
'rotation_matrix_wcs_ctwocc': cncgsm[cg]['rotation_matrix_wcs_ctwocc'],
'rotation_matrix_wcs_ctwcc': cncgsm[cg]['rotation_matrix_wcs_ctwcc'],
'rotation_matrix_wcs_csc': cncgsm[cg]['rotation_matrix_wcs_csc'],
'scaling_factor_wocs_ctwocc': cncgsm[cg]['scaling_factor_wocs_ctwocc'],
'scaling_factor_wocs_ctwcc': cncgsm[cg]['scaling_factor_wocs_ctwcc'],
'scaling_factor_wocs_csc': cncgsm[cg]['scaling_factor_wocs_csc'],
'scaling_factor_wcs_ctwocc': cncgsm[cg]['scaling_factor_wcs_ctwocc'],
'scaling_factor_wcs_ctwcc': cncgsm[cg]['scaling_factor_wcs_ctwcc'],
'scaling_factor_wcs_csc': cncgsm[cg]['scaling_factor_wcs_csc'],
'translation_vector_wocs_ctwocc': cncgsm[cg]['translation_vector_wocs_ctwocc'],
'translation_vector_wocs_ctwcc': cncgsm[cg]['translation_vector_wocs_ctwcc'],
'translation_vector_wocs_csc': cncgsm[cg]['translation_vector_wocs_csc'],
'translation_vector_wcs_ctwocc': cncgsm[cg]['translation_vector_wcs_ctwocc'],
'translation_vector_wcs_ctwcc': cncgsm[cg]['translation_vector_wcs_ctwcc'],
'translation_vector_wcs_csc': cncgsm[cg]['translation_vector_wcs_csc']
}
ce.add_coord_geom(cg, cncgsm[cg]['csm'],
algo=cncgsm[cg]['algo'],
permutation=cncgsm[cg]['indices'],
local2perfect_map=cncgsm[cg][
'local2perfect_map'],
perfect2local_map=cncgsm[cg][
'perfect2local_map'],
detailed_voronoi_index={'cn': cn,
'index': inb_set},
other_symmetry_measures=other_csms,
rotation_matrix=cncgsm[cg]['rotation_matrix'],
scaling_factor=cncgsm[cg]['scaling_factor']
)
se.update_coordination_environments(isite=isite, cn=cn, nb_set=nb_set, ce=ce)
return ce
def setup_local_geometry(self, isite, coords, optimization=None):
"""
Sets up the AbstractGeometry for the local geometry of site with index isite.
:param isite: Index of the site for which the local geometry has to be set up
:param coords: The coordinates of the (local) neighbors
"""
self.local_geometry = AbstractGeometry(
central_site=self.structure.cart_coords[isite],
bare_coords=coords,
centering_type=self.centering_type,
include_central_site_in_centroid=
self.include_central_site_in_centroid,
optimization=optimization)
def setup_test_perfect_environment(self, symbol, randomness=False,
max_random_dist=0.1,
symbol_type='mp_symbol',
indices='RANDOM',
random_translation='NONE',
random_rotation='NONE',
random_scale='NONE',
points=None):
if symbol_type == 'IUPAC':
cg = self.allcg.get_geometry_from_IUPAC_symbol(symbol)
elif symbol_type == 'MP' or symbol_type == 'mp_symbol':
cg = self.allcg.get_geometry_from_mp_symbol(symbol)
else:
raise ValueError('Wrong mp_symbol to setup coordination geometry')
neighb_coords = []
if points is not None:
mypoints = points
else:
mypoints = cg.points
if randomness:
rv = np.random.random_sample(3)
while norm(rv) > 1.0:
rv = np.random.random_sample(3)
coords = [np.zeros(3, np.float) + max_random_dist * rv]
for pp in mypoints:
rv = np.random.random_sample(3)
while norm(rv) > 1.0:
rv = np.random.random_sample(3)
neighb_coords.append(np.array(pp) + max_random_dist * rv)
else:
coords = [np.zeros(3, np.float)]
for pp in mypoints:
neighb_coords.append(np.array(pp))
if indices == 'RANDOM':
shuffle(neighb_coords)
elif indices == 'ORDERED':
pass
else:
neighb_coords = [neighb_coords[ii] for ii in indices]
# Scaling the test environment
if random_scale == 'RANDOM':
scale = 0.1*np.random.random_sample() + 0.95
elif random_scale == 'NONE':
scale = 1.0
else:
scale = random_scale
coords = [scale * cc for cc in coords]
neighb_coords = [scale * cc for cc in neighb_coords]
# Rotating the test environment
if random_rotation == 'RANDOM':
uu = np.random.random_sample(3) + 0.1
uu = uu / norm(uu)
theta = np.pi * np.random.random_sample()
cc = np.cos(theta)
ss = np.sin(theta)
ux = uu[0]
uy = uu[1]
uz = uu[2]
RR = np.matrix([[ux*ux+(1.0-ux*ux)*cc, ux*uy*(1.0-cc)-uz*ss, ux*uz*(1.0-cc)+uy*ss],
[ux*uy*(1.0-cc)+uz*ss, uy*uy+(1.0-uy*uy)*cc, uy*uz*(1.0-cc)-ux*ss],
[ux*uz*(1.0-cc)-uy*ss, uy*uz*(1.0-cc)+ux*ss, uz*uz+(1.0-uz*uz)*cc]])
elif random_rotation == 'NONE':
RR = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
else:
RR = random_rotation
newcoords = []
for cc in coords:
newcc = RR * np.matrix(cc).T
newcoords.append(newcc.getA1())
coords = newcoords
newcoords = []
for cc in neighb_coords:
newcc = RR * np.matrix(cc).T
newcoords.append(newcc.getA1())
neighb_coords = newcoords
# Translating the test environment
if random_translation == 'RANDOM':
translation = 10.0 * (2.0*np.random.random_sample(3)-1.0)
elif random_translation == 'NONE':
translation = np.zeros(3, np.float)
else:
translation = random_translation
coords = [cc + translation for cc in coords]
neighb_coords = [cc + translation for cc in neighb_coords]
coords.extend(neighb_coords)
myspecies = ["O"] * (len(coords))
myspecies[0] = "Cu"
amin = np.min([cc[0] for cc in coords])
amax = np.max([cc[0] for cc in coords])
bmin = np.min([cc[1] for cc in coords])
bmax = np.max([cc[1] for cc in coords])
cmin = np.min([cc[2] for cc in coords])
cmax = np.max([cc[2] for cc in coords])
factor = 5.0
aa = factor * max([amax - amin, bmax - bmin, cmax - cmin])
lattice = Lattice.cubic(a=aa)
structure = Structure(lattice=lattice, species=myspecies, coords=coords,
to_unit_cell=False, coords_are_cartesian=True)
self.setup_structure(structure=structure)
self.setup_local_geometry(isite=0, coords=neighb_coords)
self.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
def setup_random_structure(self, coordination):
"""
Sets up a purely random structure with a given coordination.
:param coordination: coordination number for the random structure
"""
aa = 0.4
bb = -0.2
coords = list()
for ii in range(coordination + 1):
coords.append(aa * np.random.random_sample(3, ) + bb)
self.set_structure(
lattice=np.array([[10, 0, 0], [0, 10, 0], [0, 0, 10]], np.float),
species=["Si"] * (coordination + 1),
coords=coords,
coords_are_cartesian=False)
self.setup_random_indices_local_geometry(coordination)
def setup_random_indices_local_geometry(self, coordination):
"""
Sets up random indices for the local geometry, for testing purposes
:param coordination: coordination of the local geometry
"""
self.icentral_site = 0
self.indices = list(range(1, coordination + 1))
np.random.shuffle(self.indices)
def setup_ordered_indices_local_geometry(self, coordination):
"""
Sets up ordered indices for the local geometry, for testing purposes
:param coordination: coordination of the local geometry
"""
self.icentral_site = 0
self.indices = list(range(1, coordination + 1))
def setup_explicit_indices_local_geometry(self, explicit_indices):
"""
Sets up explicit indices for the local geometry, for testing purposes
:param explicit_indices: explicit indices for the neighbors (set of numbers
from 0 to CN-1 in a given order)
"""
self.icentral_site = 0
self.indices = [ii + 1 for ii in explicit_indices]
    def get_coordination_symmetry_measures(self, only_minimum=True,
                                           all_csms=True, optimization=None):
        """
        Returns the continuous symmetry measures of the current local geometry in a dictionary.

        :param only_minimum: if True, keep only the lowest-CSM permutation for each
            candidate geometry; otherwise keep all permutations' results
        :param all_csms: if True (and only_minimum), also store the six alternative
            symmetry measures via _update_results_all_csms
        :param optimization: optimization level forwarded to
            coordination_geometry_symmetry_measures
        :return: the continuous symmetry measures of the current local geometry in a dictionary.
        """
        # Candidate model geometries with the same number of points as the local geometry.
        test_geometries = self.allcg.get_implemented_geometries(
            len(self.local_geometry.coords))
        if len(self.local_geometry.coords) == 1:
            # Single neighbor: only 'S:1' is possible and its CSM is 0 by construction.
            if len(test_geometries) == 0:
                return {}
            result_dict = {'S:1': {'csm': 0.0, 'indices': [0], 'algo': 'EXPLICIT',
                                   'local2perfect_map': {0: 0}, 'perfect2local_map': {0: 0},
                                   'scaling_factor': None, 'rotation_matrix': None, 'translation_vector': None}}
            if all_csms:
                # All six centering/scaling conventions are trivially 0 for one point.
                for csmtype in ['wocs_ctwocc', 'wocs_ctwcc', 'wocs_csc', 'wcs_ctwocc', 'wcs_ctwcc', 'wcs_csc']:
                    result_dict['S:1']['csm_{}'.format(csmtype)] = 0.0
                    result_dict['S:1']['scaling_factor_{}'.format(csmtype)] = None
                    result_dict['S:1']['rotation_matrix_{}'.format(csmtype)] = None
                    result_dict['S:1']['translation_vector_{}'.format(csmtype)] = None
            return result_dict
        result_dict = {}
        for geometry in test_geometries:
            # NOTE: self.perfect_geometry is overwritten for each candidate geometry
            # and read later by _update_results_all_csms.
            self.perfect_geometry = AbstractGeometry.from_cg(cg=geometry,
                                                             centering_type=self.centering_type,
                                                             include_central_site_in_centroid=
                                                             self.include_central_site_in_centroid)
            points_perfect = self.perfect_geometry.points_wcs_ctwcc()
            cgsm = self.coordination_geometry_symmetry_measures(geometry,
                                                                points_perfect=points_perfect,
                                                                optimization=optimization)
            result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
            if only_minimum:
                if len(result) > 0:
                    # Keep the permutation with the smallest continuous symmetry measure.
                    imin = np.argmin([rr['symmetry_measure'] for rr in result])
                    if geometry.algorithms is not None:
                        algo = algos[imin]
                    else:
                        algo = algos
                    # Rotation is inverted and scaling inverted (1/x) relative to the raw
                    # symmetry_measure output — the stored values map distorted -> perfect.
                    result_dict[geometry.mp_symbol] = {'csm': result[imin]['symmetry_measure'],
                                                       'indices': permutations[
                                                           imin],
                                                       'algo': algo,
                                                       'local2perfect_map':
                                                           local2perfect_maps[
                                                               imin],
                                                       'perfect2local_map':
                                                           perfect2local_maps[
                                                               imin],
                                                       'scaling_factor': 1.0 / result[imin]['scaling_factor'],
                                                       'rotation_matrix':
                                                           np.linalg.inv(result[imin]['rotation_matrix']),
                                                       'translation_vector': result[imin]['translation_vector']}
                    if all_csms:
                        self._update_results_all_csms(result_dict, permutations, imin, geometry)
            else:
                result_dict[geometry.mp_symbol] = {'csm': result,
                                                   'indices': permutations,
                                                   'algo': algos,
                                                   'local2perfect_map': local2perfect_maps,
                                                   'perfect2local_map': perfect2local_maps}
        return result_dict
def _update_results_all_csms(self, result_dict, permutations, imin, geometry):
permutation = permutations[imin]
# Without central site, centered on the centroid (centroid does not include the central site)
# result_dict[geometry.mp_symbol]['csm_wocs_ctwocc'] = \
# result[imin]
pdist = self.local_geometry.points_wocs_ctwocc(
permutation=permutation)
pperf = self.perfect_geometry.points_wocs_ctwocc()
sm_info = symmetry_measure(points_distorted=pdist,
points_perfect=pperf)
result_dict[geometry.mp_symbol]['csm_wocs_ctwocc'] = sm_info['symmetry_measure']
result_dict[geometry.mp_symbol]['rotation_matrix_wocs_ctwocc'] = \
np.linalg.inv(sm_info['rotation_matrix'])
result_dict[geometry.mp_symbol]['scaling_factor_wocs_ctwocc'] = 1.0 / sm_info['scaling_factor']
result_dict[geometry.mp_symbol]['translation_vector_wocs_ctwocc'] = \
self.local_geometry.centroid_without_centre
# Without central site, centered on the centroid (centroid includes the central site)
pdist = self.local_geometry.points_wocs_ctwcc(
permutation=permutation)
pperf = self.perfect_geometry.points_wocs_ctwcc()
sm_info = symmetry_measure(points_distorted=pdist,
points_perfect=pperf)
result_dict[geometry.mp_symbol]['csm_wocs_ctwcc'] = sm_info['symmetry_measure']
result_dict[geometry.mp_symbol]['rotation_matrix_wocs_ctwcc'] = \
np.linalg.inv(sm_info['rotation_matrix'])
result_dict[geometry.mp_symbol]['scaling_factor_wocs_ctwcc'] = 1.0 / sm_info['scaling_factor']
result_dict[geometry.mp_symbol]['translation_vector_wocs_ctwcc'] = \
self.local_geometry.centroid_with_centre
# Without central site, centered on the central site
pdist = self.local_geometry.points_wocs_csc(
permutation=permutation)
pperf = self.perfect_geometry.points_wocs_csc()
sm_info = symmetry_measure(points_distorted=pdist,
points_perfect=pperf)
result_dict[geometry.mp_symbol]['csm_wocs_csc'] = sm_info['symmetry_measure']
result_dict[geometry.mp_symbol]['rotation_matrix_wocs_csc'] = \
np.linalg.inv(sm_info['rotation_matrix'])
result_dict[geometry.mp_symbol]['scaling_factor_wocs_csc'] = 1.0 / sm_info['scaling_factor']
result_dict[geometry.mp_symbol]['translation_vector_wocs_csc'] = \
self.local_geometry.bare_centre
# With central site, centered on the centroid (centroid does not include the central site)
pdist = self.local_geometry.points_wcs_ctwocc(
permutation=permutation)
pperf = self.perfect_geometry.points_wcs_ctwocc()
sm_info = symmetry_measure(points_distorted=pdist,
points_perfect=pperf)
result_dict[geometry.mp_symbol]['csm_wcs_ctwocc'] = sm_info['symmetry_measure']
result_dict[geometry.mp_symbol]['rotation_matrix_wcs_ctwocc'] = \
np.linalg.inv(sm_info['rotation_matrix'])
result_dict[geometry.mp_symbol]['scaling_factor_wcs_ctwocc'] = 1.0 / sm_info['scaling_factor']
result_dict[geometry.mp_symbol]['translation_vector_wcs_ctwocc'] = \
self.local_geometry.centroid_without_centre
# With central site, centered on the centroid (centroid includes the central site)
pdist = self.local_geometry.points_wcs_ctwcc(
permutation=permutation)
pperf = self.perfect_geometry.points_wcs_ctwcc()
sm_info = symmetry_measure(points_distorted=pdist,
points_perfect=pperf)
result_dict[geometry.mp_symbol]['csm_wcs_ctwcc'] = sm_info['symmetry_measure']
result_dict[geometry.mp_symbol]['rotation_matrix_wcs_ctwcc'] = \
np.linalg.inv(sm_info['rotation_matrix'])
result_dict[geometry.mp_symbol]['scaling_factor_wcs_ctwcc'] = 1.0 / sm_info['scaling_factor']
result_dict[geometry.mp_symbol]['translation_vector_wcs_ctwcc'] = \
self.local_geometry.centroid_with_centre
# With central site, centered on the central site
pdist = self.local_geometry.points_wcs_csc(
permutation=permutation)
pperf = self.perfect_geometry.points_wcs_csc()
sm_info = symmetry_measure(points_distorted=pdist,
points_perfect=pperf)
result_dict[geometry.mp_symbol]['csm_wcs_csc'] = sm_info['symmetry_measure']
result_dict[geometry.mp_symbol]['rotation_matrix_wcs_csc'] = \
np.linalg.inv(sm_info['rotation_matrix'])
result_dict[geometry.mp_symbol]['scaling_factor_wcs_csc'] = 1.0 / sm_info['scaling_factor']
result_dict[geometry.mp_symbol]['translation_vector_wcs_csc'] = \
self.local_geometry.bare_centre
    def get_coordination_symmetry_measures_optim(self, only_minimum=True,
                                                 all_csms=True, nb_set=None, optimization=None):
        """
        Returns the continuous symmetry measures of the current local geometry in a dictionary,
        using the optimized separation-plane algorithms.

        :param only_minimum: if True, keep only the lowest-CSM permutation for each geometry
        :param all_csms: if True, also store the six alternative symmetry measures
        :param nb_set: neighbors set whose cached local planes/separations are used
        :param optimization: optimization level forwarded to the separation-plane routines
        :return: the continuous symmetry measures of the current local geometry in a dictionary.
        :raise ValueError: if a candidate geometry mixes algorithm types other than
            EXPLICIT_PERMUTATIONS-first or all-SEPARATION_PLANE
        """
        cn = len(self.local_geometry.coords)
        test_geometries = self.allcg.get_implemented_geometries(cn)
        # If every candidate geometry uses explicit permutations, there is nothing to
        # optimize: delegate to the standard (non-optimized) implementation.
        if all([cg.algorithms[0].algorithm_type == EXPLICIT_PERMUTATIONS for cg in test_geometries]):
            return self.get_coordination_symmetry_measures(only_minimum=only_minimum, all_csms=all_csms,
                                                           optimization=optimization)
        if not all([all([algo.algorithm_type == SEPARATION_PLANE
                         for algo in cg.algorithms]) for cg in test_geometries]):
            raise ValueError('All algorithms should be EXPLICIT_PERMUTATIONS or SEPARATION_PLANE')
        result_dict = {}
        for geometry in test_geometries:
            # NOTE: self.perfect_geometry is overwritten per candidate geometry and read
            # later by _update_results_all_csms.
            self.perfect_geometry = AbstractGeometry.from_cg(cg=geometry,
                                                             centering_type=self.centering_type,
                                                             include_central_site_in_centroid=
                                                             self.include_central_site_in_centroid)
            points_perfect = self.perfect_geometry.points_wcs_ctwcc()
            cgsm = self.coordination_geometry_symmetry_measures_sepplane_optim(geometry,
                                                                               points_perfect=points_perfect,
                                                                               nb_set=nb_set,
                                                                               optimization=optimization)
            result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
            if only_minimum:
                if len(result) > 0:
                    # Keep the permutation with the smallest continuous symmetry measure;
                    # rotation is inverted and scaling inverted (distorted -> perfect).
                    imin = np.argmin([rr['symmetry_measure'] for rr in result])
                    if geometry.algorithms is not None:
                        algo = algos[imin]
                    else:
                        algo = algos
                    result_dict[geometry.mp_symbol] = {'csm': result[imin]['symmetry_measure'],
                                                       'indices': permutations[
                                                           imin],
                                                       'algo': algo,
                                                       'local2perfect_map':
                                                           local2perfect_maps[
                                                               imin],
                                                       'perfect2local_map':
                                                           perfect2local_maps[
                                                               imin],
                                                       'scaling_factor': 1.0 / result[imin]['scaling_factor'],
                                                       'rotation_matrix':
                                                           np.linalg.inv(result[imin]['rotation_matrix']),
                                                       'translation_vector': result[imin]['translation_vector']}
                    if all_csms:
                        self._update_results_all_csms(result_dict, permutations, imin, geometry)
        return result_dict
def coordination_geometry_symmetry_measures(self, coordination_geometry,
tested_permutations=False,
points_perfect=None,
optimization=None):
"""
Returns the symmetry measures of a given coordination_geometry for a set of permutations depending on
the permutation setup. Depending on the parameters of the LocalGeometryFinder and on the coordination
geometry, different methods are called.
:param coordination_geometry: Coordination geometry for which the symmetry measures are looked for
:return: the symmetry measures of a given coordination_geometry for a set of permutations
:raise: NotImplementedError if the permutation_setup does not exists
"""
if tested_permutations:
tested_permutations = set()
if self.permutations_safe_override:
raise ValueError('No permutations safe override anymore')
csms = []
permutations = []
algos = []
local2perfect_maps = []
perfect2local_maps = []
for algo in coordination_geometry.algorithms:
if algo.algorithm_type == EXPLICIT_PERMUTATIONS:
return self.coordination_geometry_symmetry_measures_standard(
coordination_geometry, algo,
points_perfect=points_perfect,
optimization=optimization)
if algo.algorithm_type == SEPARATION_PLANE:
cgsm = self.coordination_geometry_symmetry_measures_separation_plane(
coordination_geometry,
algo,
tested_permutations=tested_permutations,
points_perfect=points_perfect)
csm, perm, algo, local2perfect_map, perfect2local_map = cgsm
csms.extend(csm)
permutations.extend(perm)
algos.extend(algo)
local2perfect_maps.extend(local2perfect_map)
perfect2local_maps.extend(perfect2local_map)
return csms, permutations, algos, local2perfect_maps, perfect2local_maps
def coordination_geometry_symmetry_measures_sepplane_optim(self, coordination_geometry,
points_perfect=None,
nb_set=None, optimization=None):
"""
Returns the symmetry measures of a given coordination_geometry for a set of permutations depending on
the permutation setup. Depending on the parameters of the LocalGeometryFinder and on the coordination
geometry, different methods are called.
:param coordination_geometry: Coordination geometry for which the symmetry measures are looked for
:return: the symmetry measures of a given coordination_geometry for a set of permutations
:raise: NotImplementedError if the permutation_setup does not exists
"""
csms = []
permutations = []
algos = []
local2perfect_maps = []
perfect2local_maps = []
for algo in coordination_geometry.algorithms:
if algo.algorithm_type == SEPARATION_PLANE:
cgsm = self.coordination_geometry_symmetry_measures_separation_plane_optim(
coordination_geometry,
algo,
points_perfect=points_perfect,
nb_set=nb_set,
optimization=optimization)
csm, perm, algo, local2perfect_map, perfect2local_map = cgsm
csms.extend(csm)
permutations.extend(perm)
algos.extend(algo)
local2perfect_maps.extend(local2perfect_map)
perfect2local_maps.extend(perfect2local_map)
return csms, permutations, algos, local2perfect_maps, perfect2local_maps
def coordination_geometry_symmetry_measures_standard(self,
coordination_geometry,
algo,
points_perfect=None,
optimization=None):
"""
Returns the symmetry measures for a set of permutations (whose setup depends on the coordination geometry)
for the coordination geometry "coordination_geometry". Standard implementation looking for the symmetry
measures of each permutation
:param coordination_geometry: The coordination geometry to be investigated
:return: The symmetry measures for the given coordination geometry for each permutation investigated
"""
# permutations_symmetry_measures = np.zeros(len(algo.permutations),
# np.float)
if optimization == 2:
permutations_symmetry_measures = [None] * len(algo.permutations)
permutations = list()
algos = list()
local2perfect_maps = list()
perfect2local_maps = list()
for iperm, perm in enumerate(algo.permutations):
local2perfect_map = {}
perfect2local_map = {}
permutations.append(perm)
for iperfect, ii in enumerate(perm):
perfect2local_map[iperfect] = ii
local2perfect_map[ii] = iperfect
local2perfect_maps.append(local2perfect_map)
perfect2local_maps.append(perfect2local_map)
points_distorted = self.local_geometry.points_wcs_ctwcc(
permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted,
points_perfect=points_perfect)
sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append(str(algo))
return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
else:
permutations_symmetry_measures = [None] * len(algo.permutations)
permutations = list()
algos = list()
local2perfect_maps = list()
perfect2local_maps = list()
for iperm, perm in enumerate(algo.permutations):
local2perfect_map = {}
perfect2local_map = {}
permutations.append(perm)
for iperfect, ii in enumerate(perm):
perfect2local_map[iperfect] = ii
local2perfect_map[ii] = iperfect
local2perfect_maps.append(local2perfect_map)
perfect2local_maps.append(perfect2local_map)
points_distorted = self.local_geometry.points_wcs_ctwcc(
permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted,
points_perfect=points_perfect)
sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append(str(algo))
return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
    def coordination_geometry_symmetry_measures_separation_plane(self,
                                                                 coordination_geometry,
                                                                 separation_plane_algo,
                                                                 testing=False,
                                                                 tested_permutations=False,
                                                                 points_perfect=None):
        """
        Returns the symmetry measures of the given coordination geometry "coordination_geometry" using separation
        facets to reduce the complexity of the system. Caller to the refined 2POINTS, 3POINTS and other ...

        :param coordination_geometry: The coordination geometry to be investigated
        :param separation_plane_algo: separation-plane algorithm (reference separation,
            point groups, permutations, ...)
        :param testing: if True, also return the separation permutations used
        :param tested_permutations: forwarded to _cg_csm_separation_plane to skip
            permutations already tested
        :param points_perfect: points of the perfect geometry
        :return: The symmetry measures for the given coordination geometry for each plane and permutation investigated
        """
        permutations = list()
        permutations_symmetry_measures = list()
        plane_separations = list()
        algos = list()
        perfect2local_maps = list()
        local2perfect_maps = list()
        if testing:
            separation_permutations = list()
        nplanes = 0
        # Try candidate planes built from combinations of npoints neighbor points,
        # increasing npoints until at least one valid plane is found (capped at 4).
        for npoints in range(separation_plane_algo.minimum_number_of_points,
                             min(separation_plane_algo.maximum_number_of_points,
                                 4) + 1):
            for points_combination in itertools.combinations(
                    self.local_geometry.coords, npoints):
                if npoints == 2:
                    # Two neighbor points plus the central site define the plane;
                    # skip degenerate (collinear) triplets.
                    if collinear(points_combination[0], points_combination[1],
                                 self.local_geometry.central_site,
                                 tolerance=0.25):
                        continue
                    plane = Plane.from_3points(points_combination[0],
                                               points_combination[1],
                                               self.local_geometry.central_site)
                elif npoints == 3:
                    if collinear(points_combination[0], points_combination[1],
                                 points_combination[2], tolerance=0.25):
                        continue
                    plane = Plane.from_3points(points_combination[0],
                                               points_combination[1],
                                               points_combination[2])
                elif npoints > 3:
                    # More than three points: least-squares best-fit plane.
                    plane = Plane.from_npoints(points_combination,
                                               best_fit='least_square_distance')
                else:
                    raise ValueError(
                        'Wrong number of points to initialize separation plane')
                cgsm = self._cg_csm_separation_plane(
                    coordination_geometry=coordination_geometry,
                    sepplane=separation_plane_algo,
                    local_plane=plane,
                    plane_separations=plane_separations,
                    dist_tolerances=DIST_TOLERANCES,
                    testing=testing,
                    tested_permutations=tested_permutations,
                    points_perfect=points_perfect)
                csm, perm, algo = cgsm[0], cgsm[1], cgsm[2]
                if csm is not None:
                    # Plane yielded a valid separation: collect results and build the
                    # local<->perfect index maps for each accepted permutation.
                    permutations_symmetry_measures.extend(csm)
                    permutations.extend(perm)
                    for thisperm in perm:
                        p2l = {}
                        l2p = {}
                        for i_p, pp in enumerate(thisperm):
                            p2l[i_p] = pp
                            l2p[pp] = i_p
                        perfect2local_maps.append(p2l)
                        local2perfect_maps.append(l2p)
                    algos.extend(algo)
                    if testing:
                        separation_permutations.extend(cgsm[3])
                    nplanes += 1
            if nplanes > 0:
                # Found valid plane(s) at this npoints: do not try larger combinations.
                break
        if nplanes == 0:
            # No valid separation plane at all: fall back to random permutations.
            return self.coordination_geometry_symmetry_measures_fallback_random(
                coordination_geometry,
                points_perfect=points_perfect)
        if testing:
            return permutations_symmetry_measures, permutations, separation_permutations
        return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
    def coordination_geometry_symmetry_measures_separation_plane_optim(self,
                                                                       coordination_geometry,
                                                                       separation_plane_algo,
                                                                       points_perfect=None,
                                                                       nb_set=None,
                                                                       optimization=None):
        """
        Returns the symmetry measures of the given coordination geometry "coordination_geometry" using separation
        facets to reduce the complexity of the system. Optimized version that caches local
        planes and separations on the neighbors set (nb_set) so they are computed only once.

        :param coordination_geometry: The coordination geometry to be investigated
        :param separation_plane_algo: separation-plane algorithm (reference separation,
            permutations, ...)
        :param points_perfect: points of the perfect geometry
        :param nb_set: neighbors set carrying the caches (local_planes, separations)
        :param optimization: 1 or 2, selecting the optimized plane-CSM routine
        :raise ValueError: if optimization is neither 1 nor 2
        :return: The symmetry measures for the given coordination geometry for each plane and permutation investigated
        """
        if optimization == 2:
            cgcsmoptim = self._cg_csm_separation_plane_optim2
        elif optimization == 1:
            cgcsmoptim = self._cg_csm_separation_plane_optim1
        else:
            raise ValueError('Optimization should be 1 or 2')
        cn = len(self.local_geometry.coords)
        permutations = list()
        permutations_symmetry_measures = list()
        algos = list()
        perfect2local_maps = list()
        local2perfect_maps = list()
        # First, reuse the separations already cached on the neighbors set for this
        # algorithm's reference separation.
        if separation_plane_algo.separation in nb_set.separations:
            for sep_indices, (local_plane, npsep) in nb_set.separations[separation_plane_algo.separation].items():
                cgsm = cgcsmoptim(coordination_geometry=coordination_geometry,
                                  sepplane=separation_plane_algo,
                                  local_plane=local_plane,
                                  points_perfect=points_perfect,
                                  separation_indices=npsep)
                csm, perm, algo, cgsm_stop_search = cgsm[0], cgsm[1], cgsm[2], cgsm[3]
                permutations_symmetry_measures.extend(csm)
                permutations.extend(perm)
                for thisperm in perm:
                    p2l = {}
                    l2p = {}
                    for i_p, pp in enumerate(thisperm):
                        p2l[i_p] = pp
                        l2p[pp] = i_p
                    perfect2local_maps.append(p2l)
                    local2perfect_maps.append(l2p)
                algos.extend(algo)
        # Get the local planes and separations up to 3 points
        for npoints in range(self.allcg.minpoints[cn], min(self.allcg.maxpoints[cn], 3) + 1):
            for ipoints_combination in itertools.combinations(
                    range(self.local_geometry.cn), npoints):
                if ipoints_combination in nb_set.local_planes:
                    # Plane for this index combination already set up in a previous call.
                    continue
                # Set up new plane
                nb_set.local_planes[ipoints_combination] = None
                points_combination = [self.local_geometry.coords[ip] for ip in ipoints_combination]
                if npoints == 2:
                    # Two neighbor points plus the central site define the plane;
                    # skip degenerate (collinear) triplets.
                    if collinear(points_combination[0], points_combination[1],
                                 self.local_geometry.central_site,
                                 tolerance=0.25):
                        continue
                    plane = Plane.from_3points(points_combination[0],
                                               points_combination[1],
                                               self.local_geometry.central_site)
                elif npoints == 3:
                    if collinear(points_combination[0], points_combination[1],
                                 points_combination[2], tolerance=0.25):
                        continue
                    plane = Plane.from_3points(points_combination[0],
                                               points_combination[1],
                                               points_combination[2])
                elif npoints > 3:
                    plane = Plane.from_npoints(points_combination,
                                               best_fit='least_square_distance')
                else:
                    raise ValueError(
                        'Wrong number of points to initialize separation plane')
                # Takes a lot of time and happens rarely ...
                # if any([plane.is_same_plane_as(plane2) for comb2, plane2 in nb_set.local_planes.items() if plane2 is not None]):
                # continue
                nb_set.local_planes[ipoints_combination] = plane
                # Get the separations for this plane
                # TODO: check sensitivity to delta/delta_factor parameter
                dig = plane.distances_indices_groups(points=self.local_geometry._coords, delta_factor=0.1,
                                                     sign=True)
                grouped_indices = dig[2]
                new_seps = []
                # Grow the "in-plane" group one distance-group at a time; points on each
                # side of the plane are split by the sign of their distance.
                for ng in range(1, len(grouped_indices) + 1):
                    inplane = list(itertools.chain(*grouped_indices[:ng]))
                    if len(inplane) > self.allcg.maxpoints_inplane[cn]:
                        break
                    inplane = [ii[0] for ii in inplane]
                    outplane = list(itertools.chain(*grouped_indices[ng:]))
                    s1 = [ii_sign[0] for ii_sign in outplane if ii_sign[1] < 0]
                    s2 = [ii_sign[0] for ii_sign in outplane if ii_sign[1] > 0]
                    separation = sort_separation_tuple([s1, inplane, s2])
                    sep = tuple([len(gg) for gg in separation])
                    if sep not in self.allcg.separations_cg[cn]:
                        # No implemented geometry uses this group-size pattern.
                        continue
                    if sep not in nb_set.separations:
                        nb_set.separations[sep] = {}
                    mysep = [np.array(ss, dtype=np.int8) for ss in separation]
                    nb_set.separations[sep][separation] = (plane, mysep)
                    if sep == separation_plane_algo.separation:
                        new_seps.append(mysep)
                for separation_indices in new_seps:
                    cgsm = cgcsmoptim(coordination_geometry=coordination_geometry,
                                      sepplane=separation_plane_algo,
                                      local_plane=plane,
                                      points_perfect=points_perfect,
                                      separation_indices=separation_indices)
                    csm, perm, algo, cgsm_stop_search = cgsm[0], cgsm[1], cgsm[2], cgsm[3]
                    permutations_symmetry_measures.extend(csm)
                    permutations.extend(perm)
                    for thisperm in perm:
                        p2l = {}
                        l2p = {}
                        for i_p, pp in enumerate(thisperm):
                            p2l[i_p] = pp
                            l2p[pp] = i_p
                        perfect2local_maps.append(p2l)
                        local2perfect_maps.append(l2p)
                    algos.extend(algo)
        if len(permutations_symmetry_measures) == 0:
            # No separation matched anywhere: fall back to random permutations.
            return self.coordination_geometry_symmetry_measures_fallback_random(
                coordination_geometry,
                points_perfect=points_perfect)
        return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
    def _cg_csm_separation_plane(self, coordination_geometry, sepplane,
                                 local_plane,
                                 plane_separations,
                                 dist_tolerances=DIST_TOLERANCES,
                                 testing=False, tested_permutations=False,
                                 points_perfect=None):
        """
        Computes the continuous symmetry measures for one candidate separation plane.

        For each distance tolerance, the local points are separated by the plane and the
        resulting separation is matched against the reference separation of sepplane; if
        it matches, the permutations compatible with that separation are evaluated.

        :param coordination_geometry: The coordination geometry to be investigated
        :param sepplane: separation-plane algorithm (reference separation, point groups,
            permutations, ...)
        :param local_plane: candidate plane separating the local points
        :param plane_separations: list of separations already processed; mutated in place
            so equivalent planes are not evaluated twice
        :param dist_tolerances: distance tolerances tried in order for separating points
        :param testing: if True, also return the separation permutations used
        :param tested_permutations: if not False, a set of reference permutations already
            evaluated, used to skip duplicates; mutated in place
        :param points_perfect: points of the perfect geometry
        :return: (csms, permutations, algo-list[, separation_permutations]) on success,
            ([], [], []) if the plane was found but produced no permutations, or
            (None, None, None) if no tolerance yielded a usable separation
        """
        argref_separation = sepplane.argsorted_ref_separation_perm
        plane_found = False
        for dist_tolerance in dist_tolerances:
            permutations = []
            permutations_symmetry_measures = []
            if testing:
                separation_permutations = []
            algo = 'NOT_FOUND'
            # Split point indices into (side 1, in-plane, side 2) at this tolerance.
            separation = local_plane.indices_separate(
                self.local_geometry._coords, dist_tolerance)
            # Do not consider facets leading to the same separation indices
            separation = sort_separation(separation)
            if separation_in_list(separation, plane_separations):
                continue
            # Do not consider a separation which does not follow the reference separation of the perfect
            # coordination geometry
            if len(separation[1]) != len(sepplane.plane_points):
                continue
            if len(separation[0]) == len(sepplane.point_groups[0]):
                this_separation = separation
                plane_separations.append(this_separation)
            elif len(separation[0]) == len(sepplane.point_groups[1]):
                # Sides are swapped w.r.t. the reference: flip the separation.
                this_separation = [list(separation[2]), list(separation[1]),
                                   list(separation[0])]
                plane_separations.append(this_separation)
            else:
                continue
            if sepplane.ordered_plane:
                # Order the in-plane (and possibly the side) points by their projected
                # 2D angular order on the plane before building the permutation.
                inp = [pp for ip, pp in enumerate(self.local_geometry._coords)
                       if ip in this_separation[1]]
                if sepplane.ordered_point_groups[0]:
                    pp_s0 = [pp for ip, pp in
                             enumerate(self.local_geometry._coords) if
                             ip in this_separation[0]]
                    ordind_s0 = local_plane.project_and_to2dim_ordered_indices(
                        pp_s0)
                    sep0 = [this_separation[0][ii] for ii in ordind_s0]
                else:
                    sep0 = list(this_separation[0])
                if sepplane.ordered_point_groups[1]:
                    pp_s2 = [pp for ip, pp in
                             enumerate(self.local_geometry._coords) if
                             ip in this_separation[2]]
                    ordind_s2 = local_plane.project_and_to2dim_ordered_indices(
                        pp_s2)
                    sep2 = [this_separation[2][ii] for ii in ordind_s2]
                else:
                    sep2 = list(this_separation[2])
                separation_perm = list(sep0)
                ordind = local_plane.project_and_to2dim_ordered_indices(inp)
                separation_perm.extend(
                    [this_separation[1][ii] for ii in ordind])
                algo = 'SEPARATION_PLANE_2POINTS_ORDERED'
                separation_perm.extend(sep2)
            else:
                separation_perm = list(this_separation[0])
                separation_perm.extend(this_separation[1])
                algo = 'SEPARATION_PLANE_2POINTS'
                separation_perm.extend(this_separation[2])
            if self.plane_safe_permutations:
                sep_perms = sepplane.safe_separation_permutations(
                    ordered_plane=sepplane.ordered_plane,
                    ordered_point_groups=sepplane.ordered_point_groups)
            else:
                sep_perms = sepplane.permutations
            plane_found = True
            for i_sep_perm, sep_perm in enumerate(sep_perms):
                # Compose the separation permutation with the algorithm's permutation,
                # then reorder by the reference separation to get the final permutation.
                perm1 = [separation_perm[ii] for ii in sep_perm]
                pp = [perm1[ii] for ii in argref_separation]
                # Skip permutations that have already been performed
                if tested_permutations != False and coordination_geometry.equivalent_indices is not None:
                    tuple_ref_perm = coordination_geometry.ref_permutation(pp)
                    if tuple_ref_perm in tested_permutations:
                        continue
                    tested_permutations.add(tuple_ref_perm)
                permutations.append(pp)
                if testing:
                    separation_permutations.append(sep_perm)
                points_distorted = self.local_geometry.points_wcs_ctwcc(
                    permutation=pp)
                sm_info = symmetry_measure(points_distorted=points_distorted,
                                           points_perfect=points_perfect)
                sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
                permutations_symmetry_measures.append(sm_info)
            if plane_found:
                # One tolerance produced a valid separation: stop trying larger ones.
                break
        if len(permutations_symmetry_measures) > 0:
            if testing:
                return permutations_symmetry_measures, permutations, algo, separation_permutations
            else:
                return permutations_symmetry_measures, permutations, [
                    sepplane.algorithm_type] * len(permutations)
        else:
            if plane_found:
                return permutations_symmetry_measures, permutations, []
            else:
                return None, None, None
def _cg_csm_separation_plane_optim1(self, coordination_geometry,
sepplane,
local_plane,
points_perfect=None,
separation_indices=None):
argref_separation = sepplane.argsorted_ref_separation_perm
permutations = []
permutations_symmetry_measures = []
stop_search = False
# TODO: do not do that several times ... also keep in memory
if sepplane.ordered_plane:
inp = [pp for ip, pp in enumerate(self.local_geometry._coords)
if ip in separation_indices[1]]
if sepplane.ordered_point_groups[0]:
pp_s0 = [pp for ip, pp in
enumerate(self.local_geometry._coords) if
ip in separation_indices[0]]
ordind_s0 = local_plane.project_and_to2dim_ordered_indices(
pp_s0)
sep0 = [separation_indices[0][ii] for ii in ordind_s0]
else:
sep0 = list(separation_indices[0])
if sepplane.ordered_point_groups[1]:
pp_s2 = [pp for ip, pp in
enumerate(self.local_geometry._coords) if
ip in separation_indices[2]]
ordind_s2 = local_plane.project_and_to2dim_ordered_indices(
pp_s2)
sep2 = [separation_indices[2][ii] for ii in ordind_s2]
else:
sep2 = list(separation_indices[2])
separation_perm = list(sep0)
ordind = local_plane.project_and_to2dim_ordered_indices(inp)
separation_perm.extend(
[separation_indices[1][ii] for ii in ordind])
separation_perm.extend(sep2)
else:
separation_perm = list(separation_indices[0])
separation_perm.extend(separation_indices[1])
separation_perm.extend(separation_indices[2])
if self.plane_safe_permutations:
sep_perms = sepplane.safe_separation_permutations(
ordered_plane=sepplane.ordered_plane,
ordered_point_groups=sepplane.ordered_point_groups)
else:
sep_perms = sepplane.permutations
for i_sep_perm, sep_perm in enumerate(sep_perms):
perm1 = [separation_perm[ii] for ii in sep_perm]
pp = [perm1[ii] for ii in argref_separation]
permutations.append(pp)
points_distorted = self.local_geometry.points_wcs_ctwcc(
permutation=pp)
sm_info = symmetry_measure(points_distorted=points_distorted,
points_perfect=points_perfect)
sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures.append(sm_info)
if len(permutations_symmetry_measures) > 0:
return permutations_symmetry_measures, permutations, [sepplane.algorithm_type] * len(permutations), stop_search
else:
return [], [], [], stop_search
    def _cg_csm_separation_plane_optim2(self, coordination_geometry,
                                        sepplane,
                                        local_plane,
                                        points_perfect=None,
                                        separation_indices=None):
        """Continuous symmetry measures for the permutations generated by one
        separation plane (numpy-optimized variant).

        ``separation_indices`` are used with ``take``/``concatenate``, so they
        are expected to be numpy integer index arrays (side 1 of the plane,
        in-plane points, side 2 of the plane).

        :param coordination_geometry: coordination geometry being tested
            (unused in this variant, kept for signature parity).
        :param sepplane: separation-plane algorithm of this geometry.
        :param local_plane: plane fitted to the local environment.
        :param points_perfect: points of the perfect reference geometry.
        :param separation_indices: three index arrays as described above.
        :return: tuple (symmetry measures, permutations, algorithms,
            stop_search); ``stop_search`` is always False in this variant.
        """
        argref_separation = sepplane.argsorted_ref_separation_perm
        permutations = []
        permutations_symmetry_measures = []
        stop_search = False
        # TODO: do not do that several times ... also keep in memory
        if sepplane.ordered_plane:
            # Coordinates of the points lying in the separation plane.
            inp = self.local_geometry.coords.take(separation_indices[1], axis=0)
            if sepplane.ordered_point_groups[0]:
                pp_s0 = self.local_geometry.coords.take(separation_indices[0], axis=0)
                ordind_s0 = local_plane.project_and_to2dim_ordered_indices(
                    pp_s0)
                # sep0 = [separation_indices[0][ii] for ii in ordind_s0]
                sep0 = separation_indices[0].take(ordind_s0)
            else:
                # sep0 = list(separation_indices[0])
                sep0 = separation_indices[0]
            if sepplane.ordered_point_groups[1]:
                pp_s2 = self.local_geometry.coords.take(separation_indices[2], axis=0)
                ordind_s2 = local_plane.project_and_to2dim_ordered_indices(
                    pp_s2)
                # sep2 = [separation_indices[2][ii] for ii in ordind_s2]
                sep2 = separation_indices[2].take(ordind_s2)
            else:
                # sep2 = list(separation_indices[2])
                sep2 = separation_indices[2]
            # separation_perm = list(sep0)
            ordind = local_plane.project_and_to2dim_ordered_indices(inp)
            # separation_perm.extend(
            #     [separation_indices[1][ii] for ii in ordind])
            inp1 = separation_indices[1].take(ordind)
            # separation_perm.extend(sep2)
            # Side-1, in-plane and side-2 indices in projected order.
            separation_perm = np.concatenate((sep0, inp1, sep2))
        else:
            # separation_perm = list(separation_indices[0])
            # separation_perm.extend(separation_indices[1])
            # separation_perm.extend(separation_indices[2])
            separation_perm = np.concatenate(separation_indices)
        if self.plane_safe_permutations:
            sep_perms = sepplane.safe_separation_permutations(
                ordered_plane=sepplane.ordered_plane,
                ordered_point_groups=sepplane.ordered_point_groups)
        else:
            sep_perms = sepplane.permutations
        for i_sep_perm, sep_perm in enumerate(sep_perms):
            perm1 = separation_perm.take(sep_perm)
            pp = perm1.take(argref_separation)
            permutations.append(pp)
            points_distorted = self.local_geometry.points_wcs_ctwcc(
                permutation=pp)
            sm_info = symmetry_measure(points_distorted=points_distorted,
                                       points_perfect=points_perfect)
            sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
            permutations_symmetry_measures.append(sm_info)
        if len(permutations_symmetry_measures) > 0:
            return permutations_symmetry_measures, permutations, [sepplane.algorithm_type] * len(permutations), stop_search
        else:
            return [], [], [], stop_search
def coordination_geometry_symmetry_measures_fallback_random(self,
coordination_geometry,
NRANDOM=10,
points_perfect=None):
"""
Returns the symmetry measures for a random set of permutations for the coordination geometry
"coordination_geometry". Fallback implementation for the plane separation algorithms measures
of each permutation
:param coordination_geometry: The coordination geometry to be investigated
:param NRANDOM: Number of random permutations to be tested
:return: The symmetry measures for the given coordination geometry for each permutation investigated
"""
permutations_symmetry_measures = [None] * NRANDOM
permutations = list()
algos = list()
perfect2local_maps = list()
local2perfect_maps = list()
for iperm in range(NRANDOM):
perm = np.random.permutation(
coordination_geometry.coordination_number)
permutations.append(perm)
p2l = {}
l2p = {}
for i_p, pp in enumerate(perm):
p2l[i_p] = pp
l2p[pp] = i_p
perfect2local_maps.append(p2l)
local2perfect_maps.append(l2p)
points_distorted = self.local_geometry.points_wcs_ctwcc(
permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted,
points_perfect=points_perfect)
sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append('APPROXIMATE_FALLBACK')
return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
| gpetretto/pymatgen | pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py | Python | mit | 98,192 | [
"pymatgen"
] | 50ac18300969034016fb97f445b81c66cbd8fe58de4aa5c3b9b90b387a6d5ae5 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
pick weighted random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
import itertools as _itertools
import bisect as _bisect
# Public API; each name is re-exported from the module-level Random instance
# created at the bottom of this file.
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
           "randrange","shuffle","normalvariate","lognormvariate",
           "expovariate","vonmisesvariate","gammavariate","triangular",
           "gauss","betavariate","paretovariate","weibullvariate",
           "getstate","setstate", "getrandbits", "choices",
           "SystemRandom"]
# 4*exp(-1/2)/sqrt(2): constant for the Kinderman-Monahan ratio-of-uniforms
# method used by normalvariate().
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
# 1 + log(4.5): squeeze-step constant for gammavariate() (alpha > 1 branch).
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF # 2**-53: scales 53 random bits into a float in [0, 1)
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
    """Random number generator base class used by bound module functions.
    Used to instantiate instances of Random to get generators that don't
    share state.
    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods: random(), seed(), getstate(), and setstate().
    Optionally, implement a getrandbits() method so that randrange()
    can cover arbitrarily large ranges.
    """
    VERSION = 3 # used by getstate/setstate
    def __init__(self, x=None):
        """Initialize an instance.
        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        self.gauss_next = None
    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.
        None or no argument seeds from current time or from an operating
        system specific randomness source if available.
        If *a* is an int, all bits are used.
        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray. For version 1 (provided for reproducing random
        sequences from older versions of Python), the algorithm for str and
        bytes generates a narrower range of seeds.
        """
        if version == 1 and isinstance(a, (str, bytes)):
            x = ord(a[0]) << 7 if a else 0
            for c in a:
                x = ((1000003 * x) ^ ord(c)) & 0xFFFFFFFFFFFFFFFF
            x ^= len(a)
            a = -2 if x == -1 else x
        if version == 2 and isinstance(a, (str, bytes, bytearray)):
            if isinstance(a, str):
                a = a.encode()
            # Appending the sha512 digest spreads similar string seeds over
            # very different integers before the core generator is seeded.
            a += _sha512(a).digest()
            a = int.from_bytes(a, 'big')
        super().seed(a)
        self.gauss_next = None
    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, super().getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super().setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            # inconsistencies between 32/64-bit systems. The state is
            # really unsigned 32-bit ints, so we convert negative ints from
            # version 2 to positive longs for version 3.
            try:
                internalstate = tuple(x % (2**32) for x in internalstate)
            except ValueError as e:
                raise TypeError from e
            super().setstate(internalstate)
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
    ## ---- Methods below this point do not need to be overridden when
    ## ---- subclassing for the purpose of using a different core generator.
    ## -------------------- pickle support -------------------
    # Issue 17489: Since __reduce__ was defined to fix #759889 this is no
    # longer called; we leave it here because it has been here since random was
    # rewritten back in 2001 and why risk breaking something.
    def __getstate__(self): # for pickle
        return self.getstate()
    def __setstate__(self, state): # for pickle
        self.setstate(state)
    def __reduce__(self):
        return self.__class__, (), self.getstate()
    ## -------------------- integer methods -------------------
    def randrange(self, start, stop=None, step=1, _int=int):
        """Choose a random item from range(start, stop[, step]).
        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        """
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = _int(start)
        if istart != start:
            raise ValueError("non-integer arg 1 for randrange()")
        if stop is None:
            if istart > 0:
                return self._randbelow(istart)
            raise ValueError("empty range for randrange()")
        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError("non-integer stop for randrange()")
        width = istop - istart
        if step == 1 and width > 0:
            return istart + self._randbelow(width)
        if step == 1:
            # NOTE(review): the third interpolated value below is 'width',
            # not 'step', although the message shape suggests a step.
            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError("non-integer step for randrange()")
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError("zero step for randrange()")
        if n <= 0:
            raise ValueError("empty range for randrange()")
        return istart + istep*self._randbelow(n)
    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """
        return self.randrange(a, b+1)
    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
        "Return a random int in the range [0,n). Raises ValueError if n==0."
        random = self.random
        getrandbits = self.getrandbits
        # Only call self.getrandbits if the original random() builtin method
        # has not been overridden or if a new getrandbits() was supplied.
        if type(random) is BuiltinMethod or type(getrandbits) is Method:
            # Rejection sampling: draw k bits until the value falls below n,
            # which keeps the distribution exactly uniform.
            k = n.bit_length() # don't use (n-1) here because n can be 1
            r = getrandbits(k) # 0 <= r < 2**k
            while r >= n:
                r = getrandbits(k)
            return r
        # There's an overridden random() method but no new getrandbits() method,
        # so we can only use random() from here.
        if n >= maxsize:
            _warn("Underlying random() generator does not supply \n"
                  "enough bits to choose from a population range this large.\n"
                  "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        rem = maxsize % n
        limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
        r = random()
        while r >= limit:
            r = random()
        return int(r*maxsize) % n
    ## -------------------- sequence methods -------------------
    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        try:
            i = self._randbelow(len(seq))
        except ValueError:
            raise IndexError('Cannot choose from an empty sequence')
        return seq[i]
    def shuffle(self, x, random=None):
        """Shuffle list x in place, and return None.
        Optional argument random is a 0-argument function returning a
        random float in [0.0, 1.0); if it is the default None, the
        standard random.random will be used.
        """
        # Fisher-Yates shuffle: walk from the end, swapping each slot with a
        # uniformly chosen earlier (or same) slot.
        if random is None:
            randbelow = self._randbelow
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = randbelow(i+1)
                x[i], x[j] = x[j], x[i]
        else:
            _int = int
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = _int(random() * (i+1))
                x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.
        Returns a new list containing elements from the population while
        leaving the original population unchanged. The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples. This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).
        Members of the population need not be hashable or unique. If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.
        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population: sample(range(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.
        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection. For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.
        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population or is negative")
        result = [None] * k
        setsize = 21 # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set
            pool = list(population)
            for i in range(k): # invariant: non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1] # move non-selected item into vacancy
        else:
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
    def choices(self, population, weights=None, *, cum_weights=None, k=1):
        """Return a k sized list of population elements chosen with replacement.
        If the relative weights or cumulative weights are not specified,
        the selections are made with equal probability.
        """
        random = self.random
        if cum_weights is None:
            if weights is None:
                _int = int
                total = len(population)
                return [population[_int(random() * total)] for i in range(k)]
            cum_weights = list(_itertools.accumulate(weights))
        elif weights is not None:
            raise TypeError('Cannot specify both weights and cumulative weights')
        if len(cum_weights) != len(population):
            raise ValueError('The number of weights does not match the population')
        # Each draw bisects into the cumulative-weight table.
        bisect = _bisect.bisect
        total = cum_weights[-1]
        return [population[bisect(cum_weights, random() * total)] for i in range(k)]
    ## -------------------- real-valued distributions -------------------
    ## -------------------- uniform distribution -------------------
    def uniform(self, a, b):
        "Get a random number in the range [a, b) or [a, b] depending on rounding."
        return a + (b-a) * self.random()
    ## -------------------- triangular --------------------
    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.
        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.
        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        try:
            c = 0.5 if mode is None else (mode - low) / (high - low)
        except ZeroDivisionError:
            # Degenerate distribution: low == high.
            return low
        if u > c:
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5
    ## -------------------- normal distribution --------------------
    def normalvariate(self, mu, sigma):
        """Normal distribution.
        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation
        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.
        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma
    ## -------------------- lognormal distribution --------------------
    def lognormvariate(self, mu, sigma):
        """Log normal distribution.
        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.
        """
        return _exp(self.normalvariate(mu, sigma))
    ## -------------------- exponential distribution --------------------
    def expovariate(self, lambd):
        """Exponential distribution.
        lambd is 1.0 divided by the desired mean. It should be
        nonzero. (The parameter would be called "lambda", but that is
        a reserved word in Python.) Returned values range from 0 to
        positive infinity if lambd is positive, and from negative
        infinity to 0 if lambd is negative.
        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)
        # we use 1-random() instead of random() to preclude the
        # possibility of taking the log of zero.
        return -_log(1.0 - self.random())/lambd
    ## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero. If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu: mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()
        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break
        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI
        return theta
    ## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution. Not the gamma function!
        Conditions on the parameters are alpha > 0 and beta > 0.
        The probability distribution function is:
        x ** (alpha - 1) * math.exp(-x / beta)
        pdf(x) = --------------------------------------
        math.gamma(alpha) * beta ** alpha
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')
        random = self.random
        if alpha > 1.0:
            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74
            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv
            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta
        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta
        else: # alpha is between 0 and 1 (exclusive)
            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
    ## -------------------- Gauss (faster alternative) --------------------
    def gauss(self, mu, sigma):
        """Gaussian distribution.
        mu is the mean, and sigma is the standard deviation. This is
        slightly faster than the normalvariate() function.
        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        # cos(2*pi*x)*sqrt(-2*log(1-y))
        # sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)
        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value. The window is very small though. To
        # avoid this, you have to use a lock around all calls. (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)
        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            # Cache the second independent deviate for the next call.
            self.gauss_next = _sin(x2pi) * g2rad
        return mu + z*sigma
    ## -------------------- beta --------------------
    ## See
    ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
    ## for Ivan Frohne's insightful analysis of why the original implementation:
    ##
    ## def betavariate(self, alpha, beta):
    ## # Discrete Event Simulation in C, pp 87-88.
    ##
    ## y = self.expovariate(alpha)
    ## z = self.expovariate(1.0/beta)
    ## return z/(y+z)
    ##
    ## was dead wrong, and how it probably got that way.
    def betavariate(self, alpha, beta):
        """Beta distribution.
        Conditions on the parameters are alpha > 0 and beta > 0.
        Returned values range between 0 and 1.
        """
        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.0)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.0))
    ## -------------------- Pareto --------------------
    def paretovariate(self, alpha):
        """Pareto distribution. alpha is the shape parameter."""
        # Jain, pg. 495
        u = 1.0 - self.random()
        return 1.0 / u ** (1.0/alpha)
    ## -------------------- Weibull --------------------
    def weibullvariate(self, alpha, beta):
        """Weibull distribution.
        alpha is the scale parameter and beta is the shape parameter.
        """
        # Jain, pg. 499; bug fix courtesy Bill Arms
        u = 1.0 - self.random()
        return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Random number generator backed by the operating system's entropy
    source (e.g. /dev/urandom on Unix or CryptGenRandom on Windows).
    Not available on all systems (see os.urandom() for details); there is
    no reproducible state, so seeding and state save/restore are disabled.
    """
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 bytes give 56 random bits; drop 3 so exactly 53 (BPF) remain.
        bits = int.from_bytes(_urandom(7), 'big')
        return (bits >> 3) * RECIP_BPF
    def getrandbits(self, k):
        """getrandbits(k) -> x. Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8 # round k bits up to whole bytes
        raw = int.from_bytes(_urandom(nbytes), 'big')
        return raw >> (nbytes * 8 - k) # shift away the surplus bits
    def seed(self, *args, **kwds):
        "Stub method. Not used for a system random number generator."
        return None
    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g\n' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Run the timing/statistics harness over each exported distribution."""
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    # Sweep gammavariate over shapes covering all three algorithm branches.
    for alpha, beta in [(0.01, 1.0), (0.1, 1.0), (0.1, 2.0), (0.5, 1.0),
                        (0.9, 1.0), (1.0, 1.0), (2.0, 1.0), (20.0, 1.0),
                        (200.0, 1.0)]:
        _test_generator(N, gammavariate, (alpha, beta))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
choices = _inst.choices
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
# Run the timing/statistics smoke test when executed as a script.
if __name__ == '__main__':
    _test()
| thecodinghub/news-for-good | news/Lib/random.py | Python | bsd-3-clause | 27,228 | [
"Gaussian"
] | 04aec4c586afd2e71b839312b04961e0ef01c2e4037712113c3b571def8723c7 |
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Karan Desai <karandesai281196@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# Christian Lorentzen <lorentzen.ch@googlemail.com>
# Ashutosh Hathidara <ashutoshhathidara98@gmail.com>
# License: BSD 3 clause
import numpy as np
import warnings
from .._loss.glm_distribution import TweedieDistribution
from ..utils.validation import (check_array, check_consistent_length,
_num_samples)
from ..utils.validation import column_or_1d
from ..utils.validation import _deprecate_positional_args
from ..utils.validation import _check_sample_weight
from ..utils.stats import _weighted_percentile
from ..exceptions import UndefinedMetricWarning
# NOTE(review): this is spelled __ALL__, which Python's import machinery does
# not recognise (star-imports honour __all__ only), so the list is currently
# informational; presumably intended as __all__ -- confirm before renaming.
__ALL__ = [
    "max_error",
    "mean_absolute_error",
    "mean_squared_error",
    "mean_squared_log_error",
    "median_absolute_error",
    "mean_absolute_percentage_error",
    "r2_score",
    "explained_variance_score",
    "mean_tweedie_deviance",
    "mean_poisson_deviance",
    "mean_gamma_deviance",
]
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
    """Validate that ``y_true`` and ``y_pred`` describe the same regression task.

    Both arrays are coerced to 2-D column form and checked for matching
    length and output count; ``multioutput`` is validated against the
    accepted keyword strings or converted to a per-output weight array
    (``None`` is accepted for backward compatibility of ``r2_score``).

    Parameters
    ----------
    y_true : array-like
    y_pred : array-like
    multioutput : array-like or string in ['raw_values', uniform_average',
        'variance_weighted'] or None
    dtype : str or list, default="numeric"
        the dtype argument passed to check_array

    Returns
    -------
    type_true : one of {'continuous', continuous-multioutput'}
    y_true : array-like of shape (n_samples, n_outputs)
    y_pred : array-like of shape (n_samples, n_outputs)
    multioutput : array-like of shape (n_outputs) or string or None
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))
    n_outputs = y_true.shape[1]
    if y_pred.shape[1] != n_outputs:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(n_outputs, y_pred.shape[1]))
    valid_keywords = ('raw_values', 'uniform_average',
                      'variance_weighted')
    if isinstance(multioutput, str):
        if multioutput not in valid_keywords:
            raise ValueError("Allowed 'multioutput' string values are {}. "
                             "You provided multioutput={!r}".format(
                                 valid_keywords, multioutput))
    elif multioutput is not None:
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
        if n_outputs != len(multioutput):
            raise ValueError(("There must be equally many custom weights "
                              "(%d) as outputs (%d).") %
                             (len(multioutput), n_outputs))
    if n_outputs == 1:
        y_type = 'continuous'
    else:
        y_type = 'continuous-multioutput'
    return y_type, y_true, y_pred, multioutput
@_deprecate_positional_args
def mean_absolute_error(y_true, y_pred, *,
                        sample_weight=None,
                        multioutput='uniform_average'):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines how errors of multiple outputs are aggregated.
        'raw_values' returns one error per output; 'uniform_average'
        averages them with equal weight; an array-like gives explicit
        per-output averaging weights.

    Returns
    -------
    loss : float or ndarray of floats
        Per-output errors if multioutput is 'raw_values', otherwise the
        (weighted) average over outputs. MAE is non-negative floating
        point; the best value is 0.0.
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # Sample-weighted mean of |residuals|, computed per output column.
    per_output = np.average(np.abs(y_pred - y_true), axis=0,
                            weights=sample_weight)
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return per_output
        # 'uniform_average': weights=None makes np.average a uniform mean.
        multioutput = None
    return np.average(per_output, weights=multioutput)
@_deprecate_positional_args
def mean_absolute_percentage_error(y_true, y_pred, *,
                                   sample_weight=None,
                                   multioutput='uniform_average'):
    """Mean absolute percentage error regression loss
    Note here that we do not represent the output as a percentage in range
    [0, 100]. Instead, we represent it in range [0, 1/eps]. Read more in the
    :ref:`User Guide <mean_absolute_percentage_error>`.
    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average'} or array-like
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        If input is list then the shape must be (n_outputs,).
        'raw_values' :
            Returns a full set of errors in case of multioutput input.
        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.
    Returns
    -------
    loss : float or ndarray of floats in the range [0, 1/eps]
        If multioutput is 'raw_values', then mean absolute percentage error
        is returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.
        MAPE output is non-negative floating point. The best value is 0.0.
        But note the fact that bad predictions can lead to arbitrarily large
        MAPE values, especially if some y_true values are very close to zero.
        Note that we return a large value instead of `inf` when y_true is zero.
    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_percentage_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.3273...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.5515...
    >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.6198...
    """
    # NOTE: the decorator and keyword-only marker match every sibling metric
    # in this module; positional callers keep working via a deprecation
    # warning emitted by _deprecate_positional_args.
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # Clip |y_true| away from zero so divisions yield a large finite value
    # instead of inf when a true target is exactly zero.
    epsilon = np.finfo(np.float64).eps
    mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
    output_errors = np.average(mape,
                               weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return output_errors
        elif multioutput == 'uniform_average':
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)
@_deprecate_positional_args
def mean_squared_error(y_true, y_pred, *,
                       sample_weight=None,
                       multioutput='uniform_average', squared=True):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines how errors of multiple outputs are aggregated.
        'raw_values' returns one error per output; 'uniform_average'
        averages them with equal weight; an array-like gives explicit
        per-output averaging weights.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE value.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # Sample-weighted mean of squared residuals, per output column.
    per_output = np.average((y_true - y_pred) ** 2, axis=0,
                            weights=sample_weight)
    if not squared:
        # RMSE: take the root per output before any cross-output averaging.
        per_output = np.sqrt(per_output)
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return per_output
        # 'uniform_average': weights=None makes np.average a uniform mean.
        multioutput = None
    return np.average(per_output, weights=multioutput)
@_deprecate_positional_args
def mean_squared_log_error(y_true, y_pred, *,
                           sample_weight=None,
                           multioutput='uniform_average'):
    """Mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values. Must be non-negative.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values. Must be non-negative.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines how errors of multiple outputs are aggregated; an
        array-like gives explicit per-output averaging weights.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # log1p is undefined below -1; the metric requires non-negative targets.
    if (y_true < 0).any() or (y_pred < 0).any():
        raise ValueError("Mean Squared Logarithmic Error cannot be used when "
                         "targets contain negative values.")
    # MSLE is simply MSE computed in log1p space.
    return mean_squared_error(np.log1p(y_true), np.log1p(y_pred),
                              sample_weight=sample_weight,
                              multioutput=multioutput)
@_deprecate_positional_args
def median_absolute_error(y_true, y_pred, *, multioutput='uniform_average',
                          sample_weight=None):
    """Median absolute error regression loss.

    Median absolute error output is non-negative floating point. The best
    value is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines how errors of multiple outputs are aggregated; an
        array-like gives explicit per-output averaging weights.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.24

    Returns
    -------
    loss : float or ndarray of floats
        Per-output errors if multioutput is 'raw_values', otherwise the
        (weighted) average over outputs.
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    abs_residuals = np.abs(y_pred - y_true)
    if sample_weight is None:
        per_output = np.median(abs_residuals, axis=0)
    else:
        # Weighted median == 50th weighted percentile.
        sample_weight = _check_sample_weight(sample_weight, y_pred)
        per_output = _weighted_percentile(abs_residuals,
                                          sample_weight=sample_weight)
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return per_output
        # 'uniform_average': weights=None makes np.average a uniform mean.
        multioutput = None
    return np.average(per_output, weights=multioutput)
@_deprecate_positional_args
def explained_variance_score(y_true, y_pred, *,
                             sample_weight=None,
                             multioutput='uniform_average'):
    """Explained variance regression score function.

    Best possible score is 1.0, lower values are worse.
    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \
            array-like of shape (n_outputs,), default='uniform_average'
        Defines how scores of multiple outputs are aggregated.
        'raw_values' returns one score per output; 'uniform_average'
        averages them with equal weight; 'variance_weighted' weights each
        score by the variance of its output; an array-like gives explicit
        per-output averaging weights.

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    Notes
    -----
    This is not a symmetric function.
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)

    # Variance of the residuals (numerator) vs. variance of y_true
    # (denominator), both sample-weighted and per output column.
    residual = y_true - y_pred
    residual_avg = np.average(residual, weights=sample_weight, axis=0)
    numerator = np.average((residual - residual_avg) ** 2,
                           weights=sample_weight, axis=0)
    y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - y_true_avg) ** 2,
                             weights=sample_weight, axis=0)

    nonzero_numerator = numerator != 0
    nonzero_denominator = denominator != 0
    valid_score = nonzero_numerator & nonzero_denominator
    # Default score of 1.0 covers the degenerate 0/0 case.
    output_scores = np.ones(y_true.shape[1])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # Constant y_true with non-constant residuals: pin the score to 0
    # instead of letting it go to -inf.
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.

    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return output_scores
        # 'uniform_average' -> uniform mean; 'variance_weighted' -> weight
        # each output's score by its variance. These are the only string
        # values that reach this point after validation.
        avg_weights = None if multioutput == 'uniform_average' else denominator
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
@_deprecate_positional_args
def r2_score(y_true, y_pred, *, sample_weight=None,
             multioutput="uniform_average"):
    """R^2 (coefficient of determination) regression score function.
    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get a R^2 score of 0.0.
    Read more in the :ref:`User Guide <r2_score>`.
    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
            array-like of shape (n_outputs,) or None, default='uniform_average'
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".
        'raw_values' :
            Returns a full set of scores in case of multioutput input.
        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.
        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.
        .. versionchanged:: 0.19
            Default value of multioutput is 'uniform_average'.
    Returns
    -------
    z : float or ndarray of floats
        The R^2 score or ndarray of scores if 'multioutput' is
        'raw_values'.
    Notes
    -----
    This is not a symmetric function.
    Unlike most other scores, R^2 score may be negative (it need not actually
    be the square of a quantity R).
    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.
    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred,
    ...          multioutput='variance_weighted')
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # R^2 compares predictions to the (weighted) mean of y_true; with fewer
    # than two samples that variance is undefined, so warn and return NaN.
    if _num_samples(y_pred) < 2:
        msg = "R^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float('nan')
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        # Reshape to a column so weights broadcast across output columns.
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.
    # Residual sum of squares, per output.
    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
                                                      dtype=np.float64)
    # Total sum of squares around the weighted mean of y_true, per output.
    denominator = (weight * (y_true - np.average(
        y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
                                                          dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    # Default of 1.0 handles the 0/0 case (perfect fit on constant target).
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            # return scores individually
            return output_scores
        elif multioutput == 'uniform_average':
            # passing None as weights results is uniform mean
            avg_weights = None
        elif multioutput == 'variance_weighted':
            avg_weights = denominator
            # avoid fail on constant y or one-element arrays
            if not np.any(nonzero_denominator):
                if not np.any(nonzero_numerator):
                    return 1.0
                else:
                    return 0.0
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
def max_error(y_true, y_pred):
    """
    max_error metric calculates the maximum residual error.

    Read more in the :ref:`User Guide <max_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    Returns
    -------
    max_error : float
        A positive floating point value (the best value is 0.0).
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None)
    # This metric is defined for a single output only.
    if y_type == 'continuous-multioutput':
        raise ValueError("Multioutput not supported in max_error")
    return np.abs(y_true - y_pred).max()
@_deprecate_positional_args
def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0):
    """Mean Tweedie deviance regression loss.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,)
        Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1. The
        higher `p` the less weight is given to extreme deviations between
        true and predicted targets. Notable values: 0 is the normal
        distribution (equivalent to mean_squared_error), 1 is Poisson
        (requires y_true >= 0, y_pred > 0), 2 is Gamma and 3 is inverse
        Gaussian (both require y_true > 0, y_pred > 0).

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(
        y_true, y_pred, None, dtype=[np.float64, np.float32])
    # This metric is defined for a single output only.
    if y_type == 'continuous-multioutput':
        raise ValueError("Multioutput not supported in mean_tweedie_deviance")
    check_consistent_length(y_true, y_pred, sample_weight)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)[:, np.newaxis]
    # TweedieDistribution validates the inputs for the requested power and
    # computes the per-sample unit deviance; average it (weighted) here.
    deviances = TweedieDistribution(power=power).unit_deviance(
        y_true, y_pred, check_input=True)
    return np.average(deviances, weights=sample_weight)
@_deprecate_positional_args
def mean_poisson_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Poisson deviance regression loss.

    Poisson deviance is equivalent to the Tweedie deviance with the power
    parameter `power=1`. Read more in the
    :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true >= 0.
    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).
    """
    # Dispatch to the generic Tweedie implementation at power=1.
    return mean_tweedie_deviance(y_true, y_pred,
                                 sample_weight=sample_weight, power=1)
@_deprecate_positional_args
def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Gamma deviance regression loss.

    Gamma deviance is equivalent to the Tweedie deviance with the power
    parameter `power=2`. It is invariant to scaling of the target variable,
    and measures relative errors. Read more in the
    :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true > 0.
    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).
    """
    # Dispatch to the generic Tweedie implementation at power=2.
    return mean_tweedie_deviance(y_true, y_pred,
                                 sample_weight=sample_weight, power=2)
| bnaul/scikit-learn | sklearn/metrics/_regression.py | Python | bsd-3-clause | 31,835 | [
"Gaussian"
] | 87e249010e50a0d0482f30e12c90a98bafd142ea17368f96cba584c1391a650e |
# -*- coding: utf-8 -*-
# ---- automatic determination of chunking parameters -----
# functions taken from
# http://www.unidata.ucar.edu/staff/russ/public/chunk_shape_3D.py
# see also
"""
http://www.unidata.ucar.edu/blogs/developer/entry/chunking_data_choosing_shapes
"""
import functools
import math
import operator
def binlist(n, width=0):
    """Return list of bits that represent a non-negative integer.

    n -- non-negative integer
    width -- number of bits in returned zero-filled list (default 0)
    """
    # Use a list comprehension instead of map() so the result is a real
    # list on both Python 2 and Python 3 (map() is a lazy iterator on
    # Python 3, which breaks callers that index or re-iterate the result).
    return [int(bit) for bit in bin(n)[2:].zfill(width)]
def numVals(shape):
    """Return number of values in chunk of specified shape, given by a list of
    dimension lengths.

    shape -- list of variable dimension sizes
    """
    # An empty shape denotes a scalar: one value.
    if len(shape) == 0:
        return 1
    # functools.reduce works on Python 2 and 3 alike; the bare builtin
    # ``reduce`` used previously was removed in Python 3.
    return functools.reduce(operator.mul, shape)
def perturbShape(shape, onbits):
    """Return shape perturbed by adding 1 to elements corresponding to 1 bits
    in onbits

    shape -- list of variable dimension sizes
    onbits -- non-negative integer less than 2**len(shape)
    """
    # Inline the bit expansion and return a real list: on Python 3 the old
    # map(sum, zip(...)) form returned a lazy iterator, which the caller
    # (chunk_shape_3D) consumes twice — once in numVals() and once in
    # list(cCand) — yielding an empty second pass.
    bits = [int(b) for b in bin(onbits)[2:].zfill(len(shape))]
    return [dim + bit for dim, bit in zip(shape, bits)]
def chunk_shape_3D(varShape, valSize=4, chunkSize=4096):
    """
    Return a 'good shape' for a 3D variable, assuming balanced 1D, 2D access
    varShape -- length 3 list of variable dimension sizes
    chunkSize -- maximum chunksize desired, in bytes (default 4096)
    valSize -- size of each data value, in bytes (default 4)
    Returns integer chunk lengths of a chunk shape that provides
    balanced access of 1D subsets and 2D subsets of a netCDF or HDF5
    variable var with shape (T, X, Y), where the 1D subsets are of the
    form var[:,x,y] and the 2D slices are of the form var[t,:,:],
    typically 1D time series and 2D spatial slices. 'Good shape' for
    chunks means that the number of chunks accessed to read either
    kind of 1D or 2D subset is approximately equal, and the size of
    each chunk (uncompressed) is no more than chunkSize, which is
    often a disk block size.
    """
    # NOTE(review): this module is Python-2 era code; the final
    # ``map(int, cBest)`` is lazy on Python 3 — callers that need a list
    # should wrap it. Verify against the call sites before changing.
    rank = 3  # this is a special case of n-dimensional function chunk_shape
    # ideal number of values in a chunk
    chunkVals = chunkSize / float(valSize)
    # ideal number of chunks
    numChunks = varShape[0]*varShape[1]*varShape[2] / chunkVals
    # Fourth root balances one factor of axisChunks**2 on the time axis
    # against one factor of axisChunks on each spatial axis.
    axisChunks = numChunks ** 0.25 # ideal number of chunks along each 2D axis
    cFloor = [] # will be first estimate of good chunk shape
    # cFloor = [varShape[0] // axisChunks**2, varShape[1] // axisChunks,
    #          varShape[2] // axisChunks]
    # except that each chunk shape dimension must be at least 1
    # chunkDim = max(1.0, varShape[0] // axisChunks**2)
    if varShape[0] / axisChunks**2 < 1.0:
        chunkDim = 1.0
        # Clamp time-axis chunk to 1 and redistribute the excess chunking
        # onto the remaining axes.
        axisChunks = axisChunks / math.sqrt(varShape[0]/axisChunks**2)
    else:
        chunkDim = varShape[0] // axisChunks**2
    cFloor.append(chunkDim)
    # factor to increase other dims if some must be increased to 1.0
    prod = 1.0
    for i in range(1, rank):
        if varShape[i] / axisChunks < 1.0:
            prod *= axisChunks / varShape[i]
    for i in range(1, rank):
        if varShape[i] / axisChunks < 1.0:
            chunkDim = 1.0
        else:
            chunkDim = (prod*varShape[i]) // axisChunks
        cFloor.append(chunkDim)
    # cFloor is typically too small, (numVals(cFloor) < chunkSize)
    # Adding 1 to each shape dim results in chunks that are too large,
    # (numVals(cCeil) > chunkSize). Want to just add 1 to some of the
    # axes to get as close as possible to chunkSize without exceeding
    # it. Here we use brute force, compute numVals(cCand) for all
    # 2**rank candidates and return the one closest to chunkSize
    # without exceeding it.
    bestChunkSize = 0
    cBest = cFloor
    for i in range(8):
        # cCand = map(sum,zip(cFloor, binlist(i, rank)))
        cCand = perturbShape(cFloor, i)
        thisChunkSize = valSize * numVals(cCand)
        if bestChunkSize < thisChunkSize <= chunkSize:
            bestChunkSize = thisChunkSize
            cBest = list(cCand) # make a copy of best candidate so far
    return map(int, cBest)
| Chilipp/nc2map | nc_utils.py | Python | gpl-2.0 | 4,129 | [
"NetCDF"
] | fd134f397db17eb8ecf98eeaac16292e30ac9dd2543eda68bff9b8f9c40029ff |
"""
Acceptance tests for studio related to the outline page.
"""
from nose.plugins.attrib import attr
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from ..pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from ..pages.studio.utils import add_discussion
from ..pages.lms.courseware import CoursewarePage
from ..fixtures.course import XBlockFixtureDesc
from .base_studio_test import StudioCourseTest
from .helpers import load_data_str
from ..pages.lms.progress import ProgressPage
# Default display names used when building the course fixture hierarchy
# (section -> subsection -> unit) in the tests below.
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
    """
    Base class for all course outline tests
    """

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CourseOutlineTest, self).setUp()
        # Page object for the Studio outline view of the fixture course.
        self.course_outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
        )

    def populate_course_fixture(self, course_fixture):
        """ Install a course with sections/problems, tabs, updates, and handouts """
        # Build the hierarchy bottom-up: unit with three components,
        # wrapped in a subsection, wrapped in a section.
        unit = XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
            XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
            XBlockFixtureDesc('html', 'Test HTML Component'),
            XBlockFixtureDesc('discussion', 'Test Discussion Component'),
        )
        subsection = XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(unit)
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(subsection)
        )
@attr('shard_2')
class WarningMessagesTest(CourseOutlineTest):
"""
Feature: Warning messages on sections, subsections, and units
"""
__test__ = True
STAFF_ONLY_WARNING = 'Contains staff only content'
LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'
class PublishState:
NEVER_PUBLISHED = 1
UNPUBLISHED_CHANGES = 2
PUBLISHED = 3
VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
class UnitState:
""" Represents the state of a unit """
def __init__(self, is_released, publish_state, is_locked):
""" Creates a new UnitState with the given properties """
self.is_released = is_released
self.publish_state = publish_state
self.is_locked = is_locked
@property
def name(self):
""" Returns an appropriate name based on the properties of the unit """
result = "Released " if self.is_released else "Unreleased "
if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
result += "Never Published "
elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
result += "Unpublished Changes "
else:
result += "Published "
result += "Locked" if self.is_locked else "Unlocked"
return result
def populate_course_fixture(self, course_fixture):
""" Install a course with various configurations that could produce warning messages """
# Define the dimensions that map to the UnitState constructor
features = [
[True, False], # Possible values for is_released
self.PublishState.VALUES, # Possible values for publish_state
[True, False] # Possible values for is_locked
]
# Add a fixture for every state in the product of features
course_fixture.add_children(*[
self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
])
def _build_fixture(self, unit_state):
""" Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
name = unit_state.name
start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
# Children of never published subsections will be added on demand via _ensure_unit_present
return XBlockFixtureDesc('chapter', name).add_children(
subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
else subsection.add_children(
XBlockFixtureDesc('vertical', name, metadata={'visible_to_staff_only': unit_state.is_locked})
)
)
def test_released_never_published_locked(self):
""" Tests that released never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_never_published_unlocked(self):
""" Tests that released never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_released_unpublished_changes_locked(self):
""" Tests that released unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_unpublished_changes_unlocked(self):
    """ Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
    state = self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False)
    self._verify_unit_warning(state, self.LIVE_UNPUBLISHED_WARNING)
def test_released_published_locked(self):
    """ Tests that released published locked units display staff only warnings """
    state = self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True)
    self._verify_unit_warning(state, self.STAFF_ONLY_WARNING)
def test_released_published_unlocked(self):
    """ Tests that released published unlocked units display no warnings """
    state = self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False)
    # None means: the unit is expected to carry no status message at all.
    self._verify_unit_warning(state, None)
def test_unreleased_never_published_locked(self):
    """ Tests that unreleased never published locked units display staff only warnings """
    state = self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True)
    self._verify_unit_warning(state, self.STAFF_ONLY_WARNING)
def test_unreleased_never_published_unlocked(self):
    """ Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
    state = self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False)
    self._verify_unit_warning(state, self.NEVER_PUBLISHED_WARNING)
def test_unreleased_unpublished_changes_locked(self):
    """ Tests that unreleased unpublished changes locked units display staff only warnings """
    state = self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True)
    self._verify_unit_warning(state, self.STAFF_ONLY_WARNING)
def test_unreleased_unpublished_changes_unlocked(self):
    """
    Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
    release in the future'
    """
    state = self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False)
    self._verify_unit_warning(state, self.FUTURE_UNPUBLISHED_WARNING)
def test_unreleased_published_locked(self):
    """ Tests that unreleased published locked units display staff only warnings """
    state = self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True)
    self._verify_unit_warning(state, self.STAFF_ONLY_WARNING)
def test_unreleased_published_unlocked(self):
    """ Tests that unreleased published unlocked units display no warnings """
    state = self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False)
    # None means: the unit is expected to carry no status message at all.
    self._verify_unit_warning(state, None)
def _verify_unit_warning(self, unit_state, expected_status_message):
    """
    Verifies that the given unit's messages match the expected messages.
    If expected_status_message is None, then the unit status message is expected to not be present.
    """
    self._ensure_unit_present(unit_state)
    self.course_outline_page.visit()
    section = self.course_outline_page.section(unit_state.name)
    subsection = section.subsection_at(0)
    subsection.toggle_expand()
    unit = subsection.unit_at(0)
    if expected_status_message == self.STAFF_ONLY_WARNING:
        # Staff-only warnings propagate up to the subsection and section.
        for outline_item in (section, subsection, unit):
            self.assertEqual(outline_item.status_message, self.STAFF_ONLY_WARNING)
    else:
        # Every other warning appears on the unit only.
        self.assertFalse(section.has_status_message)
        self.assertFalse(subsection.has_status_message)
        if expected_status_message:
            self.assertEqual(unit.status_message, expected_status_message)
        else:
            self.assertFalse(unit.has_status_message)
def _ensure_unit_present(self, unit_state):
    """ Ensures that a unit with the given state is present on the course outline """
    if unit_state.publish_state == self.PublishState.PUBLISHED:
        # Published units are created up front by the course fixture.
        return

    name = unit_state.name
    self.course_outline_page.visit()
    subsection = self.course_outline_page.section(name).subsection(name)
    subsection.toggle_expand()

    if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
        # Edit the already-published unit so that it has pending changes.
        unit = subsection.unit(name).go_to()
        add_discussion(unit)
    elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
        # Create a brand-new unit, which starts out never published.
        subsection.add_unit()
        unit = ContainerPage(self.browser, None)
        unit.wait_for_page()

    if unit.is_staff_locked != unit_state.is_locked:
        unit.toggle_staff_lock()
@attr('shard_2')
class EditingSectionsTest(CourseOutlineTest):
    """
    Feature: Editing Release date, Due date and grading type.
    """

    __test__ = True

    def test_can_edit_subsection(self):
        """
        Scenario: I can edit settings of subsection.
            Given that I have created a subsection
            Then I see release date, due date and grading policy of subsection in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date, due date and grading policy fields present
            And they have correct initial values
            Then I set new values for these fields
            And I click save button on the modal
            Then I see release date, due date and grading policy of subsection in course outline
        """
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)

        # Verify that Release date visible by default
        self.assertTrue(subsection.release_date)
        # Verify that Due date and Policy hidden by default
        self.assertFalse(subsection.due_date)
        self.assertFalse(subsection.policy)

        modal = subsection.edit()

        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertTrue(modal.has_due_date())
        self.assertTrue(modal.has_policy())

        # Verify initial values (epoch release date, empty due date, ungraded)
        self.assertEqual(modal.release_date, u'1/1/1970')
        self.assertEqual(modal.due_date, u'')
        self.assertEqual(modal.policy, u'Not Graded')

        # Set new values
        modal.release_date = '3/12/1972'
        modal.due_date = '7/21/2014'
        modal.policy = 'Lab'

        modal.save()
        # The outline re-renders the saved values in its display format.
        self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
        self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
        self.assertIn(u'Lab', subsection.policy)

    def test_can_edit_section(self):
        """
        Scenario: I can edit settings of section.
            Given that I have created a section
            Then I see release date of section in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date field present
            And it has correct initial value
            Then I set new value for this field
            And I click save button on the modal
            Then I see release date of section in course outline
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)

        # Verify that Release date visible by default
        self.assertTrue(section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)

        modal = section.edit()
        # Verify fields: sections only carry a release date (no grading settings).
        self.assertTrue(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        self.assertFalse(modal.has_policy())

        # Verify initial value
        self.assertEqual(modal.release_date, u'1/1/1970')

        # Set new value
        modal.release_date = '5/14/1969'

        modal.save()
        self.assertIn(u'Released: May 14, 1969', section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)

    def test_subsection_is_graded_in_lms(self):
        """
        Scenario: I can grade subsection from course outline page.
            Given I visit progress page
            And I see that problem in subsection has grading type "Practice"
            Then I visit course outline page
            And I click on the configuration icon of subsection
            And I set grading policy to "Lab"
            And I click save button on the modal
            Then I visit progress page
            And I see that problem in subsection has grading type "Problem"
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        progress_page.visit()
        progress_page.wait_for_page()
        self.assertEqual(u'Practice', progress_page.grading_formats[0])
        self.course_outline_page.visit()

        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        # Set new values
        modal.policy = 'Lab'
        modal.save()

        # Changing the grading policy in Studio must be reflected in the LMS progress page.
        progress_page.visit()
        self.assertEqual(u'Problem', progress_page.grading_formats[0])

    def test_unchanged_release_date_is_not_saved(self):
        """
        Scenario: Saving a subsection without changing the release date will not override the release date
            Given that I have created a section with a subsection
            When I open the settings modal for the subsection
            And I pressed save
            And I open the settings modal for the section
            And I change the release date to 07/20/1969
            And I press save
            Then the subsection and the section have the release date 07/20/1969
        """
        self.course_outline_page.visit()
        # Save the subsection modal without touching any field.
        modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
        modal.save()
        modal = self.course_outline_page.section_at(0).edit()
        modal.release_date = '7/20/1969'
        modal.save()
        release_text = 'Released: Jul 20, 1969'
        # The subsection should inherit the section's new release date rather
        # than keeping a stale explicitly-saved one.
        self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
        self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr('shard_2')
class EditNamesTest(CourseOutlineTest):
    """
    Feature: Click-to-edit section/subsection names
    """

    __test__ = True

    def set_name_and_verify(self, item, old_name, new_name, expected_name):
        """
        Changes the display name of item from old_name to new_name, then verifies that its value is expected_name.
        """
        self.assertEqual(item.name, old_name)
        item.change_name(new_name)
        # Saving should leave the inline editor and show the final name.
        self.assertFalse(item.in_editable_form())
        self.assertEqual(item.name, expected_name)

    def test_edit_section_name(self):
        """
        Scenario: Click-to-edit section name
            Given that I have created a section
            When I click on the name of section
            Then the section name becomes editable
            And given that I have edited the section name
            When I click outside of the edited section name
            Then the section name saves
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0),
            'Test Section',
            'Changed',
            'Changed'
        )

    def test_edit_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name
            Given that I have created a subsection
            When I click on the name of subsection
            Then the subsection name becomes editable
            And given that I have edited the subsection name
            When I click outside of the edited subsection name
            Then the subsection name saves
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0).subsection_at(0),
            'Test Subsection',
            'Changed',
            'Changed'
        )

    def test_edit_empty_section_name(self):
        """
        Scenario: Click-to-edit section name, enter empty name
            Given that I have created a section
            And I have clicked to edit the name of the section
            And I have entered an empty section name
            When I click outside of the edited section name
            Then the section name does not change
            And becomes non-editable
        """
        self.course_outline_page.visit()
        # An empty name is rejected: the display name stays 'Test Section'.
        self.set_name_and_verify(
            self.course_outline_page.section_at(0),
            'Test Section',
            '',
            'Test Section'
        )

    def test_edit_empty_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name, enter empty name
            Given that I have created a subsection
            And I have clicked to edit the name of the subsection
            And I have entered an empty subsection name
            When I click outside of the edited subsection name
            Then the subsection name does not change
            And becomes non-editable
        """
        self.course_outline_page.visit()
        # An empty name is rejected: the display name stays 'Test Subsection'.
        self.set_name_and_verify(
            self.course_outline_page.section_at(0).subsection_at(0),
            'Test Subsection',
            '',
            'Test Subsection'
        )

    def test_editing_names_does_not_expand_collapse(self):
        """
        Scenario: A section stays in the same expand/collapse state while its name is edited
            Given that I have created a section
            And the section is collapsed
            When I click on the name of the section
            Then the section is collapsed
            And given that I have entered a new name
            Then the section is collapsed
            And given that I press ENTER to finalize the name
            Then the section is collapsed
        """
        self.course_outline_page.visit()
        # Collapse the section first, then check the state after each editing step.
        self.course_outline_page.section_at(0).toggle_expand()
        self.assertFalse(self.course_outline_page.section_at(0).in_editable_form())
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).edit_name()
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).enter_name('Changed')
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).finalize_name()
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_2')
class CreateSectionsTest(CourseOutlineTest):
    """
    Feature: Create new sections/subsections/units
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a completely empty course to easily test adding things to it """
        pass

    def test_create_new_section_from_top_button(self):
        """
        Scenario: Create new section from button at top of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the top of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button(self):
        """
        Scenario: Create new section from button at bottom of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_bottom_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())

    def test_create_new_subsection(self):
        """
        Scenario: Create new subsection
            Given that I have created a section
            When I click the "+ Add subsection" button in that section
            Then I see a new subsection added to the bottom of the section
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).add_subsection()
        subsections = self.course_outline_page.section_at(0).subsections()
        self.assertEqual(len(subsections), 1)
        self.assertTrue(subsections[0].in_editable_form())

    def test_create_new_unit(self):
        """
        Scenario: Create new unit
            Given that I have created a section
            And that I have created a subsection within that section
            When I click the "+ Add unit" button in that subsection
            Then I am redirected to a New Unit page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).add_subsection()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).add_unit()
        # Adding a unit navigates the browser to the new unit's container page,
        # so bind a ContainerPage to the current browser location and wait for it.
        unit_page = ContainerPage(self.browser, None)
        EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill()
        self.assertTrue(unit_page.is_inline_editing_display_name())
@attr('shard_2')
class DeleteContentTest(CourseOutlineTest):
    """
    Feature: Deleting sections/subsections/units
    """

    __test__ = True

    def test_delete_section(self):
        """
        Scenario: Delete section
            Given that I am on the course outline
            When I click the delete button for a section on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the section
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the section should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).delete()
        self.assertEqual(len(self.course_outline_page.sections()), 0)

    def test_cancel_delete_section(self):
        """
        Scenario: Cancel delete of section
            Given that I clicked the delete button for a section on the course outline
            And I received a confirmation message, asking me if I really want to delete the component
            When I click "Cancel"
            Then the confirmation message should close
            And the section should remain in the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        # cancel=True dismisses the confirmation prompt instead of confirming.
        self.course_outline_page.section_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.sections()), 1)

    def test_delete_subsection(self):
        """
        Scenario: Delete subsection
            Given that I am on the course outline
            When I click the delete button for a subsection on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the subsection
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the subsection should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).delete()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)

    def test_cancel_delete_subsection(self):
        """
        Scenario: Cancel delete of subsection
            Given that I clicked the delete button for a subsection on the course outline
            And I received a confirmation message, asking me if I really want to delete the subsection
            When I click "cancel"
            Then the confirmation message should close
            And the subsection should remain in the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)

    def test_delete_unit(self):
        """
        Scenario: Delete unit
            Given that I am on the course outline
            When I click the delete button for a unit on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the unit
            When I click "Yes, I want to delete this unit"
            Then the confirmation message should close
            And the unit should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        # Units are only visible once the subsection is expanded.
        self.course_outline_page.section_at(0).subsection_at(0).toggle_expand()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)

    def test_cancel_delete_unit(self):
        """
        Scenario: Cancel delete of unit
            Given that I clicked the delete button for a unit on the course outline
            And I received a confirmation message, asking me if I really want to delete the unit
            When I click "Cancel"
            Then the confirmation message should close
            And the unit should remain in the course outline
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).subsection_at(0).toggle_expand()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)

    def test_delete_all_no_content_message(self):
        """
        Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
            Given that I delete all sections, subsections, and units in a course
            When I visit the course outline
            Then I will see a message that says, "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        self.course_outline_page.visit()
        self.assertFalse(self.course_outline_page.has_no_content_message)
        self.course_outline_page.section_at(0).delete()
        self.assertEqual(len(self.course_outline_page.sections()), 0)
        self.assertTrue(self.course_outline_page.has_no_content_message)
@attr('shard_2')
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
    """
    Feature: Courses with multiple sections can expand and collapse all sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a course with two sections """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 2')
                )
            )
        )

    def verify_all_sections(self, collapsed):
        """
        Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.
        """
        for section in self.course_outline_page.sections():
            # Note: assertEqual (not the deprecated assertEquals alias, which
            # was removed in Python 3.12).
            self.assertEqual(collapsed, section.is_collapsed)

    def toggle_all_sections(self):
        """
        Toggles the expand collapse state of all sections.
        """
        for section in self.course_outline_page.sections():
            section.toggle_expand()

    def test_expanded_by_default(self):
        """
        Scenario: The default layout for the outline page is to show sections in expanded view
            Given I have a course with sections
            When I navigate to the course outline page
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with multiple sections
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        for section in self.course_outline_page.sections():
            section.delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_collapse_all_when_all_expanded(self):
        """
        Scenario: Collapse all sections when all sections are expanded
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_collapse_all_when_some_expanded(self):
        """
        Scenario: Collapsing all sections when 1 or more sections are already collapsed
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I collapse the first section
            And I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        # Collapse one section manually first; "Collapse All" must still collapse the rest.
        self.course_outline_page.section_at(0).toggle_expand()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_expand_all_when_all_collapsed(self):
        """
        Scenario: Expanding all sections when all sections are collapsed
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_expand_all_when_some_collapsed(self):
        """
        Scenario: Expanding all sections when 1 or more sections are already expanded
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I expand the first section
            And I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        # Expand one section manually first; "Expand All" must still expand the rest.
        self.course_outline_page.section_at(0).toggle_expand()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)
@attr('shard_2')
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
    """
    Feature: Courses with a single section can expand and collapse all sections.
    """

    __test__ = True

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with one section
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).delete()
        # assertEqual (the deprecated assertEquals alias was removed in Python 3.12).
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_old_subsection_stays_collapsed_after_creation(self):
        """
        Scenario: Collapsed subsection stays collapsed after creating a new subsection
            Given I have a course with one section and subsection
            And I navigate to the course outline page
            Then the subsection is collapsed
            And when I create a new subsection
            Then the first subsection is collapsed
            And the second subsection is expanded
        """
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.course_outline_page.section_at(0).add_subsection()
        # The pre-existing subsection keeps its collapsed state; only the new one opens.
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr('shard_2')
class ExpandCollapseEmptyTest(CourseOutlineTest):
    """
    Feature: Courses with no sections initially can expand and collapse all sections after addition.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Expand/collapse for a course with no sections
            Given I have a course with no sections
            When I navigate to the course outline page
            Then I do not see the "Collapse All Sections" link
        """
        self.course_outline_page.visit()
        # assertEqual (the deprecated assertEquals alias was removed in Python 3.12).
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)

    def test_link_appears_after_section_creation(self):
        """
        Scenario: Collapse link appears after creating first section of a course
            Given I have a course with no sections
            When I navigate to the course outline page
            And I add a section
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_2')
class DefaultStatesEmptyTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with an empty course
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_empty_course_message(self):
        """
        Scenario: Empty course state
            Given that I am in a course with no sections, subsections, nor units
            When I visit the course outline
            Then I will see a message that says "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty course shows the "no content" prompt plus an add-section button.
        self.assertTrue(outline.has_no_content_message)
        self.assertTrue(outline.bottom_add_section_button.is_present())
@attr('shard_2')
class DefaultStatesContentTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with a course with content
    """

    __test__ = True

    def test_view_live(self):
        """
        Scenario: View Live version from course outline
            Given that I am on the course outline
            When I click the "View Live" button
            Then a new tab will open to the course on the LMS
        """
        self.course_outline_page.visit()
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The fixture unit contains exactly these three components, in order.
        expected_types = ['problem', 'html', 'discussion']
        self.assertEqual(courseware.num_xblock_components, len(expected_types))
        for index, block_type in enumerate(expected_types):
            self.assertEqual(courseware.xblock_component_type(index), block_type)
@attr('shard_2')
class UnitNavigationTest(CourseOutlineTest):
    """
    Feature: Navigate to units
    """

    __test__ = True

    def test_navigate_to_unit(self):
        """
        Scenario: Click unit name to navigate to unit page
            Given that I have expanded a section/subsection so I can see unit names
            When I click on a unit name
            Then I will be taken to the appropriate unit page
        """
        outline = self.course_outline_page
        outline.visit()
        # Expand the first subsection so its units become clickable.
        outline.section_at(0).subsection_at(0).toggle_expand()
        unit = outline.section_at(0).subsection_at(0).unit_at(0).go_to()
        self.assertTrue(unit.is_browser_on_page)
| nicky-ji/edx-nicky | common/test/acceptance/tests/test_studio_outline.py | Python | agpl-3.0 | 42,804 | [
"VisIt"
] | 9dea53e9706244807a54c79afc9a885faf349fe3e0c13998e58d771d8e160e2d |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""\
Hydrogen bond autocorrelation --- :mod:`MDAnalysis.analysis.hydrogenbonds.hbond_autocorrel`
===========================================================================================
:Author: Richard J. Gowers
:Year: 2014
:Copyright: GNU Public License v3
.. versionadded:: 0.9.0
.. versionchanged:: 2.0.0
Module moved from :mod:`MDAnalysis.analysis.hbonds.hbond_autocorrel` to
:mod:`MDAnalysis.analysis.hydrogenbonds.hbond_autocorrel`.
Description
-----------
Calculates the time autocorrelation function, :math:`C_x(t)`, for the hydrogen
bonds in the selections passed to it. The population of hydrogen bonds at a
given startpoint, :math:`t_0`, is evaluated based on geometric criteria and
then the lifetime of these bonds is monitored over time. Multiple passes
through the trajectory are used to build an average of the behaviour.
.. math::
C_x(t) = \\left \\langle \\frac{h_{ij}(t_0) h_{ij}(t_0 + t)}{h_{ij}(t_0)^2} \\right\\rangle
The subscript :math:`x` refers to the definition of lifetime being used, either
continuous or intermittent. The continuous definition measures the time that
a particular hydrogen bond remains continuously attached, whilst the
intermittent definition allows a bond to break and then subsequently reform and
be counted again. The relevant lifetime, :math:`\\tau_x`, can then be found
via integration of this function
.. math::
    \\tau_x = \\int_0^\\infty C_x(t) dt
For this, the observed behaviour is fitted to a multi exponential function,
using 2 exponents for the continuous lifetime and 3 for the intermittent
lifetime.
:math:`C_x(t) = A_1 \\exp( - t / \\tau_1)
+ A_2 \\exp( - t / \\tau_2)
[+ A_3 \\exp( - t / \\tau_3)]`
Where the final pre-exponential factor :math:`A_n` is subject to the condition:
:math:`A_n = 1 - \\sum\\limits_{i=1}^{n-1} A_i`
For further details see [Gowers2015]_.
.. rubric:: References
.. [Gowers2015] Richard J. Gowers and Paola Carbone,
A multiscale approach to model hydrogen bonding: The case of polyamide
The Journal of Chemical Physics, 142, 224907 (2015),
DOI:http://dx.doi.org/10.1063/1.4922445
Input
-----
Three AtomGroup selections representing the **hydrogens**, **donors** and
**acceptors** that you wish to analyse. Note that the **hydrogens** and
**donors** selections must be aligned, that is **hydrogens[0]** and
**donors[0]** must represent a bonded pair. For systems such as water,
this will mean that each oxygen appears twice in the **donors** AtomGroup.
The function :func:`find_hydrogen_donors` can be used to construct the donor
AtomGroup
::
import MDAnalysis as mda
from MDAnalysis.analysis import hbonds
from MDAnalysis.tests.datafiles import waterPSF, waterDCD
u = mda.Universe(waterPSF, waterDCD)
hydrogens = u.select_atoms('name H*')
donors = hbonds.find_hydrogen_donors(hydrogens)
Note that this requires the Universe to have bond information. If this isn't
present in the topology file, the
:meth:`MDAnalysis.core.groups.AtomGroup.guess_bonds` method can be used
as so
::
import MDAnalysis as mda
from MDAnalysis.analysis import hbonds
from MDAnalysis.tests.datafiles import GRO
# we could load the Universe with guess_bonds=True
# but this would guess **all** bonds
u = mda.Universe(GRO)
water = u.select_atoms('resname SOL and not type DUMMY')
# guess bonds only within our water atoms
# this adds the bond information directly to the Universe
water.guess_bonds()
hydrogens = water.select_atoms('type H')
# this is now possible as we guessed the bonds
donors = hbonds.find_hydrogen_donors(hydrogens)
The keyword **exclusions** allows a tuple of array addresses to be provided,
(Hidx, Aidx), these pairs of hydrogen-acceptor are then not permitted to be
counted as part of the analysis. This could be used to exclude the
consideration of hydrogen bonds within the same functional group, or to perform
analysis on strictly intermolecular hydrogen bonding.
Hydrogen bonds are defined on the basis of geometric criteria; a
Hydrogen-Acceptor distance of less than **dist_crit** and a
Donor-Hydrogen-Acceptor angle of greater than **angle_crit**.
The length of trajectory to analyse in ps, **sample_time**, is used to choose
what length to analyse.
Multiple passes, controlled by the keyword **nruns**, through the trajectory
are performed and an average calculated. For each pass, **nsamples** number
of points along the run are calculated.
Output
------
All results of the analysis are available through the *solution* attribute.
This is a dictionary with the following keys
- *results* The raw results of the time autocorrelation function.
- *time* Time axis, in ps, for the results.
- *fit* Results of the exponential curve fitting procedure. For the
*continuous* lifetime these are (A1, tau1, tau2), for the
*intermittent* lifetime these are (A1, A2, tau1, tau2, tau3).
- *tau* Calculated time constant from the fit.
- *estimate* Estimated values generated by the calculated fit.
The *results* and *time* values are only filled after the :meth:`run` method,
*fit*, *tau* and *estimate* are filled after the :meth:`solve` method has been
used.
Worked Example for Polyamide
----------------------------
This example finds the continuous hydrogen bond lifetime between N-H..O in a
polyamide system. This will use the default geometric definition for hydrogen
bonds of length 3.0 Å and angle of 130 degrees.
It will observe a window of 2.0 ps (`sample_time`) and try to gather 1000
sample point within this time window (this relies upon the trajectory being
sampled frequently enough). This process is repeated for 20 different start
points to build a better average.
::
import MDAnalysis as mda
from MDAnalysis.analysis import hbonds
from MDAnalysis.tests.datafiles import TRZ_psf, TRZ
import matplotlib.pyplot as plt
# load system
u = mda.Universe(TRZ_psf, TRZ)
# select atoms of interest into AtomGroups
H = u.select_atoms('name Hn')
N = u.select_atoms('name N')
O = u.select_atoms('name O')
# create analysis object
hb_ac = hbonds.HydrogenBondAutoCorrel(u,
acceptors=O, hydrogens=H, donors=N,
bond_type='continuous',
sample_time=2.0, nsamples=1000, nruns=20)
# call run to gather results
hb_ac.run()
# attempt to fit results to exponential equation
hb_ac.solve()
# grab results from inside object
tau = hb_ac.solution['tau']
time = hb_ac.solution['time']
results = hb_ac.solution['results']
estimate = hb_ac.solution['estimate']
# plot to check!
plt.plot(time, results, 'ro')
plt.plot(time, estimate)
plt.show()
Functions and Classes
---------------------
.. autofunction:: find_hydrogen_donors
.. autoclass:: HydrogenBondAutoCorrel
:members:
"""
import numpy as np
import scipy.optimize
import warnings
from MDAnalysis.lib.log import ProgressBar
from MDAnalysis.lib.distances import capped_distance, calc_angles, calc_bonds
from MDAnalysis.core.groups import requires
from MDAnalysis.due import due, Doi
# Register this module's method paper with the "duecredit" citation framework
# so users of the analysis are pointed at the underlying publication.
due.cite(Doi("10.1063/1.4922445"),
         description="Hydrogen bonding autocorrelation time",
         path='MDAnalysis.analysis.hbonds.hbond_autocorrel',
         )
del Doi  # only needed for the cite() call above; keep module namespace clean
@requires('bonds')
def find_hydrogen_donors(hydrogens):
    """Returns the donor atom for each hydrogen

    Parameters
    ----------
    hydrogens : AtomGroup
        the hydrogens that will form hydrogen bonds

    Returns
    -------
    donors : AtomGroup
        the donor atom for each hydrogen, found via bond information

    .. versionadded:: 0.20.0
    """
    # For each hydrogen, its donor is simply the first (and only) atom it is
    # bonded to; summing the single-atom groups concatenates them into one
    # AtomGroup aligned with *hydrogens*.
    donor_atoms = [hydrogen.bonded_atoms[0] for hydrogen in hydrogens]
    return sum(donor_atoms)
class HydrogenBondAutoCorrel(object):
    """Perform a time autocorrelation of the hydrogen bonds in the system.

    Parameters
    ----------
    universe : Universe
        MDAnalysis Universe that all selections belong to
    hydrogens : AtomGroup
        AtomGroup of Hydrogens which can form hydrogen bonds
    acceptors : AtomGroup
        AtomGroup of all Acceptor atoms
    donors : AtomGroup
        The atoms which are connected to the hydrogens. This group
        must be identical in length to the hydrogen group and matched,
        ie hydrogens[0] is bonded to donors[0].
        For water, this will mean a donor appears twice in this
        group, once for each hydrogen.
    bond_type : str
        Which definition of hydrogen bond lifetime to consider, either
        'continuous' or 'intermittent'.
    exclusions : ndarray, optional
        Indices of Hydrogen-Acceptor pairs to be excluded.
        With nH and nA Hydrogens and Acceptors, a (nH x nA) array of distances
        is calculated, *exclusions* is used as a mask on this array to exclude
        some pairs.
    angle_crit : float, optional
        The angle (in degrees) which all bonds must be greater than [130.0]
    dist_crit : float, optional
        The maximum distance (in Angstroms) for a hydrogen bond [3.0]
    sample_time : float, optional
        The amount of time, in ps, that you wish to observe hydrogen
        bonds for [100]
    nruns : int, optional
        The number of different start points within the trajectory
        to use [1]
    nsamples : int, optional
        Within each run, the number of frames to analyse [50]
    pbc : bool, optional
        Whether to consider periodic boundaries in calculations [``True``]

    .. versionchanged:: 1.0.0
        ``save_results()`` method was removed. You can instead use ``np.savez()``
        on :attr:`HydrogenBondAutoCorrel.solution['time']` and
        :attr:`HydrogenBondAutoCorrel.solution['results']` instead.
    """

    def __init__(self, universe,
                 hydrogens=None, acceptors=None, donors=None,
                 bond_type=None,
                 exclusions=None,
                 angle_crit=130.0, dist_crit=3.0,  # geometric criteria
                 sample_time=100,  # expected length of the decay in ps
                 time_cut=None,  # cutoff time for intermittent hbonds
                 nruns=1,  # number of times to iterate through the trajectory
                 nsamples=50,  # number of different points to sample in a run
                 pbc=True):
        #warnings.warn("This class is deprecated, use analysis.hbonds.HydrogenBondAnalysis "
        #              "which has .autocorrelation function",
        #              category=DeprecationWarning)
        self.u = universe
        # check that slicing is possible
        try:
            self.u.trajectory[0]
        except Exception:
            raise ValueError("Trajectory must support slicing") from None
        self.h = hydrogens
        self.a = acceptors
        self.d = donors
        # hydrogens and donors must be index-aligned (h[i] bonded to d[i])
        if not len(self.h) == len(self.d):
            raise ValueError("Donors and Hydrogen groups must be identical "
                             "length. Try using `find_hydrogen_donors`.")
        self.exclusions = exclusions
        if self.exclusions:
            if not len(self.exclusions[0]) == len(self.exclusions[1]):
                raise ValueError(
                    "'exclusion' must be two arrays of identical length")
        self.bond_type = bond_type
        if self.bond_type not in ['continuous', 'intermittent']:
            raise ValueError(
                "bond_type must be either 'continuous' or 'intermittent'")
        # store the angle criterion in radians for use with calc_angles()
        self.a_crit = np.deg2rad(angle_crit)
        self.d_crit = dist_crit
        self.pbc = pbc
        self.sample_time = sample_time
        self.nruns = nruns
        self.nsamples = nsamples
        self._slice_traj(sample_time)
        self.time_cut = time_cut

        self.solution = {
            'results': None,  # Raw results
            'time': None,  # Time axis of raw results
            'fit': None,  # coefficients for fit
            'tau': None,  # integral of exponential fit
            'estimate': None  # y values of fit against time
        }

    def _slice_traj(self, sample_time):
        """Set up start and end points in the trajectory for the
        different passes
        """
        dt = self.u.trajectory.dt  # frame step size in time
        req_frames = int(sample_time / dt)  # the number of frames required

        n_frames = len(self.u.trajectory)
        if req_frames > n_frames:
            warnings.warn("Number of required frames ({}) greater than the"
                          " number of frames in trajectory ({})"
                          .format(req_frames, n_frames), RuntimeWarning)

        numruns = self.nruns
        if numruns > n_frames:
            numruns = n_frames
            warnings.warn("Number of runs ({}) greater than the number of"
                          " frames in trajectory ({})"
                          .format(self.nruns, n_frames), RuntimeWarning)

        # evenly spaced start frames, one per run
        self._starts = np.arange(0, n_frames, n_frames / numruns, dtype=int)
        # limit stop points using clip
        self._stops = np.clip(self._starts + req_frames, 0, n_frames)

        self._skip = req_frames // self.nsamples
        if self._skip == 0:  # If nsamples > req_frames
            warnings.warn("Desired number of sample points too high, using {0}"
                          .format(req_frames), RuntimeWarning)
            self._skip = 1

    def run(self, force=False):
        """Run all the required passes

        Parameters
        ----------
        force : bool, optional
            Will overwrite previous results if they exist
        """
        # if results exist, don't waste any time
        if self.solution['results'] is not None and not force:
            return

        # accumulator shaped like the longest (first) run
        main_results = np.zeros_like(np.arange(self._starts[0],
                                               self._stops[0],
                                               self._skip),
                                     dtype=np.float32)
        # for normalising later
        counter = np.zeros_like(main_results, dtype=np.float32)

        for i, (start, stop) in ProgressBar(enumerate(zip(self._starts,
                                                          self._stops)),
                                            total=self.nruns,
                                            desc="Performing run"):
            # needed else trj seek thinks a np.int64 isn't an int?
            results = self._single_run(int(start), int(stop))

            nresults = len(results)
            if nresults == len(main_results):
                main_results += results
                counter += 1.0
            else:
                # a truncated run (start too near the trajectory end) only
                # contributes to the points it actually covered
                main_results[:nresults] += results
                counter[:nresults] += 1.0

        main_results /= counter

        self.solution['time'] = np.arange(
            len(main_results),
            dtype=np.float32) * self.u.trajectory.dt * self._skip
        self.solution['results'] = main_results

    def _single_run(self, start, stop):
        """Perform a single pass of the trajectory"""
        self.u.trajectory[start]

        # Calculate partners at t=0
        box = self.u.dimensions if self.pbc else None

        # 2d array of all distances
        pair, d = capped_distance(self.h.positions, self.a.positions, max_cutoff=self.d_crit, box=box)
        if self.exclusions:
            # set to above dist crit to exclude
            # NOTE(review): ``pair == exclude`` relies on numpy broadcasting
            # between (npairs, 2) and (nexclusions, 2) arrays — confirm this
            # removes exactly the intended rows for all exclusion shapes.
            exclude = np.column_stack((self.exclusions[0], self.exclusions[1]))
            pair = np.delete(pair, np.where(pair == exclude), 0)

        hidx, aidx = np.transpose(pair)

        a = calc_angles(self.d.positions[hidx], self.h.positions[hidx],
                        self.a.positions[aidx], box=box)
        # from amongst those, who also satisfies angle crit
        idx2 = np.where(a > self.a_crit)
        hidx = hidx[idx2]
        aidx = aidx[idx2]

        nbonds = len(hidx)  # number of hbonds at t=0
        results = np.zeros_like(np.arange(start, stop, self._skip),
                                dtype=np.float32)

        if self.time_cut:
            # counter for time criteria
            count = np.zeros(nbonds, dtype=np.float64)

        for i, ts in enumerate(self.u.trajectory[start:stop:self._skip]):
            box = self.u.dimensions if self.pbc else None

            d = calc_bonds(self.h.positions[hidx], self.a.positions[aidx],
                           box=box)
            a = calc_angles(self.d.positions[hidx], self.h.positions[hidx],
                            self.a.positions[aidx], box=box)

            winners = (d < self.d_crit) & (a > self.a_crit)
            results[i] = winners.sum()

            if self.bond_type == 'continuous':
                # Remove losers for continuous definition
                hidx = hidx[np.where(winners)]
                aidx = aidx[np.where(winners)]
            elif self.bond_type == 'intermittent':
                if self.time_cut:
                    # Add to counter of where losers are
                    count[~ winners] += self._skip * self.u.trajectory.dt
                    count[winners] = 0  # Reset timer for winners

                    # Remove if you've lost too many times
                    # New arrays contain everything but removals
                    hidx = hidx[count < self.time_cut]
                    aidx = aidx[count < self.time_cut]
                    count = count[count < self.time_cut]
                else:
                    pass

            if len(hidx) == 0:  # Once everyone has lost, the fun stops
                break

        results /= nbonds

        return results

    def solve(self, p_guess=None):
        """Fit results to a multi-exponential decay and integrate to find
        characteristic time

        Parameters
        ----------
        p_guess : tuple of floats, optional
            Initial guess for the leastsq fit, must match the shape of the
            expected coefficients

        Continuous definition results are fitted to a double exponential with
        :func:`scipy.optimize.leastsq`, intermittent definition are fit to a
        triple exponential.

        The results of this fitting procedure are saved into the *fit*,
        *tau* and *estimate* keywords in the solution dict.

        - *fit* contains the coefficients, (A1, tau1, tau2) or
          (A1, A2, tau1, tau2, tau3)
        - *tau* contains the calculated lifetime in ps for the hydrogen
          bonding
        - *estimate* contains the estimate provided by the fit of the time
          autocorrelation function

        In addition, the output of the :func:`~scipy.optimize.leastsq` function
        is saved into the solution dict

        - *infodict*
        - *mesg*
        - *ier*
        """
        if self.solution['results'] is None:
            raise ValueError(
                "Results have not been generated use, the run method first")
        # Prevents an odd bug with leastsq where it expects
        # double precision data sometimes...
        time = self.solution['time'].astype(np.float64)
        results = self.solution['results'].astype(np.float64)

        def within_bounds(p):
            """Returns True/False if boundary conditions are met or not.

            Uses length of p to detect whether it's handling continuous /
            intermittent

            Boundary conditions are:
              0 < A_x < 1
              sum(A_x) < 1
              0 < tau_x
            """
            # NOTE: implicitly returns None for other lengths; p is always a
            # 3- or 5-tuple at the call sites below.
            if len(p) == 3:
                A1, tau1, tau2 = p
                return (A1 > 0.0) & (A1 < 1.0) & \
                    (tau1 > 0.0) & (tau2 > 0.0)
            elif len(p) == 5:
                A1, A2, tau1, tau2, tau3 = p
                return (A1 > 0.0) & (A1 < 1.0) & (A2 > 0.0) & \
                    (A2 < 1.0) & ((A1 + A2) < 1.0) & \
                    (tau1 > 0.0) & (tau2 > 0.0) & (tau3 > 0.0)

        def err(p, x, y):
            """Custom residual function, returns real residual if all
            boundaries are met, else returns a large number to trick the
            leastsq algorithm
            """
            if within_bounds(p):
                return y - self._my_solve(x, *p)
            else:
                return np.full_like(y, 100000)

        def double(x, A1, tau1, tau2):
            """ Sum of two exponential functions """
            A2 = 1 - A1
            return A1 * np.exp(-x / tau1) + A2 * np.exp(-x / tau2)

        def triple(x, A1, A2, tau1, tau2, tau3):
            """ Sum of three exponential functions """
            A3 = 1 - (A1 + A2)
            return A1 * np.exp(-x / tau1) + A2 * np.exp(-x / tau2) + A3 * np.exp(-x / tau3)

        if self.bond_type == 'continuous':
            self._my_solve = double
            if p_guess is None:
                p_guess = (0.5, 10 * self.sample_time, self.sample_time)

            p, cov, infodict, mesg, ier = scipy.optimize.leastsq(
                err, p_guess, args=(time, results), full_output=True)
            self.solution['fit'] = p
            A1, tau1, tau2 = p
            A2 = 1 - A1
            # lifetime = amplitude-weighted sum of time constants
            self.solution['tau'] = A1 * tau1 + A2 * tau2
        else:
            self._my_solve = triple
            if p_guess is None:
                p_guess = (0.33, 0.33, 10 * self.sample_time,
                           self.sample_time, 0.1 * self.sample_time)

            p, cov, infodict, mesg, ier = scipy.optimize.leastsq(
                err, p_guess, args=(time, results), full_output=True)
            self.solution['fit'] = p
            A1, A2, tau1, tau2, tau3 = p
            A3 = 1 - A1 - A2
            self.solution['tau'] = A1 * tau1 + A2 * tau2 + A3 * tau3

        self.solution['infodict'] = infodict
        self.solution['mesg'] = mesg
        self.solution['ier'] = ier

        if ier in [1, 2, 3, 4]:  # solution found if ier is one of these values
            self.solution['estimate'] = self._my_solve(
                self.solution['time'], *p)
        else:
            warnings.warn("Solution to results not found", RuntimeWarning)

    def __repr__(self):
        return ("<MDAnalysis HydrogenBondAutoCorrel analysis measuring the "
                "{btype} lifetime of {n} different hydrogens>"
                "".format(btype=self.bond_type, n=len(self.h)))
| MDAnalysis/mdanalysis | package/MDAnalysis/analysis/hydrogenbonds/hbond_autocorrel.py | Python | gpl-2.0 | 23,217 | [
"MDAnalysis"
] | ca42100e15000e3d3ce250413bf56217a2c59da27b49a3c78849aa1dc867f3f3 |
from Errors import error, message
import ExprNodes
import Nodes
import Builtin
import PyrexTypes
from Cython import Utils
from PyrexTypes import py_object_type, unspecified_type
from Visitor import CythonTransform, EnvTransform
class TypedExprNode(ExprNodes.ExprNode):
    # Used for declaring assignments of a specified type without a known entry.
    def __init__(self, type):
        # Deliberately does not call ExprNode.__init__; only the result
        # type is needed by the assignment-marking machinery below.
        self.type = type

# Shared placeholder rhs expression meaning "some unspecified Python object".
object_expr = TypedExprNode(py_object_type)
class MarkParallelAssignments(EnvTransform):
    # Collects assignments inside parallel blocks prange, with parallel.
    # Perhaps it's better to move it to ControlFlowAnalysis.

    # tells us whether we're in a normal loop
    in_loop = False

    parallel_errors = False

    def __init__(self, context):
        # Track the parallel block scopes (with parallel, for i in prange())
        self.parallel_block_stack = []
        return super(MarkParallelAssignments, self).__init__(context)

    def mark_assignment(self, lhs, rhs, inplace_op=None):
        """Record an assignment to ``lhs`` on the innermost enclosing
        parallel block (if any), remembering the in-place operator so
        conflicting reduction operators can be diagnosed."""
        if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
            if lhs.entry is None:
                # TODO: This shouldn't happen...
                return

            if self.parallel_block_stack:
                parallel_node = self.parallel_block_stack[-1]
                previous_assignment = parallel_node.assignments.get(lhs.entry)

                # If there was a previous assignment to the variable, keep the
                # previous assignment position
                if previous_assignment:
                    pos, previous_inplace_op = previous_assignment

                    if (inplace_op and previous_inplace_op and
                            inplace_op != previous_inplace_op):
                        # x += y; x *= y
                        t = (inplace_op, previous_inplace_op)
                        error(lhs.pos,
                              "Reduction operator '%s' is inconsistent "
                              "with previous reduction operator '%s'" % t)
                else:
                    pos = lhs.pos

                parallel_node.assignments[lhs.entry] = (pos, inplace_op)
                parallel_node.assigned_nodes.append(lhs)

        elif isinstance(lhs, ExprNodes.SequenceNode):
            # tuple/list unpacking: each target is assigned "some object"
            for arg in lhs.args:
                self.mark_assignment(arg, object_expr)
        else:
            # Could use this info to infer cdef class attributes...
            pass

    def visit_WithTargetAssignmentStatNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_SingleAssignmentNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_CascadedAssignmentNode(self, node):
        # a = b = c: every lhs gets the same rhs
        for lhs in node.lhs_list:
            self.mark_assignment(lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_InPlaceAssignmentNode(self, node):
        # pass the operator through so reductions can be detected
        self.mark_assignment(node.lhs, node.create_binop_node(), node.operator)
        self.visitchildren(node)
        return node

    def visit_ForInStatNode(self, node):
        # TODO: Remove redundancy with range optimization...
        is_special = False
        sequence = node.iterator.sequence
        target = node.target

        # Unwrap reversed(...)/enumerate(...) around the real iterable.
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.current_env().lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name == 'reversed' and len(sequence.args) == 1:
                        sequence = sequence.args[0]
                    elif function.name == 'enumerate' and len(sequence.args) == 1:
                        if target.is_sequence_constructor and len(target.args) == 2:
                            iterator = sequence.args[0]
                            if iterator.is_name:
                                iterator_type = iterator.infer_type(self.current_env())
                                if iterator_type.is_builtin_type:
                                    # assume that builtin types have a length within Py_ssize_t
                                    self.mark_assignment(
                                        target.args[0],
                                        ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                                          type=PyrexTypes.c_py_ssize_t_type))
                                    target = target.args[1]
                                    sequence = sequence.args[0]

        # range()/xrange(): the target spans the start/stop/step expressions.
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.current_env().lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name in ('range', 'xrange'):
                        is_special = True
                        for arg in sequence.args[:2]:
                            self.mark_assignment(target, arg)
                        if len(sequence.args) > 2:
                            self.mark_assignment(
                                target,
                                ExprNodes.binop_node(node.pos,
                                                     '+',
                                                     sequence.args[0],
                                                     sequence.args[2]))

        if not is_special:
            # A for-loop basically translates to subsequent calls to
            # __getitem__(), so using an IndexNode here allows us to
            # naturally infer the base type of pointers, C arrays,
            # Python strings, etc., while correctly falling back to an
            # object type when the base type cannot be handled.
            self.mark_assignment(target, ExprNodes.IndexNode(
                node.pos,
                base = sequence,
                index = ExprNodes.IntNode(node.pos, value = '0')))

        self.visitchildren(node)
        return node

    def visit_ForFromStatNode(self, node):
        self.mark_assignment(node.target, node.bound1)
        if node.step is not None:
            self.mark_assignment(node.target,
                                 ExprNodes.binop_node(node.pos,
                                                      '+',
                                                      node.bound1,
                                                      node.step))
        self.visitchildren(node)
        return node

    def visit_WhileStatNode(self, node):
        self.visitchildren(node)
        return node

    def visit_ExceptClauseNode(self, node):
        # "except E as target": target is bound to an arbitrary object
        if node.target is not None:
            self.mark_assignment(node.target, object_expr)
        self.visitchildren(node)
        return node

    def visit_FromCImportStatNode(self, node):
        # NOTE(review): returns None (node is not returned) — confirm this is
        # intentional for this transform before changing it.
        pass # Can't be assigned to...

    def visit_FromImportStatNode(self, node):
        for name, target in node.items:
            if name != "*":
                self.mark_assignment(target, object_expr)
        self.visitchildren(node)
        return node

    def visit_DefNode(self, node):
        # use fake expressions with the right result type
        if node.star_arg:
            self.mark_assignment(
                node.star_arg, TypedExprNode(Builtin.tuple_type))
        if node.starstar_arg:
            self.mark_assignment(
                node.starstar_arg, TypedExprNode(Builtin.dict_type))
        EnvTransform.visit_FuncDefNode(self, node)
        return node

    def visit_DelStatNode(self, node):
        for arg in node.args:
            self.mark_assignment(arg, arg)
        self.visitchildren(node)
        return node

    def visit_ParallelStatNode(self, node):
        """Validate nesting of prange()/parallel() blocks and maintain the
        stack of enclosing parallel blocks while visiting children."""
        if self.parallel_block_stack:
            node.parent = self.parallel_block_stack[-1]
        else:
            node.parent = None

        nested = False
        if node.is_prange:
            if not node.parent:
                node.is_parallel = True
            else:
                node.is_parallel = (node.parent.is_prange or not
                                    node.parent.is_parallel)
                nested = node.parent.is_prange
        else:
            node.is_parallel = True
            # Note: nested with parallel() blocks are handled by
            # ParallelRangeTransform!
            # nested = node.parent
            nested = node.parent and node.parent.is_prange

        self.parallel_block_stack.append(node)

        nested = nested or len(self.parallel_block_stack) > 2
        if not self.parallel_errors and nested and not node.is_prange:
            error(node.pos, "Only prange() may be nested")
            self.parallel_errors = True

        if node.is_prange:
            # restrict traversal so only the loop body/target/args are seen
            # as being inside this prange
            child_attrs = node.child_attrs
            node.child_attrs = ['body', 'target', 'args']
            self.visitchildren(node)
            node.child_attrs = child_attrs

            # pop before the else clause: it runs outside the parallel region
            self.parallel_block_stack.pop()
            if node.else_clause:
                node.else_clause = self.visit(node.else_clause)
        else:
            self.visitchildren(node)
            self.parallel_block_stack.pop()

        self.parallel_errors = False
        return node

    def visit_YieldExprNode(self, node):
        if self.parallel_block_stack:
            error(node.pos, "Yield not allowed in parallel sections")
        return node

    def visit_ReturnStatNode(self, node):
        node.in_parallel = bool(self.parallel_block_stack)
        return node
class MarkOverflowingArithmetic(CythonTransform):
    """Mark entries whose values may overflow C integer arithmetic, by
    walking the tree with a ``might_overflow`` flag that is saved and
    restored around "safe" and "dangerous" contexts."""

    # It may be possible to integrate this with the above for
    # performance improvements (though likely not worth it).

    might_overflow = False

    def __call__(self, root):
        self.env_stack = []
        self.env = root.scope
        return super(MarkOverflowingArithmetic, self).__call__(root)

    def visit_safe_node(self, node):
        # children of this node cannot overflow: clear the flag while visiting
        self.might_overflow, saved = False, self.might_overflow
        self.visitchildren(node)
        self.might_overflow = saved
        return node

    def visit_neutral_node(self, node):
        # neither sets nor clears the flag
        self.visitchildren(node)
        return node

    def visit_dangerous_node(self, node):
        # children of this node may overflow: set the flag while visiting
        self.might_overflow, saved = True, self.might_overflow
        self.visitchildren(node)
        self.might_overflow = saved
        return node

    def visit_FuncDefNode(self, node):
        # switch to the function's local scope for name lookups
        self.env_stack.append(self.env)
        self.env = node.local_scope
        self.visit_safe_node(node)
        self.env = self.env_stack.pop()
        return node

    def visit_NameNode(self, node):
        if self.might_overflow:
            entry = node.entry or self.env.lookup(node.name)
            if entry:
                entry.might_overflow = True
        return node

    def visit_BinopNode(self, node):
        # pure bitwise ops can't widen values; everything else might
        if node.operator in '&|^':
            return self.visit_neutral_node(node)
        else:
            return self.visit_dangerous_node(node)

    visit_UnopNode = visit_neutral_node

    visit_UnaryMinusNode = visit_dangerous_node

    visit_InPlaceAssignmentNode = visit_dangerous_node

    visit_Node = visit_safe_node

    def visit_assignment(self, lhs, rhs):
        # assigning a literal too large for a plain int marks the target
        if (isinstance(rhs, ExprNodes.IntNode)
                and isinstance(lhs, ExprNodes.NameNode)
                and Utils.long_literal(rhs.value)):
            entry = lhs.entry or self.env.lookup(lhs.name)
            if entry:
                entry.might_overflow = True

    def visit_SingleAssignmentNode(self, node):
        self.visit_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_CascadedAssignmentNode(self, node):
        for lhs in node.lhs_list:
            self.visit_assignment(lhs, node.rhs)
        self.visitchildren(node)
        return node
class PyObjectTypeInferer(object):
    """
    If it's not declared, it's a PyObject.
    """
    def infer_types(self, scope):
        """
        Given a dict of entries, map all unspecified types to a specified type.
        """
        # Every entry still lacking a type simply becomes a Python object.
        for entry in scope.entries.values():
            if entry.type is unspecified_type:
                entry.type = py_object_type
class SimpleAssignmentTypeInferer(object):
    """
    Very basic type inference.

    Builds a dependency graph between untyped entries (an entry depends on
    the entries its assignments read from) and resolves it worklist-style:
    entries with no outstanding dependencies are inferred first, which may
    unblock further entries.  Simple self-cycles are broken explicitly;
    anything left over defaults to ``object``.
    """
    # TODO: Implement a real type inference algorithm.
    # (Something more powerful than just extending this one...)
    def infer_types(self, scope):
        enabled = scope.directives['infer_types']
        verbose = scope.directives['infer_types.verbose']

        # tri-state directive: True = aggressive, None = safe, False/off = none
        if enabled == True:
            spanning_type = aggressive_spanning_type
        elif enabled is None:  # safe mode
            spanning_type = safe_spanning_type
        else:
            for entry in scope.entries.values():
                if entry.type is unspecified_type:
                    entry.type = py_object_type
            return

        dependancies_by_entry = {}  # entry -> dependancies
        entries_by_dependancy = {}  # dependancy -> entries
        ready_to_infer = []
        for name, entry in scope.entries.items():
            if entry.type is unspecified_type:
                if entry.in_closure or entry.from_closure:
                    # cross-closure type inference is not currently supported
                    entry.type = py_object_type
                    continue
                all = set()
                for assmt in entry.cf_assignments:
                    all.update(assmt.type_dependencies(scope))
                if all:
                    dependancies_by_entry[entry] = all
                    for dep in all:
                        if dep not in entries_by_dependancy:
                            entries_by_dependancy[dep] = set([entry])
                        else:
                            entries_by_dependancy[dep].add(entry)
                else:
                    ready_to_infer.append(entry)

        def resolve_dependancy(dep):
            # mark *dep* as resolved; entries whose last dependency this was
            # become ready for inference
            if dep in entries_by_dependancy:
                for entry in entries_by_dependancy[dep]:
                    entry_deps = dependancies_by_entry[entry]
                    entry_deps.remove(dep)
                    if not entry_deps and entry != dep:
                        del dependancies_by_entry[entry]
                        ready_to_infer.append(entry)

        # Try to infer things in order...
        while True:
            while ready_to_infer:
                entry = ready_to_infer.pop()
                types = [assmt.rhs.infer_type(scope)
                         for assmt in entry.cf_assignments]
                if types and Utils.all(types):
                    entry.type = spanning_type(types, entry.might_overflow, entry.pos)
                else:
                    # FIXME: raise a warning?
                    # print "No assignments", entry.pos, entry
                    entry.type = py_object_type
                if verbose:
                    message(entry.pos, "inferred '%s' to be of type '%s'" % (entry.name, entry.type))
                resolve_dependancy(entry)
            # Deal with simple circular dependancies...
            for entry, deps in dependancies_by_entry.items():
                if len(deps) == 1 and deps == set([entry]):
                    # seed from the assignments that don't depend on anything,
                    # then widen using all assignments
                    types = [assmt.infer_type(scope)
                             for assmt in entry.cf_assignments
                             if assmt.type_dependencies(scope) == ()]
                    if types:
                        entry.type = spanning_type(types, entry.might_overflow, entry.pos)
                        types = [assmt.infer_type(scope)
                                 for assmt in entry.cf_assignments]
                        entry.type = spanning_type(types, entry.might_overflow, entry.pos)  # might be wider...
                        resolve_dependancy(entry)
                        del dependancies_by_entry[entry]
                        if ready_to_infer:
                            break
            if not ready_to_infer:
                break

        # We can't figure out the rest with this algorithm, let them be objects.
        for entry in dependancies_by_entry:
            entry.type = py_object_type
            if verbose:
                message(entry.pos, "inferred '%s' to be of type '%s' (default)" % (entry.name, entry.type))
def find_spanning_type(type1, type2):
    """Return a single type wide enough to represent both input types."""
    if type1 is type2:
        span = type1
    elif type1 is PyrexTypes.c_bint_type or type2 is PyrexTypes.c_bint_type:
        # type inference can break the coercion back to a Python bool
        # if it returns an arbitrary int type here
        return py_object_type
    else:
        span = PyrexTypes.spanning_type(type1, type2)
    if span in (PyrexTypes.c_double_type, PyrexTypes.c_float_type,
                Builtin.float_type):
        # Python's float type is just a C double, so it's safe to
        # use the C type instead
        return PyrexTypes.c_double_type
    return span
def aggressive_spanning_type(types, might_overflow, pos):
    """Fold *types* into one spanning type, stripping reference and const
    qualifiers; cpp classes must be default-constructible to be inferred."""
    result = reduce(find_spanning_type, types)
    if result.is_reference:
        result = result.ref_base_type
    if result.is_const:
        result = result.const_base_type
    if result.is_cpp_class:
        result.check_nullary_constructor(pos)
    return result
def safe_spanning_type(types, might_overflow, pos):
    """Fold *types* into one spanning type, but only keep a non-object
    result when using it implicitly is provably safe; otherwise fall back
    to ``object``."""
    result_type = reduce(find_spanning_type, types)
    if result_type.is_const:
        result_type = result_type.const_base_type
    if result_type.is_reference:
        result_type = result_type.ref_base_type
    if result_type.is_cpp_class:
        result_type.check_nullary_constructor(pos)
    if result_type.is_pyobject:
        # In theory, any specific Python type is always safe to
        # infer. However, inferring str can cause some existing code
        # to break, since we are also now much more strict about
        # coercion from str to char *. See trac #553.
        if result_type.name == 'str':
            return py_object_type
        else:
            return result_type
    elif result_type is PyrexTypes.c_double_type:
        # Python's float type is just a C double, so it's safe to use
        # the C type instead
        return result_type
    elif result_type is PyrexTypes.c_bint_type:
        # find_spanning_type() only returns 'bint' for clean boolean
        # operations without other int types, so this is safe, too
        return result_type
    elif result_type.is_ptr and not (result_type.is_int and result_type.rank == 0):
        # Any pointer except (signed|unsigned|) char* can't implicitly
        # become a PyObject.
        # NOTE(review): this tests is_int/rank on the pointer type itself
        # rather than on its base type — confirm pointer types expose their
        # base type's is_int/rank here, otherwise the char* exclusion is
        # never triggered.
        return result_type
    elif result_type.is_cpp_class:
        # These can't implicitly become Python objects either.
        return result_type
    elif result_type.is_struct:
        # Though we have struct -> object for some structs, this is uncommonly
        # used, won't arise in pure Python, and there shouldn't be side
        # effects, so I'm declaring this safe.
        return result_type
    # TODO: double complex should be OK as well, but we need
    # to make sure everything is supported.
    elif result_type.is_int and not might_overflow:
        return result_type
    return py_object_type
def get_type_inferer():
    """Return the type inference implementation currently in use."""
    inferer = SimpleAssignmentTypeInferer()
    return inferer
| larsmans/cython | Cython/Compiler/TypeInference.py | Python | apache-2.0 | 19,496 | [
"VisIt"
] | 1437120e72dc25fb08449767ddcca7488ff3c251066af942feba6f537d049b49 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
import six
from bigdl.util.common import JTensor
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import callJavaFunc
from bigdl.util.common import get_spark_context
from bigdl.util.common import to_list
from bigdl.util.common import INTMAX, INTMIN, DOUBLEMAX
from bigdl.util.common import get_activation_by_name
from bigdl.optim.optimizer import L1Regularizer, L2Regularizer, L1L2Regularizer
from py4j.java_gateway import JavaObject
from bigdl.transform.vision.image import ImageFrame
# Python 2/3 compatibility aliases: on Python 3 the 'long' and 'unicode'
# builtins no longer exist, so map them to their Python 3 equivalents.
# Compare the version_info tuple instead of the 'sys.version' string: the
# string compare is lexicographic and would misclassify a two-digit major
# version (e.g. "10..." < "3").
if sys.version_info >= (3,):
    long = int
    unicode = str
class Node(JavaValue):
    """
    A node in a computational graph; the connections between nodes are directed.
    """
    def __init__(self, jvalue, bigdl_type, *args):
        # Wrap an existing JVM object when one is supplied, otherwise ask the
        # JVM side to construct one for us.
        if jvalue:
            self.value = jvalue
        else:
            self.value = callBigDlFunc(
                bigdl_type, JavaValue.jvm_class_constructor(self), *args)
        self.bigdl_type = bigdl_type

    @classmethod
    def of(cls, jvalue, bigdl_type="float"):
        # Alternate constructor wrapping a Py4j JavaObject.
        return Node(jvalue, bigdl_type)

    def element(self):
        # The Layer held by this graph node.
        return Layer.of(self.value.element())
class Layer(JavaValue):
    """
    Layer is the basic component of a neural network
    and it's also the base class of layers.
    Layer can connect to others to construct a complex neural network.
    """

    def __init__(self, jvalue, bigdl_type, *args):
        # Wrap an existing Py4j JavaObject when one is supplied; otherwise
        # create the backing JVM object through the BigDL python API.
        if (jvalue):
            assert(type(jvalue) == JavaObject)
            self.value = jvalue
        else:
            self.value = callBigDlFunc(
                bigdl_type, JavaValue.jvm_class_constructor(self), *args)
        self.bigdl_type = bigdl_type

    def set_running_mean(self, running_mean):
        """
        Set the running mean of this layer.

        :param running_mean: a ndarray
        """
        callBigDlFunc(self.bigdl_type, "setRunningMean",
                      self.value, JTensor.from_ndarray(running_mean))
        return self

    def set_running_std(self, running_std):
        """
        Set the running standard deviation of this layer.

        :param running_std: a ndarray
        """
        callBigDlFunc(self.bigdl_type, "setRunningStd",
                      self.value, JTensor.from_ndarray(running_std))
        return self

    def __str__(self):
        """
        >>> conv2 = SpatialConvolution(6, 12, 5, 5).set_name("conv2")
        creating: createSpatialConvolution
        >>> print(conv2)
        SpatialConvolution[conv2](6 -> 12, 5 x 5, 1, 1, 0, 0)
        """
        # Delegate to the JVM-side toString of the wrapped module.
        return self.value.toString()

    def __call__(self, x=None):
        """
        Some other modules point to current module
        :param x: upstream module nodes. x is either a Node or list of Node.
        :return: node containing current module
        """
        x = x if x else []
        return Node.of(callBigDlFunc(self.bigdl_type,
                                     "createNode",
                                     self,
                                     to_list(x)))

    @classmethod
    def of(cls, jvalue, bigdl_type="float"):
        """
        Create a Python Layer base on the given java value
        :param jvalue: Java object create by Py4j
        :return: A Python Layer
        """
        model = Layer(jvalue, bigdl_type)
        return model

    def set_name(self, name):
        """
        Give this model a name. There would be a generated name
        consist of class name and UUID if user doesn't set it.
        """
        callJavaFunc(get_spark_context(), self.value.setName, name)
        return self

    def name(self):
        """
        Name of this layer
        """
        return callJavaFunc(get_spark_context(), self.value.getName)

    def set_seed(self, seed=123):
        """
        You can control the random seed which used to init weights for this model.

        :param seed: random seed
        :return: Model itself.
        """
        callBigDlFunc(self.bigdl_type, "setModelSeed", seed)
        return self

    def get_dtype(self):
        # numpy dtype string matching this layer's JVM-side numeric type.
        if "float" == self.bigdl_type:
            return "float32"
        else:
            return "float64"

    @staticmethod
    def check_input(input):
        """
        Normalize the supported input forms into a list of JTensor.

        :param input: ndarray or list of ndarray or JTensor or list of JTensor.
        :return: (list of JTensor, isTable)
        """
        def to_jtensor(i):
            if isinstance(i, np.ndarray):
                return JTensor.from_ndarray(i)
            elif isinstance(i, JTensor):
                return i
            else:
                raise Exception("Error unknown input type %s" % type(i))
        if type(input) is list:
            if len(input) == 0:
                raise Exception('Error when checking: empty input')
            return list(map(lambda i: to_jtensor(i), input)), True
        else:
            return [to_jtensor(input)], False

    @staticmethod
    def convert_output(output):
        # Unwrap JTensor result(s) back into numpy: a single tensor yields one
        # ndarray, multiple tensors yield a list of ndarrays.
        if type(output) is JTensor:
            return output.to_ndarray()
        elif(len(output) == 1):
            return output[0].to_ndarray()
        else:
            return [x.to_ndarray() for x in output]

    def forward(self, input):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Takes an input object, and computes the corresponding output of the module

        :param input: ndarray or list of ndarray or JTensor or list of JTensor.
        :return: ndarray or list of ndarray
        """
        jinput, input_is_table = self.check_input(input)
        output = callBigDlFunc(self.bigdl_type,
                               "modelForward",
                               self.value,
                               jinput,
                               input_is_table)
        return self.convert_output(output)

    def backward(self, input, grad_output):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Performs a back-propagation step through the module, with respect to the given input. In
        general this method makes the assumption forward(input) has been called before, with the same
        input. This is necessary for optimization reasons. If you do not respect this rule, backward()
        will compute incorrect gradients.

        :param input: ndarray or list of ndarray or JTensor or list of JTensor.
        :param grad_output: ndarray or list of ndarray or JTensor or list of JTensor.
        :return: ndarray or list of ndarray
        """
        jinput, input_is_table = self.check_input(input)
        jgrad_output, grad_output_is_table = self.check_input(grad_output)
        output = callBigDlFunc(self.bigdl_type,
                               "modelBackward",
                               self.value,
                               jinput,
                               input_is_table,
                               jgrad_output,
                               grad_output_is_table)
        return self.convert_output(output)

    def zero_grad_parameters(self):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        If the module has parameters, this will zero the accumulation of the gradients with respect
        to these parameters. Otherwise, it does nothing.
        """
        callJavaFunc(get_spark_context(), self.value.zeroGradParameters)

    def update_parameters(self, learning_rate):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        """
        callBigDlFunc(self.bigdl_type,
                      "updateParameters",
                      self.value,
                      learning_rate)

    def reset(self):
        """
        Initialize the model weights.
        """
        callJavaFunc(get_spark_context(), self.value.reset)
        return self

    def parameters(self):
        """
        Get the model parameters which containing: weight, bias, gradBias, gradWeight

        :return: dict(layername -> dict(parametername -> ndarray))
        """
        name_to_params = callBigDlFunc(self.bigdl_type,
                                       "modelGetParameters",
                                       self.value)

        # Each parameter arrives as (flat values, shape); rebuild the ndarray
        # with the dtype matching this layer's JVM numeric type.
        def to_ndarray(params):
            return dict((param_name,
                         np.array(values[0], dtype=self.get_dtype()).reshape(
                             values[1])) for param_name, values in
                        params.items())

        return dict((layer_name, to_ndarray(params)) for layer_name, params in
                    name_to_params.items())

    def evaluate(self, *args):
        """
        No argument passed in:
        Evaluate the model to set train = false, useful when doing test/forward
        :return: layer itself

        Three arguments passed in:
        A method to benchmark the model quality.

        :param val_rdd: the input data
        :param batch_size: batch size
        :param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
        :return: a list of the metrics result
        """
        if len(args) == 0:
            callBigDlFunc(self.bigdl_type,
                          "evaluate", self.value)
            return self
        elif len(args) == 3:
            val_rdd, batch_size, val_methods = args
            return callBigDlFunc(self.bigdl_type,
                                 "modelEvaluate",
                                 self.value,
                                 val_rdd, batch_size, val_methods)
        else:
            raise Exception("Error when calling evaluate(): it takes no argument or exactly three arguments only")

    def _to_jtensors(self, x):
        # Normalize one ndarray/JTensor or a list of them into a list of JTensor.
        x = to_list(x)
        if isinstance(x[0], np.ndarray):
            return [JTensor.from_ndarray(i) for i in x]
        elif isinstance(x[0], JTensor):
            return x
        else:
            raise Exception("Not supported type: %s" % type(x[0]))

    def predict_local(self, X):
        """
        :param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
                  The first dimension of X should be batch.
        :return: a ndarray as the prediction result.
        """
        jresults = callBigDlFunc(self.bigdl_type,
                                 "predictLocal",
                                 self.value,
                                 self._to_jtensors(X))
        return np.stack([j.to_ndarray()for j in jresults])

    def predict_local_class(self, X):
        """
        :param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
                  The first dimension of X should be batch.
        :return: a ndarray as the prediction result.
        """
        result = callBigDlFunc(self.bigdl_type,
                               "predictLocalClass",
                               self.value,
                               self._to_jtensors(X))
        return np.stack(result)

    def predict(self, data_rdd):
        """
        Model inference base on the given data.
        You need to invoke collect() to trigger those action \
        as the returning result is an RDD.

        :param data_rdd: the data to be predict.
        :return: An RDD represent the predict result.
        """
        result = callBigDlFunc(self.bigdl_type,
                               "modelPredictRDD", self.value, data_rdd)
        return result.map(lambda data: data.to_ndarray())

    def predict_class(self, data_rdd):
        """
        module predict, return the predict label

        :param data_rdd: the data to be predict.
        :return: An RDD represent the predict label.
        """
        result = callBigDlFunc(self.bigdl_type,
                               "modelPredictClass", self.value, data_rdd)
        return result

    def predict_image(self, image_frame, output_layer=None, share_buffer=False,
                      batch_per_partition=4, predict_key="predict"):
        """
        model predict images, return imageFrame with predicted tensor

        :param image_frame imageFrame that contains images
        :param output_layer if output_layer is not null, the output of layer that matches
                            output_layer will be used as predicted output
        :param share_buffer whether to share same memory for each batch predict results
        :param batch_per_partition batch size per partition, default is 4
        :param predict_key key to store predicted results
        """
        image_frame = callBigDlFunc(self.bigdl_type, "modelPredictImage", self.value,
                                    image_frame,
                                    output_layer,
                                    share_buffer,
                                    batch_per_partition,
                                    predict_key)
        return ImageFrame(image_frame)

    def set_weights(self, weights):
        """
        Set weights for this layer

        :param weights: a list of numpy arrays which represent weight and bias
        :return:

        >>> linear = Linear(3,2)
        creating: createLinear
        >>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
        >>> weights = linear.get_weights()
        >>> weights[0].shape == (2,3)
        True
        >>> weights[0][0]
        array([ 1.,  2.,  3.], dtype=float32)
        >>> weights[1]
        array([ 7.,  8.], dtype=float32)
        >>> relu = ReLU()
        creating: createReLU
        >>> from py4j.protocol import Py4JJavaError
        >>> try:
        ...     relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
        ... except Py4JJavaError as err:
        ...     print(err.java_exception)
        ...
        java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias
        >>> relu.get_weights()
        The layer does not have weight/bias
        >>> add = Add(2)
        creating: createAdd
        >>> try:
        ...     add.set_weights([np.array([7,8]), np.array([1,2])])
        ... except Py4JJavaError as err:
        ...     print(err.java_exception)
        ...
        java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2
        >>> cAdd = CAdd([4, 1])
        creating: createCAdd
        >>> cAdd.set_weights(np.ones([4, 1]))
        >>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()
        True
        """
        tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)]
        callBigDlFunc(self.bigdl_type, "setWeights", self.value, tensors)

    def get_weights(self):
        """
        Get weights for this layer

        :return: list of numpy arrays which represent weight and bias
        """
        tensorWeights = callBigDlFunc(self.bigdl_type,
                                      "getWeights", self.value)
        if tensorWeights is not None:
            return [tensor.to_ndarray() for tensor in tensorWeights]
        else:
            print("The layer does not have weight/bias")
            return None

    def is_with_weights(self):
        # Whether this layer carries weight/bias parameters (answered JVM-side).
        return callBigDlFunc(self.bigdl_type,
                             "isWithWeights", self.value)

    def save(self, path, over_write = False):
        # Persist the model through the JVM-side "modelSave" API.
        callBigDlFunc(self.bigdl_type, "modelSave", self.value, path,
                      over_write)

    def saveModel(self, modelPath, weightPath = None, over_write = False):
        # Persist the module (and optionally its weights to a separate file)
        # through the JVM-side "saveBigDLModule" API.
        callBigDlFunc(self.bigdl_type, "saveBigDLModule", self.value, modelPath,
                      weightPath, over_write)

    def save_caffe(self, prototxt_path, model_path, use_v2 = True, overwrite = False):
        # Export this model to Caffe prototxt/model files via "saveCaffe".
        callBigDlFunc(self.bigdl_type, "saveCaffe", self.value, prototxt_path,
                      model_path, use_v2, overwrite)

    def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
        """
        Save a model to protobuf files so that it can be used in tensorflow inference.

        When saving the model, placeholders will be added to the tf model as input nodes. So
        you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
        such information. The order of the placeholder information should be same as the inputs
        of the graph model.

        :param inputs: placeholder information, should be an array of tuples (input_name, shape)
                       where 'input_name' is a string and shape is an array of integer
        :param path: the path to be saved to
        :param byte_order: model byte order
        :param data_format: model data format, should be "nhwc" or "nchw"
        """
        callBigDlFunc(self.bigdl_type, "saveTF", self.value, inputs, path, byte_order, data_format)

    def setWRegularizer(self, wRegularizer):
        '''
        set weight regularizer
        :param wRegularizer: weight regularizer
        :return:
        '''
        self.value.wRegularizer = wRegularizer.value

    def setBRegularizer(self, bRegularizer):
        '''
        set bias regularizer
        :param bRegularizer: bias regularizer
        :return:
        '''
        self.value.bRegularizer = bRegularizer.value

    def freeze(self, names=None):
        """
        freeze module, if names is not None, set an array of layers that match given names
        to be freezed

        :param names: an array of layer names
        :return:
        """
        callBigDlFunc(self.bigdl_type, "freeze", self.value, names)
        return self

    def unfreeze(self, names=None):
        """
        unfreeze module, if names is not None, unfreeze layers that match given names

        :param names: an array of layer names
        :return:
        """
        callBigDlFunc(self.bigdl_type, "unFreeze", self.value, names)
        return self

    def training(self, is_training=True):
        '''
        Set this layer in the training mode or in prediction mode if is_training=False
        '''
        if is_training:
            callJavaFunc(get_spark_context(), self.value.training)
        else:
            callJavaFunc(get_spark_context(), self.value.evaluate)
        return self

    def is_training(self):
        '''
        :return: Whether this layer is in the training mode

        >>> layer = Dropout()
        creating: createDropout
        >>> layer = layer.evaluate()
        >>> layer.is_training()
        False
        >>> layer = layer.training()
        >>> layer.is_training()
        True
        '''
        return callJavaFunc(get_spark_context(), self.value.isTraining)

    def quantize(self):
        '''
        Clone self and quantize it, at last return a new quantized model.
        :return: A new quantized model.

        >>> fc = Linear(4, 2)
        creating: createLinear
        >>> fc.set_weights([np.ones((2, 4)), np.ones((2,))])
        >>> input = np.ones((2, 4))
        >>> fc.forward(input)
        array([[ 5.,  5.],
               [ 5.,  5.]], dtype=float32)
        >>> quantized_fc = fc.quantize()
        >>> quantized_fc.forward(input)
        array([[ 5.,  5.],
               [ 5.,  5.]], dtype=float32)
        >>> assert("quantized.Linear" in quantized_fc.__str__())
        >>> conv = SpatialConvolution(1, 2, 3, 3)
        creating: createSpatialConvolution
        >>> conv.set_weights([np.ones((2, 1, 3, 3)), np.zeros((2,))])
        >>> input = np.ones((2, 1, 4, 4))
        >>> conv.forward(input)
        array([[[[ 9.,  9.],
                 [ 9.,  9.]],
        <BLANKLINE>
                [[ 9.,  9.],
                 [ 9.,  9.]]],
        <BLANKLINE>
        <BLANKLINE>
               [[[ 9.,  9.],
                 [ 9.,  9.]],
        <BLANKLINE>
                [[ 9.,  9.],
                 [ 9.,  9.]]]], dtype=float32)
        >>> quantized_conv = conv.quantize()
        >>> quantized_conv.forward(input)
        array([[[[ 9.,  9.],
                 [ 9.,  9.]],
        <BLANKLINE>
                [[ 9.,  9.],
                 [ 9.,  9.]]],
        <BLANKLINE>
        <BLANKLINE>
               [[[ 9.,  9.],
                 [ 9.,  9.]],
        <BLANKLINE>
                [[ 9.,  9.],
                 [ 9.,  9.]]]], dtype=float32)
        >>> assert("quantized.SpatialConvolution" in quantized_conv.__str__())
        >>> seq = Sequential()
        creating: createSequential
        >>> seq = seq.add(conv)
        >>> seq = seq.add(Reshape([8, 4], False))
        creating: createReshape
        >>> seq = seq.add(fc)
        >>> input = np.ones([1, 1, 6, 6])
        >>> seq.forward(input)
        array([[ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.]], dtype=float32)
        >>> quantized_seq = seq.quantize()
        >>> quantized_seq.forward(input)
        array([[ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.],
               [ 37.,  37.]], dtype=float32)
        >>> assert("quantized.Linear" in quantized_seq.__str__())
        >>> assert("quantized.SpatialConvolution" in quantized_seq.__str__())
        '''
        quantized_model = callBigDlFunc(self.bigdl_type, "quantize", self.value)
        return Layer.of(quantized_model)
class Container(Layer):
    '''
    [[Container]] is a sub-class of Model that declares methods defined in all
    containers. A container usually holds other modules, which can be appended
    through the "add" method.
    '''

    def __init__(self, jvalue, bigdl_type, *args):
        super(Container, self).__init__(jvalue, bigdl_type, *args)

    def add(self, model):
        # Append a sub-module to this container; return self for chaining.
        self.value.add(model.value)
        return self

    @property
    def layers(self):
        # Direct sub-modules of this container, wrapped as Python Layers.
        jmodules = callBigDlFunc(self.bigdl_type, "getContainerModules", self)
        return [Layer.of(jmodule) for jmodule in jmodules]

    def flattened_layers(self, include_container=False):
        # Recursively collected sub-modules; set include_container to also
        # include the containers themselves.
        jmodules = callBigDlFunc(self.bigdl_type, "getFlattenModules", self, include_container)
        return [Layer.of(jmodule) for jmodule in jmodules]
class Model(Container):
    """
    A graph container. Each node can have multiple inputs. The output of the node should be a
    tensor. The output tensor can be connected to multiple nodes. So the module in each node can
    have a tensor or table input, and should have a tensor output.

    The graph container can have multiple inputs and multiple outputs. If there's one input,
    the input data fed to the graph module should be a tensor. If there're multiple inputs,
    the input data fed to the graph module should be a table, which is actually an sequence of
    tensor. The order of the input tensors should be same with the order of the input nodes.
    This is also applied to the gradient from the module in the back propagation.

    If there's one output, the module output is a tensor. If there're multiple outputs, the module
    output is a table, which is actually an sequence of tensor. The order of the output tensors is
    same with the order of the output modules. This is also applied to the gradient passed to the
    module in the back propagation.

    All inputs should be able to connect to outputs through some paths in the graph.
    It is allowed that some successors of the inputs node are not connect to outputs.
    If so, these nodes will be excluded in the computation.

    We also support initializing a Graph directly from a tensorflow module. In this case, you should
    pass your tensorflow nodes as inputs and outputs and also specify the byte_order parameter
    ("little_endian" or "big_endian") and node_type parameter ("bigdl" or "tensorflow").
    """

    def __init__(self,
                 inputs,
                 outputs,
                 jvalue=None,
                 bigdl_type="float", byte_order="little_endian", model_type="bigdl"):
        # Three construction modes:
        #  - wrap an existing JVM graph object (jvalue given),
        #  - build a BigDL graph from input/output Nodes (model_type == "bigdl"),
        #  - convert from a tensorflow graph (any other model_type).
        if jvalue:
            self.value = jvalue
            self.bigdl_type = bigdl_type
        elif model_type == "bigdl":
            super(Model, self).__init__(None, bigdl_type,
                                        to_list(inputs),
                                        to_list(outputs))
        else:
            from bigdl.util.tf_utils import convert
            model = convert(to_list(inputs), to_list(outputs), byte_order, bigdl_type)
            super(Model, self).__init__(model.value, bigdl_type)

    @staticmethod
    def from_jvalue(jvalue, bigdl_type="float"):
        """
        Create a Python Model base on the given java value

        :param jvalue: Java object create by Py4j
        :return: A Python Model
        """
        # Model.__init__ already stores jvalue, so no extra assignment is needed.
        return Model([], [], jvalue=jvalue)

    def __str__(self):
        # Fix: 'layers' is a property inherited from Container (not a method),
        # so the old "self.layers()" raised TypeError; str.join also requires
        # strings, so stringify each layer explicitly.
        return "->".join(str(layer) for layer in self.layers)

    @staticmethod
    def load(path, bigdl_type="float"):
        """
        Load a pre-trained Bigdl model.

        :param path: The path containing the pre-trained model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
        return Layer.of(jmodel)

    @staticmethod
    def loadModel(modelPath, weightPath=None, bigdl_type="float"):
        """
        Load a pre-trained Bigdl model.

        :param modelPath: The path containing the pre-trained model definition.
        :param weightPath: The optional path containing the weights.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadBigDLModule", modelPath, weightPath)
        return Layer.of(jmodel)

    @staticmethod
    def load_torch(path, bigdl_type="float"):
        """
        Load a pre-trained Torch model.

        :param path: The path containing the pre-trained model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadTorch", path)
        return Layer.of(jmodel)

    @staticmethod
    def load_keras(def_path, weights_path=None, by_name=False):
        """
        Load a pre-trained Keras model.

        :param def_path: The json path containing the keras model definition.
        :param weights_path: The HDF5 path containing the pre-trained keras model weights.
        :return: A pre-trained model.
        """
        from bigdl.keras.converter import DefinitionLoader, WeightLoader
        # Fix: removed unreachable "return bmodel" (bmodel was never defined).
        if weights_path:
            return WeightLoader.load_weights_from_json_hdf5(def_path, weights_path, by_name=by_name)
        else:
            return DefinitionLoader.from_json_path(def_path)

    @staticmethod
    def load_caffe(model, defPath, modelPath, match_all=True, bigdl_type="float"):
        """
        Load a pre-trained Caffe model.

        :param model: A bigdl model definition \which equivalent to the pre-trained caffe model.
        :param defPath: The path containing the caffe model definition.
        :param modelPath: The path containing the pre-trained caffe model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadCaffe", model, defPath, modelPath, match_all)
        return Layer.of(jmodel)

    @staticmethod
    def load_caffe_model(defPath, modelPath, bigdl_type="float"):
        """
        Load a pre-trained Caffe model.

        :param defPath: The path containing the caffe model definition.
        :param modelPath: The path containing the pre-trained caffe model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadCaffeModel", defPath, modelPath)
        return Layer.of(jmodel)

    @staticmethod
    def load_tensorflow(path, inputs, outputs, byte_order="little_endian",
                        bin_file=None, bigdl_type="float"):
        """
        Load a pre-trained Tensorflow model.

        :param path: The path containing the pre-trained model.
        :param inputs: The input node of this graph
        :param outputs: The output node of this graph
        :param byte_order: byte_order of the file, `little_endian` or `big_endian`
        :param bin_file: the optional bin file produced by bigdl dump_model util function to store the weights
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadTF", path, inputs, outputs, byte_order, bin_file)
        return Model.of(jmodel)

    @staticmethod
    def train(output, data, label, opt_method, criterion, batch_size, end_when, session=None, bigdl_type="float"):
        # Train a model extracted from a tensorflow session graph on an RDD of
        # (data, label) Samples built from the given numpy arrays.
        from bigdl.util.tf_utils import get_path
        from bigdl.util.common import Sample
        output_name = output.name.split(":")[0]
        path = get_path(output_name, session)
        sc = get_spark_context()
        rdd_train_images = sc.parallelize(data)
        rdd_train_labels = sc.parallelize(label)
        rdd_train_sample = rdd_train_images.zip(rdd_train_labels).map(lambda input:
                                                                      Sample.from_ndarray(input[0], input[1]))
        jmodel = callBigDlFunc(bigdl_type, "trainTF", path, output_name, rdd_train_sample, opt_method, criterion, batch_size, end_when)
        return Model.of(jmodel)

    def stop_gradient(self, stop_layers, bigdl_type="float"):
        """
        stop the input gradient of layers that match the given ```names```
        their input gradient are not computed.
        And they will not contributed to the input gradient computation of
        layers that depend on them.

        :param stop_layers: an array of layer names
        :param bigdl_type:
        :return:
        """
        callBigDlFunc(bigdl_type, "setStopGradient", self.value, stop_layers)
        return self

    def node(self, name, bigdl_type="float"):
        """
        Return the corresponding node has the given name. If the given name doesn't match any node,
        an exception will be thrown

        :param name: node name
        :param bigdl_type:
        :return:
        """
        jnode = callBigDlFunc(bigdl_type, "findGraphNode", self.value, name)
        return Node.of(jnode)

    def save_graph_topology(self, log_path, bigdl_type="float"):
        """
        save current model graph to a folder, which can be display in tensorboard by running
        tensorboard --logdir logPath

        :param log_path: path to save the model graph
        :param bigdl_type:
        :return:
        """
        callBigDlFunc(bigdl_type, "saveGraphTopology", self.value, log_path)
        return self
class Linear(Layer):
    '''
    The [[Linear]] module applies a linear transformation to the input data,
    i.e. `y = Wx + b`. The input given in `forward(input)` must be either
    a vector (1D tensor) or matrix (2D tensor). If the input is a vector, it must
    have the size of `inputSize`. If it is a matrix, then each row is assumed to be
    an input sample of given batch (the number of rows means the batch size and
    the number of columns should be equal to the `inputSize`).

    :param input_size the size the each input sample
    :param output_size the size of the module output of each sample
    :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]]applied to the bias.
    :param init_weight: the optional initial value for the weight
    :param init_bias: the optional initial value for the bias
    :param init_grad_weight: the optional initial value for the grad_weight
    :param init_grad_bias: the optional initial value for the grad_bias

    >>> linear = Linear(100, 10, True, L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createLinear
    >>> import numpy as np
    >>> init_weight = np.random.randn(10, 100)
    >>> init_bias = np.random.randn(10)
    >>> init_grad_weight = np.zeros([10, 100])
    >>> init_grad_bias = np.zeros([10])
    >>> linear = Linear(100, 10, True, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias)
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createLinear
    '''

    def __init__(self, input_size, output_size, with_bias=True, wRegularizer=None, bRegularizer=None,
                 init_weight=None, init_bias=None, init_grad_weight=None, init_grad_bias=None, bigdl_type="float"):
        # Convert the optional numpy initial values to JTensors before handing
        # everything to the JVM constructor.
        j_weight = JTensor.from_ndarray(init_weight)
        j_bias = JTensor.from_ndarray(init_bias)
        j_grad_weight = JTensor.from_ndarray(init_grad_weight)
        j_grad_bias = JTensor.from_ndarray(init_grad_bias)
        super(Linear, self).__init__(None, bigdl_type, input_size, output_size,
                                     with_bias, wRegularizer, bRegularizer,
                                     j_weight, j_bias, j_grad_weight, j_grad_bias)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegate initializer configuration to the JVM side; chainable.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class SparseLinear(Layer):
    '''
    SparseLinear is the sparse version of module Linear. SparseLinear has two different from Linear:
    firstly, SparseLinear's input Tensor is a SparseTensor. Secondly, SparseLinear doesn't backward
    gradient to next layer in the backpropagation by default, as the gradInput of SparseLinear is
    useless and very big in most cases.

    But, considering model like Wide&Deep, we provide backwardStart and backwardLength to backward
    part of the gradient to next layer.

    :param input_size the size the each input sample
    :param output_size the size of the module output of each sample
    :param backwardStart backwardStart index, counting from 1
    :param backwardLength backward length
    :param withBias if has bias
    :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]]applied to the bias.
    :param init_weight: the optional initial value for the weight
    :param init_bias: the optional initial value for the bias
    :param init_grad_weight: the optional initial value for the grad_weight
    :param init_grad_bias: the optional initial value for the grad_bias

    >>> sparselinear = SparseLinear(100, 10, True, wRegularizer=L1Regularizer(0.5), bRegularizer=L1Regularizer(0.5))
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createSparseLinear
    >>> import numpy as np
    >>> init_weight = np.random.randn(10, 100)
    >>> init_bias = np.random.randn(10)
    >>> init_grad_weight = np.zeros([10, 100])
    >>> init_grad_bias = np.zeros([10])
    >>> sparselinear = SparseLinear(100, 10, True, 1, 5, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias)
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createSparseLinear
    >>> np.random.seed(123)
    >>> init_weight = np.random.randn(5, 1000)
    >>> init_bias = np.random.randn(5)
    >>> sparselinear = SparseLinear(1000, 5, init_weight=init_weight, init_bias=init_bias)
    creating: createSparseLinear
    >>> input = JTensor.sparse(np.array([1, 3, 5, 2, 4, 6]), np.array([0, 0, 0, 1, 1, 1, 1, 5, 300, 2, 100, 500]), np.array([2, 1000]))
    >>> print(sparselinear.forward(input))
    [[ 10.09569263 -10.94844246  -4.1086688    1.02527523  11.80737209]
     [  7.9651413    9.7131443  -10.22719955   0.02345783  -3.74368906]]
    '''

    def __init__(self, input_size, output_size, with_bias=True, backwardStart=-1, backwardLength=-1,
                 wRegularizer=None, bRegularizer=None, init_weight=None, init_bias=None,
                 init_grad_weight=None, init_grad_bias=None, bigdl_type="float"):
        # Optional numpy initial values become JTensors before the JVM call.
        j_weight = JTensor.from_ndarray(init_weight)
        j_bias = JTensor.from_ndarray(init_bias)
        j_grad_weight = JTensor.from_ndarray(init_grad_weight)
        j_grad_bias = JTensor.from_ndarray(init_grad_bias)
        super(SparseLinear, self).__init__(None, bigdl_type, input_size, output_size,
                                           with_bias, backwardStart, backwardLength,
                                           wRegularizer, bRegularizer,
                                           j_weight, j_bias, j_grad_weight, j_grad_bias)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegate initializer configuration to the JVM side; chainable.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class DenseToSparse(Layer):
    '''
    Convert DenseTensor to SparseTensor.

    >>> DenseToSparse = DenseToSparse()
    creating: createDenseToSparse
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments; the JVM side does the conversion.
        super(DenseToSparse, self).__init__(None, bigdl_type)
class ReLU(Layer):
    '''
    Applies the rectified linear unit (ReLU) function element-wise to the input
    Tensor, producing an output Tensor of the same dimension.

    ReLU is defined as: f(x) = max(0, x)

    Can optionally do its operation in-place without using extra state memory

    >>> relu = ReLU()
    creating: createReLU
    '''

    def __init__(self, ip=False, bigdl_type="float"):
        # ip=True requests the in-place variant on the JVM side.
        super(ReLU, self).__init__(None, bigdl_type, ip)
class Tanh(Layer):
    '''
    Applies the Tanh function element-wise to the input Tensor, producing an
    output Tensor of the same dimension.
    Tanh is defined as f(x) = (exp(x)-exp(-x))/(exp(x)+exp(-x)).

    >>> tanh = Tanh()
    creating: createTanh
    '''

    def __init__(self, bigdl_type="float"):
        super(Tanh, self).__init__(None, bigdl_type)
class Sigmoid(Layer):
    '''
    Applies the Sigmoid function element-wise to the input Tensor, producing an
    output Tensor of the same dimension.

    >>> sigmoid = Sigmoid()
    creating: createSigmoid
    '''

    def __init__(self, bigdl_type="float"):
        super(Sigmoid, self).__init__(None, bigdl_type)
class Echo(Layer):
    '''
    A debugging aid: this module can print activation and gradient in your
    model topology.

    >>> echo = Echo()
    creating: createEcho
    '''

    def __init__(self, bigdl_type="float"):
        super(Echo, self).__init__(None, bigdl_type)
class LogSoftMax(Layer):
    '''
    Applies the LogSoftMax function to an n-dimensional input Tensor.

    LogSoftmax is defined as: f_i(x) = log(1 / a exp(x_i))
    where a = sum_j[exp(x_j)].

    >>> logSoftMax = LogSoftMax()
    creating: createLogSoftMax
    '''

    def __init__(self, bigdl_type="float"):
        super(LogSoftMax, self).__init__(None, bigdl_type)
class Sequential(Container):
    '''
    Sequential provides a means to plug layers together in a feed-forward,
    fully connected manner.

    >>> echo = Echo()
    creating: createEcho
    >>> s = Sequential()
    creating: createSequential
    >>> s = s.add(echo)
    >>> s = s.add(s)
    >>> s = s.add(echo)
    '''

    def __init__(self, bigdl_type="float"):
        super(Sequential, self).__init__(None, bigdl_type)
class TemporalConvolution(Layer):
    '''
    Applies a 1D convolution over an input sequence composed of nInputFrame frames..
    The input tensor in `forward(input)` is expected to be a 2D tensor
    (`nInputFrame` x `inputFrameSize`) or a 3D tensor
    (`nBatchFrame` x `nInputFrame` x `inputFrameSize`).

    :param input_frame_size The input frame size expected in sequences given into `forward()`
    :param output_frame_size The output frame size the convolution layer will produce.
    :param kernel_w The kernel width of the convolution
    :param stride_w The step of the convolution in the width dimension.
    :param propagate_back Whether propagate gradient back, default is true.
    :param weight_regularizer instance of [[Regularizer]]
                        (eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bias_regularizer instance of [[Regularizer]]
                         applied to the bias.
    :param init_weight Initial weight
    :param init_bias Initial bias
    :param init_grad_weight Initial gradient weight
    :param init_grad_bias Initial gradient bias

    >>> temporalConvolution = TemporalConvolution(6, 12, 5, 5)
    creating: createTemporalConvolution
    >>> temporalConvolution.setWRegularizer(L1Regularizer(0.5))
    creating: createL1Regularizer
    >>> temporalConvolution.setBRegularizer(L1Regularizer(0.5))
    creating: createL1Regularizer
    '''

    def __init__(self,
                 input_frame_size,
                 output_frame_size,
                 kernel_w,
                 stride_w=1,
                 propagate_back=True,
                 weight_regularizer=None,
                 bias_regularizer=None,
                 init_weight=None,
                 init_bias=None,
                 init_grad_weight=None,
                 init_grad_bias=None,
                 bigdl_type="float"):
        # Convert the optional numpy initial values to JTensors up front, then
        # hand everything to the JVM constructor in positional order.
        j_weight = JTensor.from_ndarray(init_weight)
        j_bias = JTensor.from_ndarray(init_bias)
        j_grad_weight = JTensor.from_ndarray(init_grad_weight)
        j_grad_bias = JTensor.from_ndarray(init_grad_bias)
        super(TemporalConvolution, self).__init__(
            None, bigdl_type, input_frame_size, output_frame_size, kernel_w,
            stride_w, propagate_back, weight_regularizer, bias_regularizer,
            j_weight, j_bias, j_grad_weight, j_grad_bias)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegate initializer configuration to the JVM side; chainable.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class BinaryTreeLSTM(Layer):
    '''
    Binary TreeLSTM (Constituency Tree LSTM) implementation.

    :param input_size: number of input units
    :param hidden_size: number of hidden units
    :param gate_output: whether the output is gated
    :param with_graph: build the internal LSTMs with [[Graph]] (default True)
    >>> treeLSTM = BinaryTreeLSTM(100, 200)
    creating: createBinaryTreeLSTM
    '''

    def __init__(self, input_size, hidden_size, gate_output=True,
                 with_graph=True, bigdl_type="float"):
        super(BinaryTreeLSTM, self).__init__(
            None, bigdl_type, input_size, hidden_size, gate_output, with_graph)
class SpatialConvolution(Layer):
    '''
    2D convolution over an input image made of several input planes.
    forward(input) expects a 3D tensor (nInputPlane x height x width).

    :param n_input_plane: number of input planes expected by forward()
    :param n_output_plane: number of output planes produced
    :param kernel_w: kernel width
    :param kernel_h: kernel height
    :param stride_w: step along the width dimension
    :param stride_h: step along the height dimension
    :param pad_w: extra zero padding per width
    :param pad_h: extra zero padding per height
    :param n_group: kernel group number
    :param propagate_back: whether to propagate gradients back
    :param wRegularizer: [[Regularizer]] (e.g. L1/L2) on the input weight matrices
    :param bRegularizer: [[Regularizer]] on the bias
    :param init_weight: optional initial weight value
    :param init_bias: optional initial bias value
    :param init_grad_weight: optional initial grad_weight value
    :param init_grad_bias: optional initial grad_bias value
    :param with_bias: whether the layer has a bias term
    :param data_format: "NHWC" ([batch, height, width, channels]) or
        "NCHW" ([batch, channels, height, width]) input layout
    >>> spatialConvolution = SpatialConvolution(6, 12, 5, 5)
    creating: createSpatialConvolution
    >>> spatialConvolution.setWRegularizer(L1Regularizer(0.5))
    creating: createL1Regularizer
    >>> spatialConvolution.setBRegularizer(L1Regularizer(0.5))
    creating: createL1Regularizer
    >>> import numpy as np
    >>> init_weight = np.random.randn(1, 12, 6, 5, 5)
    >>> init_bias = np.random.randn(12)
    >>> init_grad_weight = np.zeros([1, 12, 6, 5, 5])
    >>> init_grad_bias = np.zeros([12])
    >>> spatialConvolution = SpatialConvolution(6, 12, 5, 5, 1, 1, 0, 0, 1, True, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias, True, "NCHW")
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createSpatialConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kernel_w, kernel_h,
                 stride_w=1, stride_h=1, pad_w=0, pad_h=0, n_group=1,
                 propagate_back=True, wRegularizer=None, bRegularizer=None,
                 init_weight=None, init_bias=None, init_grad_weight=None,
                 init_grad_bias=None, with_bias=True, data_format="NCHW",
                 bigdl_type="float"):
        # Optional numpy initial values are converted to JTensors so Py4J
        # can marshal them; None passes straight through.
        super(SpatialConvolution, self).__init__(
            None, bigdl_type, n_input_plane, n_output_plane, kernel_w,
            kernel_h, stride_w, stride_h, pad_w, pad_h, n_group,
            propagate_back, wRegularizer, bRegularizer,
            JTensor.from_ndarray(init_weight), JTensor.from_ndarray(init_bias),
            JTensor.from_ndarray(init_grad_weight),
            JTensor.from_ndarray(init_grad_bias), with_bias, data_format)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Forward the initialization policy to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class TemporalMaxPooling(Layer):
    '''
    1D max pooling over kW-wide regions advanced by dW steps, applied to a
    sequence of nInputFrame frames. forward(input) accepts a 2D tensor
    (nInputFrame x inputFrameSize) or a batched 3D tensor
    (nBatchFrame x nInputFrame x inputFrameSize).
    For a 2D input of nInputFrame x inputFrameSize, the output is
    nOutputFrame x inputFrameSize with
    nOutputFrame = (nInputFrame - k_w) / d_w + 1.

    :param k_w: kernel width
    :param d_w: step size along the width
    >>> temporalMaxPooling = TemporalMaxPooling(2, 2)
    creating: createTemporalMaxPooling
    '''

    def __init__(self, k_w, d_w, bigdl_type="float"):
        super(TemporalMaxPooling, self).__init__(None, bigdl_type, k_w, d_w)
class SpatialMaxPooling(Layer):
    '''
    2D max pooling over kWxkH regions advanced by dWxdH steps; the number of
    output features equals the number of input planes.
    For a 3D input nInputPlane x height x width the output size is
    nOutputPlane x oheight x owidth where
    owidth = op((width + 2*padW - kW) / dW + 1)
    oheight = op((height + 2*padH - kH) / dH + 1)
    and op rounds down by default (switch with :ceil() / :floor()).
    When both padW and padH are -1, a tensorflow-"SAME"-style padding is
    used:
    outHeight = Math.ceil(inHeight.toFloat/strideH.toFloat)
    outWidth = Math.ceil(inWidth.toFloat/strideW.toFloat)
    padAlongHeight = Math.max(0, (outHeight - 1) * strideH + kernelH - inHeight)
    padAlongWidth = Math.max(0, (outWidth - 1) * strideW + kernelW - inWidth)
    padTop = padAlongHeight / 2
    padLeft = padAlongWidth / 2

    :param kw: kernel width
    :param kh: kernel height
    :param dw: step size in width
    :param dh: step size in height
    :param pad_w: padding in width
    :param pad_h: padding in height
    :param format: "NCHW" or "NHWC" input data layout
    >>> spatialMaxPooling = SpatialMaxPooling(2, 2, 2, 2)
    creating: createSpatialMaxPooling
    >>> spatialMaxPooling = SpatialMaxPooling(2, 2, 2, 2, -1, -1, True, "NHWC")
    creating: createSpatialMaxPooling
    '''

    # to_ceil selects the rounding operator: ceil() when True, floor() when
    # False.
    def __init__(self, kw, kh, dw, dh, pad_w=0, pad_h=0, to_ceil=False,
                 format="NCHW", bigdl_type="float"):
        super(SpatialMaxPooling, self).__init__(
            None, bigdl_type, kw, kh, dw, dh, pad_w, pad_h, to_ceil, format)
class Select(Layer):
    '''
    Selects a single index of the input tensor along the given dimension.

    :param dim: the dimension to select along
    :param index: the index within that dimension
    >>> select = Select(1, 1)
    creating: createSelect
    '''

    def __init__(self, dim, index, bigdl_type="float"):
        super(Select, self).__init__(None, bigdl_type, dim, index)
class Recurrent(Container):
    '''
    Container for rnn cells; different cell types are plugged in via add().

    >>> recurrent = Recurrent()
    creating: createRecurrent
    '''

    def __init__(self, bigdl_type="float"):
        super(Recurrent, self).__init__(None, bigdl_type)

    def get_hidden_state(self):
        """
        Fetch the hidden state and cell at the last time step.

        :return: list of hidden state and cell
        """
        return callBigDlFunc(self.bigdl_type, "getHiddenState", self.value)
class RecurrentDecoder(Recurrent):
    '''
    RecurrentDecoder module is a container of rnn cells which used to make
    a prediction of the next timestep based on the prediction we made from
    the previous timestep. Input for RecurrentDecoder is dynamically composed
    during training. input at t(i) is output at t(i-1), input at t(0) is
    user input, and user input has to be batch x stepShape(shape of the input
    at a single time step).
    Different types of rnn cells can be added using add() function.
    >>> recurrent_decoder = RecurrentDecoder(output_length = 5)
    creating: createRecurrentDecoder
    '''
    def __init__(self, output_length, bigdl_type="float"):
        # NOTE(review): super(Recurrent, self) — not super(RecurrentDecoder,
        # self) — deliberately skips Recurrent.__init__ (which accepts no
        # extra positional args) so that output_length can be forwarded to
        # the base constructor. The JVM class is still resolved from
        # type(self), i.e. createRecurrentDecoder is invoked (see doctest).
        # Presumably intentional; confirm against upstream before changing.
        super(Recurrent, self).__init__(None, bigdl_type, output_length)
class LSTM(Layer):
    '''
    | Long Short Term Memory architecture.
    | Ref.
    | A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module)
    | B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf
    | C. http://arxiv.org/pdf/1503.04069v1.pdf
    | D. https://github.com/wojzaremba/lstm
    | E. https://github.com/Element-Research/rnn/blob/master/FastLSTM.lua

    :param input_size: size of each input vector
    :param hidden_size: hidden unit size in the LSTM
    :param p: [[Dropout]] probability; see [RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf) and [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf)
    :param activation: activation function; Tanh by default. May also be the
        name of an existing activation passed as a string.
    :param inner_activation: activation for the inner cells; Sigmoid by
        default. May also be an activation name passed as a string.
    :param wRegularizer: [[Regularizer]] (e.g. L1/L2) on the input weight matrices
    :param uRegularizer: [[Regularizer]] on the recurrent weight matrices
    :param bRegularizer: [[Regularizer]] on the bias
    >>> lstm = LSTM(4, 3, 0.5, 'tanh', Sigmoid(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createSigmoid
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createTanh
    creating: createLSTM
    '''

    def __init__(self, input_size, hidden_size, p=0.0, activation=None,
                 inner_activation=None, wRegularizer=None, uRegularizer=None,
                 bRegularizer=None, bigdl_type="float"):
        # Resolve each activation: fall back to its default, then turn a
        # string name into the actual activation layer.
        def _resolve(act, default_cls):
            act = act if act else default_cls()
            if isinstance(act, six.string_types):
                act = get_activation_by_name(act)
            return act

        activation = _resolve(activation, Tanh)
        inner_activation = _resolve(inner_activation, Sigmoid)
        super(LSTM, self).__init__(
            None, bigdl_type, input_size, hidden_size, p, activation,
            inner_activation, wRegularizer, uRegularizer, bRegularizer)
class LSTMPeephole(Layer):
    '''
    | Long Short Term Memory architecture with peephole.
    | Ref. A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module)
    | B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf
    | C. http://arxiv.org/pdf/1503.04069v1.pdf
    | D. https://github.com/wojzaremba/lstm
    | E. https://github.com/Element-Research/rnn/blob/master/LSTM.lua

    :param input_size: size of each input vector
    :param hidden_size: hidden unit size in the LSTM
    :param p: [[Dropout]] probability; see [RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf) and [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf)
    :param wRegularizer: [[Regularizer]] (e.g. L1/L2) on the input weight matrices
    :param uRegularizer: [[Regularizer]] on the recurrent weight matrices
    :param bRegularizer: [[Regularizer]] on the bias
    >>> lstm = LSTMPeephole(4, 3, 0.5, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createLSTMPeephole
    '''

    def __init__(self, input_size=4, hidden_size=3, p=0.0, wRegularizer=None,
                 uRegularizer=None, bRegularizer=None, bigdl_type="float"):
        super(LSTMPeephole, self).__init__(
            None, bigdl_type, input_size, hidden_size, p,
            wRegularizer, uRegularizer, bRegularizer)
class GRU(Layer):
    '''
    Gated Recurrent Units architecture. The first input in a sequence uses
    zero values for both cell and hidden state.
    | Ref.
    | http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/
    | https://github.com/Element-Research/rnn/blob/master/GRU.lua

    :param input_size: size of each input vector
    :param hidden_size: hidden unit size in GRU
    :param p: [[Dropout]] probability; see [RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf) and [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf)
    :param activation: activation function; Tanh by default. May also be the
        name of an existing activation passed as a string.
    :param inner_activation: activation for the inner cells; Sigmoid by
        default. May also be an activation name passed as a string.
    :param wRegularizer: [[Regularizer]] (e.g. L1/L2) on the input weight matrices
    :param uRegularizer: [[Regularizer]] on the recurrent weight matrices
    :param bRegularizer: [[Regularizer]] on the bias
    >>> gru = GRU(4, 3, 0.5, Tanh(), Sigmoid(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createTanh
    creating: createSigmoid
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createGRU
    '''

    def __init__(self, input_size, hidden_size, p=0.0, activation=None,
                 inner_activation=None, wRegularizer=None, uRegularizer=None,
                 bRegularizer=None, bigdl_type="float"):
        # Resolve each activation: fall back to its default, then turn a
        # string name into the actual activation layer.
        def _resolve(act, default_cls):
            act = act if act else default_cls()
            if isinstance(act, six.string_types):
                act = get_activation_by_name(act)
            return act

        activation = _resolve(activation, Tanh)
        inner_activation = _resolve(inner_activation, Sigmoid)
        super(GRU, self).__init__(
            None, bigdl_type, input_size, hidden_size, p, activation,
            inner_activation, wRegularizer, uRegularizer, bRegularizer)
class RnnCell(Layer):
    '''
    A simple RNN cell; the caller supplies the activation function.

    :param input_size: size of each input vector
    :param hidden_size: hidden unit size of the simple RNN
    :param activation: activation function, or the name of an existing
        activation as a string
    :param isInputWithBias: whether the input transform carries a bias
    :param isHiddenWithBias: whether the hidden transform carries a bias
    :param wRegularizer: [[Regularizer]] (e.g. L1/L2) on the input weight matrices
    :param uRegularizer: [[Regularizer]] on the recurrent weight matrices
    :param bRegularizer: [[Regularizer]](../regularizers.md) on the bias
    >>> rnn = RnnCell(4, 3, Tanh(), True, True, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createTanh
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createRnnCell
    '''

    def __init__(self, input_size, hidden_size, activation,
                 isInputWithBias=True, isHiddenWithBias=True,
                 wRegularizer=None, uRegularizer=None, bRegularizer=None,
                 bigdl_type="float"):
        # Accept an activation name and look up the layer for the caller.
        if isinstance(activation, six.string_types):
            activation = get_activation_by_name(activation)
        super(RnnCell, self).__init__(
            None, bigdl_type, input_size, hidden_size, activation,
            isInputWithBias, isHiddenWithBias,
            wRegularizer, uRegularizer, bRegularizer)
class TimeDistributed(Layer):
    '''
    Applies the wrapped layer to every temporal slice of the input tensor —
    e.g. feeding each time step of the input into a Linear layer.
    The input layout is [Batch, Time, Other dims]; the wrapped layer must
    not change the length of the Other dims.

    >>> td = TimeDistributed(Linear(2, 3))
    creating: createLinear
    creating: createTimeDistributed
    '''

    def __init__(self, model, bigdl_type="float"):
        super(TimeDistributed, self).__init__(None, bigdl_type, model)
class Concat(Container):
    '''
    Runs its "parallel" child modules on the same input and concatenates
    their outputs along the provided {@code dimension}.
    ```
                    +-----------+
               +---->  module1  -----+
               |    |           |    |
    input -----+---->  module2  -----+----> output
               |    |           |    |
               +---->  module3  -----+
                    +-----------+
    ```

    :param dimension: dimension to concatenate along
    >>> concat = Concat(2)
    creating: createConcat
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(Concat, self).__init__(None, bigdl_type, dimension)
class SpatialAveragePooling(Layer):
    '''
    2D average pooling over kWxkH regions advanced by dWxdH steps; the
    number of output features equals the number of input planes.
    When both padW and padH are -1, a tensorflow-"SAME"-style padding is
    used:
    outHeight = Math.ceil(inHeight.toFloat/strideH.toFloat)
    outWidth = Math.ceil(inWidth.toFloat/strideW.toFloat)
    padAlongHeight = Math.max(0, (outHeight - 1) * strideH + kernelH - inHeight)
    padAlongWidth = Math.max(0, (outWidth - 1) * strideW + kernelW - inWidth)
    padTop = padAlongHeight / 2
    padLeft = padAlongWidth / 2

    :param kw: kernel width
    :param kh: kernel height
    :param dw: step width
    :param dh: step height
    :param pad_w: padding width
    :param pad_h: padding height
    :param global_pooling: when True, pool over the whole input
        (kH = input->height and kW = input->width)
    :param ceil_mode: whether the output size is ceiled or floored
    :param count_include_pad: whether padding counts toward the number of
        elements in the averaged region
    :param divide: whether to perform the averaging division
    :param format: "NCHW" or "NHWC" input data layout
    >>> spatialAveragePooling = SpatialAveragePooling(7,7)
    creating: createSpatialAveragePooling
    >>> spatialAveragePooling = SpatialAveragePooling(2, 2, 2, 2, -1, -1, True, format="NHWC")
    creating: createSpatialAveragePooling
    '''

    def __init__(self, kw, kh, dw=1, dh=1, pad_w=0, pad_h=0,
                 global_pooling=False, ceil_mode=False,
                 count_include_pad=True, divide=True, format="NCHW",
                 bigdl_type="float"):
        super(SpatialAveragePooling, self).__init__(
            None, bigdl_type, kw, kh, dw, dh, pad_w, pad_h, global_pooling,
            ceil_mode, count_include_pad, divide, format)

    def set_weights(self, weights):
        # Simply defers to the base implementation.
        super(SpatialAveragePooling, self).set_weights(weights)
class SpatialBatchNormalization(Layer):
    '''
    Batch Normalization as described in:
    "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift"
    by Sergey Ioffe, Christian Szegedy.
    This variant is intended for inputs coming from convolution layers; for
    non-convolutional layers see [[BatchNormalization]].
    The computed operation is:
    ```
              ( x - mean(x) )
    y =  -------------------- * gamma + beta
         standard-deviation(x)
    ```
    where gamma and beta are (optionally) learnable parameters.

    :param n_output: number of output feature maps
    :param eps: small constant avoiding division by zero
    :param momentum: momentum used for the running statistics update
    :param affine: whether to apply the affine (gamma/beta) transform
    :param data_format: "NHWC" ([batch, height, width, channels]) or
        "NCHW" ([batch, channels, height, width]) input layout (string, or
        a DataFormat object on the Scala side)
    >>> spatialBatchNormalization = SpatialBatchNormalization(1)
    creating: createSpatialBatchNormalization
    >>> import numpy as np
    >>> init_weight = np.array([1.0])
    >>> init_grad_weight = np.array([0.0])
    >>> init_bias = np.array([0.0])
    >>> init_grad_bias = np.array([0.0])
    >>> spatialBatchNormalization = SpatialBatchNormalization(1, 1e-5, 0.1, True, init_weight, init_bias, init_grad_weight, init_grad_bias)
    creating: createSpatialBatchNormalization
    >>> spatialBatchNormalization = SpatialBatchNormalization(1, 1e-5, 0.1, True, init_weight, init_bias, init_grad_weight, init_grad_bias, "NHWC")
    creating: createSpatialBatchNormalization
    '''

    def __init__(self, n_output, eps=1e-5, momentum=0.1, affine=True,
                 init_weight=None, init_bias=None, init_grad_weight=None,
                 init_grad_bias=None, data_format="NCHW",
                 bigdl_type="float"):
        # Numpy initial values cross into the JVM as JTensors (None passes
        # through).
        super(SpatialBatchNormalization, self).__init__(
            None, bigdl_type, n_output, eps, momentum, affine,
            JTensor.from_ndarray(init_weight), JTensor.from_ndarray(init_bias),
            JTensor.from_ndarray(init_grad_weight),
            JTensor.from_ndarray(init_grad_bias), data_format)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Forward the initialization policy to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class SpatialCrossMapLRN(Layer):
    '''
    Spatial Local Response Normalization across different feature maps.
    The computed operation is:
    ```
                                 x_f
    y_f =  -------------------------------------------------
            (k+(alpha/size)* sum_{l=l1 to l2} (x_l^2^))^beta^
    ```
    where x_f is the input at spatial location h,w (omitted for simplicity)
    and feature map f; l1 = max(0, f-ceil(size/2)) and
    l2 = min(F, f-ceil(size/2) + size), with F the number of feature maps.

    :param size: number of channels summed over
    :param alpha: scaling parameter
    :param beta: exponent
    :param k: additive constant
    :param data_format: "NHWC" ([batch, height, width, channels]) or
        "NCHW" ([batch, channels, height, width]) input layout (string, or
        a DataFormat object on the Scala side)
    >>> spatialCrossMapLRN = SpatialCrossMapLRN()
    creating: createSpatialCrossMapLRN
    >>> spatialCrossMapLRN = SpatialCrossMapLRN(5, 1.0, 0.75, 1.0, "NHWC")
    creating: createSpatialCrossMapLRN
    '''

    def __init__(self, size=5, alpha=1.0, beta=0.75, k=1.0,
                 data_format="NCHW", bigdl_type="float"):
        super(SpatialCrossMapLRN, self).__init__(
            None, bigdl_type, size, alpha, beta, k, data_format)
class Dropout(Layer):
    '''
    Masks (zeroes) parts of the input using a Bernoulli distribution: each
    element is dropped with probability initP. With scale enabled, training
    outputs are multiplied by 1/(1-initP). During evaluation the input is
    passed through unchanged.

    :param init_p: drop probability
    :param inplace: operate in-place
    :param scale: whether to scale by 1/(1-initP)
    >>> dropout = Dropout(0.4)
    creating: createDropout
    '''

    def __init__(self, init_p=0.5, inplace=False, scale=True,
                 bigdl_type="float"):
        super(Dropout, self).__init__(None, bigdl_type, init_p, inplace,
                                      scale)
class GaussianDropout(Layer):
    '''
    Applies multiplicative 1-centered Gaussian noise with standard
    deviation `sqrt(rate / (1 - rate))`.
    Being a regularization layer, it is active only at training time.

    :param rate: drop probability (as with `Dropout`)
    >>> GaussianDropout = GaussianDropout(0.5)
    creating: createGaussianDropout
    '''

    def __init__(self, rate, bigdl_type="float"):
        super(GaussianDropout, self).__init__(None, bigdl_type, rate)
class GaussianNoise(Layer):
    '''
    Applies additive zero-centered Gaussian noise — useful against
    overfitting (a form of random data augmentation). Gaussian noise is the
    natural corruption process for real-valued inputs.
    Being a regularization layer, it is active only at training time.

    :param stddev: standard deviation of the noise distribution
    >>> GaussianNoise = GaussianNoise(0.5)
    creating: createGaussianNoise
    '''

    def __init__(self, stddev, bigdl_type="float"):
        super(GaussianNoise, self).__init__(None, bigdl_type, stddev)
class View(Layer):
    '''
    Creates a new view of the input tensor with the sizes given to the
    constructor. setNumInputDims() declares the expected number of input
    dimensions, which enables minibatch inputs when one size is -1.

    :param sizes: sizes of the new view
    >>> view = View([1024,2])
    creating: createView
    '''

    def __init__(self, sizes, num_input_dims=0, bigdl_type="float"):
        super(View, self).__init__(None, bigdl_type, sizes, num_input_dims)
class Abs(Layer):
    '''
    Element-wise absolute value.

    >>> abs = Abs()
    creating: createAbs
    '''

    def __init__(self, bigdl_type="float"):
        super(Abs, self).__init__(None, bigdl_type)
class Add(Layer):
    '''
    Adds a learnable bias term to the input data.

    :param input_size: size of the input data
    >>> add = Add(1)
    creating: createAdd
    '''

    def __init__(self, input_size, bigdl_type="float"):
        super(Add, self).__init__(None, bigdl_type, input_size)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Forward the initialization policy to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class AddConstant(Layer):
    '''
    Adds a constant scalar to every element of the input.

    :param constant_scalar: the constant to add
    :param inplace: optionally operate in-place to avoid extra state memory
    >>> addConstant = AddConstant(1e-5, True)
    creating: createAddConstant
    '''

    def __init__(self, constant_scalar, inplace=False, bigdl_type="float"):
        super(AddConstant, self).__init__(None, bigdl_type, constant_scalar,
                                          inplace)
class BatchNormalization(Layer):
    '''
    Batch Normalization as described in:
    "Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift"
    by Sergey Ioffe, Christian Szegedy https://arxiv.org/abs/1502.03167
    This variant is for inputs NOT coming from convolution layers; for
    those, use nn.SpatialBatchNormalization.
    The computed operation is:
    ```
              ( x - mean(x) )
    y =  -------------------- * gamma + beta
         standard-deviation(x)
    ```
    where gamma and beta are (optionally) learnable parameters.

    :param n_output: number of output feature maps
    :param eps: small constant avoiding division by zero
    :param momentum: momentum used for the running statistics update
    :param affine: whether to apply the affine (gamma/beta) transform
    >>> batchNormalization = BatchNormalization(1, 1e-5, 1e-5, True)
    creating: createBatchNormalization
    >>> import numpy as np
    >>> init_weight = np.random.randn(2)
    >>> init_grad_weight = np.zeros([2])
    >>> init_bias = np.zeros([2])
    >>> init_grad_bias = np.zeros([2])
    >>> batchNormalization = BatchNormalization(2, 1e-5, 1e-5, True, init_weight, init_bias, init_grad_weight, init_grad_bias)
    creating: createBatchNormalization
    '''

    def __init__(self, n_output, eps=1e-5, momentum=0.1, affine=True,
                 init_weight=None, init_bias=None, init_grad_weight=None,
                 init_grad_bias=None, bigdl_type="float"):
        # Numpy initial values cross into the JVM as JTensors (None passes
        # through).
        super(BatchNormalization, self).__init__(
            None, bigdl_type, n_output, eps, momentum, affine,
            JTensor.from_ndarray(init_weight), JTensor.from_ndarray(init_bias),
            JTensor.from_ndarray(init_grad_weight),
            JTensor.from_ndarray(init_grad_bias))

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Forward the initialization policy to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class BifurcateSplitTable(Layer):
    '''
    Takes a Tensor (or a batch of tensors) and emits two tables by
    splitting the Tensor along the given dimension `dimension`.

    :param dimension: dimension to split along
    :param T: numeric type; only float/double are supported for now
    >>> bifurcateSplitTable = BifurcateSplitTable(1)
    creating: createBifurcateSplitTable
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(BifurcateSplitTable, self).__init__(None, bigdl_type,
                                                  dimension)
class Bilinear(Layer):
    '''
    Bilinear transformation with sparse inputs. forward(input) takes a
    table of two inputs x_1 and x_2 of size N x inputDimension1 and
    N x inputDimension2 respectively.

    :param input_size1: input dimension of x_1
    :param input_size2: input dimension of x_2
    :param output_size: output dimension
    :param bias_res: whether to use a bias
    :param wRegularizer: [[Regularizer]] (e.g. L1/L2) on the input weight matrices
    :param bRegularizer: [[Regularizer]] on the bias
    >>> bilinear = Bilinear(1, 1, 1, True, L1Regularizer(0.5))
    creating: createL1Regularizer
    creating: createBilinear
    '''

    def __init__(self, input_size1, input_size2, output_size, bias_res=True,
                 wRegularizer=None, bRegularizer=None, bigdl_type="float"):
        super(Bilinear, self).__init__(
            None, bigdl_type, input_size1, input_size2, output_size,
            bias_res, wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Forward the initialization policy to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class Bottle(Container):
    '''
    Lets input of varying dimensionality flow through a module that expects
    nInputDim dimensions, producing output of nOutputDim dimensions.

    :param module: the transform module
    :param n_input_dim: nInputDim dimensions of the module
    :param n_output_dim1: nOutputDim dimensions of the output
    >>> bottle = Bottle(Linear(100,10), 1, 1)
    creating: createLinear
    creating: createBottle
    '''

    def __init__(self, module, n_input_dim=2, n_output_dim1=INTMAX,
                 bigdl_type="float"):
        super(Bottle, self).__init__(None, bigdl_type, module, n_input_dim,
                                     n_output_dim1)
class CAdd(Layer):
    '''
    Holds a bias tensor of the given size that is added element-wise to the
    input. If the bias element count matches the input, a plain element-wise
    add is performed; otherwise the bias is expanded to the input's size by
    repeating along unmatched singleton dimensions (an unmatched
    non-singleton dimension is an error). For batched input, a singleton
    dimension is prepended before expanding.

    :param size: size of the bias
    :param bRegularizer: [[Regularizer]] on the bias
    >>> cAdd = CAdd([1,2])
    creating: createCAdd
    '''

    def __init__(self, size, bRegularizer=None, bigdl_type="float"):
        super(CAdd, self).__init__(None, bigdl_type, size, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Forward the initialization policy to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class CAddTable(Layer):
    '''
    Element-wise sum of the tensors in the input table (an array of
    same-sized tensors).

    :param inplace: reuse the input memory
    >>> cAddTable = CAddTable(True)
    creating: createCAddTable
    '''

    def __init__(self, inplace=False, bigdl_type="float"):
        super(CAddTable, self).__init__(None, bigdl_type, inplace)
class CAveTable(Layer):
    '''
    Element-wise average of the tensors in the input table (an array of
    same-sized tensors).

    :param inplace: reuse the input memory
    >>> cAveTable = CAveTable(True)
    creating: createCAveTable
    '''

    def __init__(self, inplace=False, bigdl_type="float"):
        super(CAveTable, self).__init__(None, bigdl_type, inplace)
class CDivTable(Layer):
    '''
    Takes a table of two Tensors and returns their component-wise division.

    >>> cDivTable = CDivTable()
    creating: createCDivTable
    '''

    def __init__(self, bigdl_type="float"):
        super(CDivTable, self).__init__(None, bigdl_type)
class CMaxTable(Layer):
    '''
    Takes a table of Tensors and outputs their element-wise maximum.

    >>> cMaxTable = CMaxTable()
    creating: createCMaxTable
    '''

    def __init__(self, bigdl_type="float"):
        super(CMaxTable, self).__init__(None, bigdl_type)
class CMinTable(Layer):
    '''
    Takes a table of tensors and outputs their element-wise minimum.

    >>> cMinTable = CMinTable()
    creating: createCMinTable
    '''

    def __init__(self, bigdl_type="float"):
        super(CMinTable, self).__init__(None, bigdl_type)
class CMul(Layer):
    '''
    Applies a component-wise multiplication to the incoming data.

    :param size: size of the data
    :param wRegularizer: instance of [[Regularizer]] applied to the weights

    >>> cMul = CMul([1,2])
    creating: createCMul
    '''

    def __init__(self, size, wRegularizer=None, bigdl_type="float"):
        super(CMul, self).__init__(None, bigdl_type, size, wRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegates initialization configuration to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class CMulTable(Layer):
    '''
    Takes a table of tensors and outputs their element-wise product.

    >>> cMulTable = CMulTable()
    creating: createCMulTable
    '''

    def __init__(self, bigdl_type="float"):
        super(CMulTable, self).__init__(None, bigdl_type)
class CSubTable(Layer):
    '''
    Takes a table holding two tensors and returns their component-wise subtraction.

    >>> cSubTable = CSubTable()
    creating: createCSubTable
    '''

    def __init__(self, bigdl_type="float"):
        super(CSubTable, self).__init__(None, bigdl_type)
class Clamp(Layer):
    '''
    Clamps every element into the range [min_value, max_value].
    Elements inside the range pass through unchanged; elements below min_value
    (or above max_value) are saturated to min_value (or max_value).

    :param min: lower bound of the clamping range
    :param max: upper bound of the clamping range

    >>> clamp = Clamp(1, 3)
    creating: createClamp
    '''

    # NOTE: the parameter names `min`/`max` shadow builtins but are part of the
    # public keyword interface, so they are kept as-is.
    def __init__(self, min, max, bigdl_type="float"):
        super(Clamp, self).__init__(None, bigdl_type, min, max)
class Contiguous(Layer):
    '''
    Ensures that both the input and grad_output tensors are contiguous in memory.

    >>> contiguous = Contiguous()
    creating: createContiguous
    '''

    def __init__(self, bigdl_type="float"):
        super(Contiguous, self).__init__(None, bigdl_type)
class Cosine(Layer):
    '''
    Computes the cosine similarity of the input to k mean centers.
    The input to forward(input) must be a vector (1D tensor) of size input_size,
    or a matrix (2D tensor) where each row is one sample of a batch (rows = batch
    size, columns = input_size).

    :param input_size: the size of each input sample
    :param output_size: the size of the module output of each sample

    >>> cosine = Cosine(2,3)
    creating: createCosine
    '''

    def __init__(self, input_size, output_size, bigdl_type="float"):
        super(Cosine, self).__init__(None, bigdl_type, input_size, output_size)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegates initialization configuration to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class CosineDistance(Layer):
    '''
    Outputs the cosine distance between the inputs.

    >>> cosineDistance = CosineDistance()
    creating: createCosineDistance
    '''

    def __init__(self, bigdl_type="float"):
        super(CosineDistance, self).__init__(None, bigdl_type)
class UpSampling2D(Layer):
    """
    Upsampling layer for 2D inputs.
    Repeats the heights and widths of the data by size[0] and size[1] respectively.
    For NCHW input, the output size is (N, C, H * size[0], W * size[1]).

    :param size tuple of 2 integers. The upsampling factors for heights and widths.
    :param format DataFormat, NCHW or NHWC

    >>> upsampled2d = UpSampling2D([2, 3])
    creating: createUpSampling2D
    """

    def __init__(self, size, data_format="nchw", bigdl_type="float"):
        super(UpSampling2D, self).__init__(None, bigdl_type, size, data_format)
class UpSampling1D(Layer):
    """
    Upsampling layer for 1D inputs.
    Repeats each temporal step `length` times along the time axis.
    Input of size (batch, steps, features) becomes (batch, steps * length, features).

    :param length integer, upsampling factor.

    >>> upsampled1d = UpSampling1D(2)
    creating: createUpSampling1D
    """

    def __init__(self, length, bigdl_type="float"):
        super(UpSampling1D, self).__init__(None, bigdl_type, length)
class Input(Node):
    '''
    Input layer that passes its input tensors through unchanged. It serves as an
    entry point to a Graph container when the graph's first layer expects multiple
    tensors: each input node accepts exactly one tensor, so place one Input node
    per tensor and connect their outputs to the multi-input module.
    Note that the return value is a Node wrapping an input layer, not a Layer.

    >>> input = Input()
    creating: createInput
    '''

    def __init__(self, bigdl_type="float"):
        super(Input, self).__init__(None, bigdl_type)
class DotProduct(Layer):
    '''
    Simple table layer taking a table of two tensors as input and producing
    their dot product as output.

    >>> dotProduct = DotProduct()
    creating: createDotProduct
    '''

    def __init__(self, bigdl_type="float"):
        super(DotProduct, self).__init__(None, bigdl_type)
class ELU(Layer):
    '''
    Exponential Linear Unit activation.
    D-A Clevert, Thomas Unterthiner, Sepp Hochreiter:
    "Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)"
    [http://arxiv.org/pdf/1511.07289.pdf]

    >>> eLU = ELU(1e-5, True)
    creating: createELU
    '''

    def __init__(self, alpha=1.0, inplace=False, bigdl_type="float"):
        super(ELU, self).__init__(None, bigdl_type, alpha, inplace)
class Euclidean(Layer):
    '''
    Outputs the Euclidean distance of the input to output_size centers.

    :param input_size: inputSize
    :param output_size: outputSize
    :param fast_backward: whether to use the fast backward implementation

    >>> euclidean = Euclidean(1, 1, True)
    creating: createEuclidean
    '''

    def __init__(self, input_size, output_size, fast_backward=True,
                 bigdl_type="float"):
        super(Euclidean, self).__init__(None, bigdl_type, input_size,
                                        output_size, fast_backward)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegates initialization configuration to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class Exp(Layer):
    '''
    Applies exp element-wise to the input tensor.

    >>> exp = Exp()
    creating: createExp
    '''

    def __init__(self, bigdl_type="float"):
        super(Exp, self).__init__(None, bigdl_type)
class FlattenTable(Layer):
    '''
    Table layer that takes an arbitrarily deep (potentially nested) table of
    tensors as input and produces a flat table of tensors with no nesting.

    >>> flattenTable = FlattenTable()
    creating: createFlattenTable
    '''

    def __init__(self, bigdl_type="float"):
        super(FlattenTable, self).__init__(None, bigdl_type)
class GradientReversal(Layer):
    '''
    Simple module that preserves its input on the forward pass, but multiplies
    the gradient from the subsequent layer by -lambda before passing it to the
    preceding layer. This can be used to maximise an objective function while
    using gradient descent, as described in
    ["Domain-Adversarial Training of Neural Networks"
    (http://arxiv.org/abs/1505.07818)]

    :param the_lambda: hyper-parameter lambda, can be set dynamically during training

    >>> gradientReversal = GradientReversal(1e-5)
    creating: createGradientReversal
    >>> gradientReversal = GradientReversal()
    creating: createGradientReversal
    '''

    def __init__(self, the_lambda=1.0, bigdl_type="float"):
        super(GradientReversal, self).__init__(None, bigdl_type, the_lambda)
class HardShrink(Layer):
    '''
    Transfer layer applying the hard shrinkage function element-wise to the
    input tensor. The parameter lambda defaults to 0.5.
    ```
            x, if x >  lambda
    f(x) =  x, if x < -lambda
            0, otherwise
    ```

    :param the_lambda: a threshold value whose default value is 0.5

    >>> hardShrink = HardShrink(1e-5)
    creating: createHardShrink
    '''

    def __init__(self, the_lambda=0.5, bigdl_type="float"):
        super(HardShrink, self).__init__(None, bigdl_type, the_lambda)
class HardTanh(Layer):
    '''
    Applies HardTanh to each element of the input, defined as:
    ```
           | maxValue, if x > maxValue
    f(x) = | minValue, if x < minValue
           | x, otherwise
    ```

    :param min_value: minValue in f(x), default is -1.
    :param max_value: maxValue in f(x), default is 1.
    :param inplace: whether enable inplace model.

    >>> hardTanh = HardTanh(1e-5, 1e5, True)
    creating: createHardTanh
    >>> hardTanh = HardTanh()
    creating: createHardTanh
    '''

    def __init__(self, min_value=-1.0, max_value=1.0, inplace=False,
                 bigdl_type="float"):
        super(HardTanh, self).__init__(None, bigdl_type, min_value, max_value,
                                       inplace)
class Index(Layer):
    '''
    Applies the Tensor index operation along the given dimension.

    :param dimension: the dimension to be indexed

    >>> index = Index(1)
    creating: createIndex
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(Index, self).__init__(None, bigdl_type, dimension)
class InferReshape(Layer):
    '''
    Reshapes the input tensor with automatic size inference support.
    Positive numbers in `size` set the corresponding output dimension directly.
    Two special values are also allowed in `size`:
     a. `0` keeps the corresponding dimension of the input unchanged,
        i.e. if the 1st input dimension is 2, the 1st output dimension is 2 too.
     b. `-1` infers that dimension from the others, so that the total number of
        output elements matches the input. Only one `-1` is allowed in `size`.

    For example:
     Input tensor with size: (4, 5, 6, 7)
     -> InferReshape(Array(4, 0, 3, -1))
     Output tensor with size: (4, 5, 3, 14)
    The 1st and 3rd dims are set to the given sizes, the 2nd is kept, and the
    last is inferred as 14.

    :param size: the target tensor size
    :param batch_mode: whether in batch mode

    >>> inferReshape = InferReshape([4, 0, 3, -1], False)
    creating: createInferReshape
    '''

    def __init__(self, size, batch_mode=False, bigdl_type="float"):
        super(InferReshape, self).__init__(None, bigdl_type, size, batch_mode)
class JoinTable(Layer):
    '''
    Table module that takes a table of tensors as input and outputs a single
    tensor by joining them along dimension `dimension`.
    The input is expected to be a tensor or a batch of tensors; for mini-batches
    the user must specify the number of dimensions of each sample tensor via
    `nInputDims`.

    :param dimension: to be join in this dimension
    :param nInputDims: specify the number of dimensions that this module will receive.
        If it is more than the dimension of input tensors, the first dimension
        would be considered as batch size

    >>> joinTable = JoinTable(1, 1)
    creating: createJoinTable
    '''

    def __init__(self, dimension, n_input_dims, bigdl_type="float"):
        super(JoinTable, self).__init__(None, bigdl_type, dimension,
                                        n_input_dims)
class SparseJoinTable(Layer):
    '''
    :: Experimental ::

    Sparse version of JoinTable. The backward pass sends the original gradOutput
    to the next layers without splitting, so this layer may only work in
    Wide&Deep-like models.

    :param dimension: to be join in this dimension

    >>> joinTable = SparseJoinTable(1)
    creating: createSparseJoinTable
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(SparseJoinTable, self).__init__(None, bigdl_type, dimension)
class L1Penalty(Layer):
    '''
    Adds an L1 penalty to an input (for sparsity).
    L1Penalty is an inline module: the forward pass copies the input tensor
    directly to the output while computing an L1 loss of the latent state
    (the input) and storing it in the module's loss field. During backward
    propagation: gradInput = gradOutput + gradLoss.

    :param l1weight: weight of the L1 loss term
    :param sizeAverage: whether to average the loss over elements
    :param provideOutput: whether to propagate the output

    >>> l1Penalty = L1Penalty(1, True, True)
    creating: createL1Penalty
    '''

    def __init__(self, l1weight, size_average=False, provide_output=True,
                 bigdl_type="float"):
        super(L1Penalty, self).__init__(None, bigdl_type, l1weight,
                                        size_average, provide_output)
class LeakyReLU(Layer):
    '''
    Transfer module applying LeakyReLU, where the parameter negval sets the
    slope of the negative part: f(x) = max(0, x) + negval * min(0, x)

    :param negval: sets the slope of the negative part
    :param inplace: if true, perform the operation in-place without extra state memory

    >>> leakyReLU = LeakyReLU(1e-5, True)
    creating: createLeakyReLU
    '''

    def __init__(self, negval=0.01, inplace=False, bigdl_type="float"):
        super(LeakyReLU, self).__init__(None, bigdl_type, negval, inplace)
class Log(Layer):
    '''
    Applies log element-wise to the input tensor, producing an output of the
    same dimension.

    >>> log = Log()
    creating: createLog
    '''

    def __init__(self, bigdl_type="float"):
        super(Log, self).__init__(None, bigdl_type)
class LogSigmoid(Layer):
    '''
    Transform layer computing the log of the sigmoid function:
    f(x) = Log(1 / (1 + e ^^ (-x)))

    >>> logSigmoid = LogSigmoid()
    creating: createLogSigmoid
    '''

    def __init__(self, bigdl_type="float"):
        super(LogSigmoid, self).__init__(None, bigdl_type)
class LookupTable(Layer):
    '''
    A convolution of width 1, commonly used for word embeddings.

    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization),
        applied to the input weights matrices.

    >>> lookupTable = LookupTable(1, 1, 1e-5, 1e-5, 1e-5, True, L1Regularizer(0.5))
    creating: createL1Regularizer
    creating: createLookupTable
    '''

    def __init__(self, n_index, n_output, padding_value=0.0, max_norm=DOUBLEMAX,
                 norm_type=2.0, should_scale_grad_by_freq=False,
                 wRegularizer=None, bigdl_type="float"):
        super(LookupTable, self).__init__(None, bigdl_type, n_index, n_output,
                                          padding_value, max_norm, norm_type,
                                          should_scale_grad_by_freq,
                                          wRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegates initialization configuration to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class MM(Layer):
    '''
    Performs matrix multiplication on two mini-batch inputs, producing a mini-batch.

    :param trans_a: specifying whether or not transpose the first input matrix
    :param trans_b: specifying whether or not transpose the second input matrix

    >>> mM = MM(True, True)
    creating: createMM
    '''

    def __init__(self, trans_a=False, trans_b=False, bigdl_type="float"):
        super(MM, self).__init__(None, bigdl_type, trans_a, trans_b)
class MV(Layer):
    '''
    Performs matrix-vector multiplication on two mini-batch inputs, producing a
    mini-batch.

    :param trans: whether make matrix transpose before multiplication

    >>> mV = MV(True)
    creating: createMV
    '''

    def __init__(self, trans=False, bigdl_type="float"):
        super(MV, self).__init__(None, bigdl_type, trans)
class MapTable(Container):
    '''
    Container for a single module that is applied to every input element.
    The member module is cloned as necessary to process all input elements.

    >>> mapTable = MapTable(Linear(100,10))
    creating: createLinear
    creating: createMapTable
    '''

    def __init__(self, module=None, bigdl_type="float"):
        super(MapTable, self).__init__(None, bigdl_type, module)
class MaskedSelect(Layer):
    '''
    Performs a torch.MaskedSelect on a Tensor. The mask is supplied as a tabular
    argument alongside the input on the forward and backward passes.

    >>> maskedSelect = MaskedSelect()
    creating: createMaskedSelect
    '''

    def __init__(self, bigdl_type="float"):
        super(MaskedSelect, self).__init__(None, bigdl_type)
class Max(Layer):
    '''
    Applies a max operation over dimension `dim`.

    :param dim: max along this dimension
    :param num_input_dims: Optional. If in a batch model, set to the inputDims.

    >>> max = Max(1)
    creating: createMax
    '''

    def __init__(self, dim, num_input_dims=INTMIN, bigdl_type="float"):
        super(Max, self).__init__(None, bigdl_type, dim, num_input_dims)
class Mean(Layer):
    '''
    Simple layer applying a mean operation over the given dimension. When
    nInputDims is provided, the input is treated as batches and the mean is
    applied over (dimension + 1). The input is expected to be a tensor or a
    batch of tensors; for mini-batches the user must specify the number of
    dimensions of each sample tensor via nInputDims.

    :param dimension: the dimension to be applied mean operation
    :param n_input_dims: specify the number of dimensions that this module will receive.
        If it is more than the dimension of input tensors, the first dimension
        would be considered as batch size
    :param squeeze: default is true, which will squeeze the sum dimension;
        set it to false to keep the sum dimension

    >>> mean = Mean(1, 1, True)
    creating: createMean
    '''

    def __init__(self, dimension=1, n_input_dims=-1, squeeze=True,
                 bigdl_type="float"):
        super(Mean, self).__init__(None, bigdl_type, dimension, n_input_dims,
                                   squeeze)
class Min(Layer):
    '''
    Applies a min operation over dimension `dim`.

    :param dim: min along this dimension
    :param num_input_dims: Optional. If in a batch model, set to the input_dim.

    >>> min = Min(1)
    creating: createMin
    '''

    def __init__(self, dim=1, num_input_dims=INTMIN, bigdl_type="float"):
        super(Min, self).__init__(None, bigdl_type, dim, num_input_dims)
class MixtureTable(Layer):
    '''
    Module taking a table {gater, experts} as input and outputting the mixture
    of experts (a tensor or table of tensors) using a gater tensor. When dim is
    provided, it specifies the dimension of the experts tensor that will be
    interpolated (or mixed); otherwise the experts should take the form of a
    table of tensors. Works for experts of dimension 1D or more, and for a 1D
    or 2D gater (single examples or mini-batches).

    >>> mixtureTable = MixtureTable()
    creating: createMixtureTable
    >>> mixtureTable = MixtureTable(10)
    creating: createMixtureTable
    '''

    def __init__(self, dim=INTMAX, bigdl_type="float"):
        super(MixtureTable, self).__init__(None, bigdl_type, dim)
class Mul(Layer):
    '''
    Multiplies the incoming data by a single learnable scalar factor.

    >>> mul = Mul()
    creating: createMul
    '''

    def __init__(self, bigdl_type="float"):
        super(Mul, self).__init__(None, bigdl_type)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegates initialization configuration to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class MulConstant(Layer):
    '''
    Multiplies the input tensor by a (non-learnable) scalar constant.
    Sometimes useful for debugging purposes.

    :param scalar: scalar constant
    :param inplace: Can optionally do its operation in-place without using extra state memory

    >>> mulConstant = MulConstant(2.5)
    creating: createMulConstant
    '''

    def __init__(self, scalar, inplace=False, bigdl_type="float"):
        super(MulConstant, self).__init__(None, bigdl_type, scalar, inplace)
class Narrow(Layer):
    '''
    Application of the narrow operation as a module.
    Also supports a negative length in order to handle inputs with an unknown size.

    >>> narrow = Narrow(1, 1, 1)
    creating: createNarrow
    '''

    def __init__(self, dimension, offset, length=1, bigdl_type="float"):
        super(Narrow, self).__init__(None, bigdl_type, dimension, offset,
                                     length)
class NarrowTable(Layer):
    '''
    Module taking a table as input and outputting the subtable starting at index
    `offset` with `length` elements (default 1 element). Elements can be tables
    or tensors. If `length` is negative, elements are selected from `offset` up
    to the element located abs(`length`) from the last element of the input.

    :param offset: the start index of table
    :param length: the length want to select

    >>> narrowTable = NarrowTable(1, 1)
    creating: createNarrowTable
    '''

    def __init__(self, offset, length=1, bigdl_type="float"):
        super(NarrowTable, self).__init__(None, bigdl_type, offset, length)
class Normalize(Layer):
    '''
    Normalizes the input tensor to have unit L_p norm. The smoothing parameter
    eps prevents division by zero when the input contains all zero elements
    (default = 1e-10). p can be the max value of double.

    >>> normalize = Normalize(1e-5, 1e-5)
    creating: createNormalize
    '''

    def __init__(self, p, eps=1e-10, bigdl_type="float"):
        super(Normalize, self).__init__(None, bigdl_type, p, eps)
class PReLU(Layer):
    '''
    Applies parametric ReLU, where the learnable parameter varies the slope of
    the negative part: PReLU: f(x) = max(0, x) + a * min(0, x)
    nOutputPlane defaults to 0, which means the shared version of PReLU with a
    single parameter is used.
    Notice: Please don't use weight decay on this.

    :param n_output_plane: input map number. Default is 0.

    >>> pReLU = PReLU(1)
    creating: createPReLU
    '''

    def __init__(self, n_output_plane=0, bigdl_type="float"):
        super(PReLU, self).__init__(None, bigdl_type, n_output_plane)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Delegates initialization configuration to the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class Padding(Layer):
    '''
    Adds `pad` units of padding to dimension `dim` of the input. Negative `pad`
    pads on the left; positive pads on the right of that dimension.
    The input is expected to be a tensor or a batch of tensors; for mini-batches
    the user must specify the number of dimensions of each sample tensor via
    n_input_dim.

    :param dim: the dimension to be applied padding operation
    :param pad: num of the pad units
    :param n_input_dim: specify the number of dimensions that this module will receive.
        If it is more than the dimension of input tensors, the first dimension
        would be considered as batch size
    :param value: padding value

    >>> padding = Padding(1, 1, 1, 1e-5, 1)
    creating: createPadding
    '''

    def __init__(self, dim, pad, n_input_dim, value=0.0, n_index=1,
                 bigdl_type="float"):
        super(Padding, self).__init__(None, bigdl_type, dim, pad, n_input_dim,
                                      value, n_index)
class PairwiseDistance(Layer):
    '''
    Module taking a table of two vectors as input and outputting the distance
    between them using the p-norm.
    The input given in `forward(input)` is a [[Table]] containing two tensors,
    each either a vector (1D tensor) or a matrix (2D tensor). A vector must have
    size `inputSize`. For a matrix, each row is one sample of the batch (rows =
    batch size, columns = `inputSize`).

    :param norm: the norm of distance

    >>> pairwiseDistance = PairwiseDistance(2)
    creating: createPairwiseDistance
    '''

    def __init__(self, norm=2, bigdl_type="float"):
        super(PairwiseDistance, self).__init__(None, bigdl_type, norm)
class ParallelTable(Container):
    '''
    Container module applying the i-th member module to the i-th input, and
    producing an output in the form of a Table.

    >>> parallelTable = ParallelTable()
    creating: createParallelTable
    '''

    def __init__(self, bigdl_type="float"):
        super(ParallelTable, self).__init__(None, bigdl_type)
class Power(Layer):
    '''
    Applies an element-wise power operation with scale and shift:
    f(x) = (shift + scale * x)^power^

    :param power: the exponent.
    :param scale: Default is 1.
    :param shift: Default is 0.

    >>> power = Power(1e-5)
    creating: createPower
    '''

    def __init__(self, power, scale=1.0, shift=0.0, bigdl_type="float"):
        super(Power, self).__init__(None, bigdl_type, power, scale, shift)
class RReLU(Layer):
    '''
    Applies the randomized leaky rectified linear unit (RReLU) element-wise to
    the input tensor, producing an output of the same dimension. Informally the
    RReLU is also known as the 'insanity' layer. RReLU is defined as:
    ```
        f(x) = max(0,x) + a * min(0, x) where a ~ U(l, u).
    ```
    In training mode negative inputs are multiplied by a factor drawn from a
    uniform random distribution U(l, u). In evaluation mode an RReLU behaves
    like a LeakyReLU with a constant mean factor a = (l + u) / 2.
    By default, l = 1/8 and u = 1/3. If l == u an RReLU effectively becomes a
    LeakyReLU. Regardless of operating in in-place mode an RReLU internally
    allocates an input-sized noise tensor to store the random factors for
    negative inputs. The backward() operation assumes forward() has been called
    first. For reference see [Empirical Evaluation of Rectified Activations in
    Convolutional Network](http://arxiv.org/abs/1505.00853).

    :param lower: lower boundary of uniform random distribution
    :param upper: upper boundary of uniform random distribution
    :param inplace: optionally do its operation in-place without using extra state memory

    >>> rReLU = RReLU(1e-5, 1e5, True)
    creating: createRReLU
    '''

    def __init__(self, lower=1.0/8, upper=1.0/3, inplace=False,
                 bigdl_type="float"):
        super(RReLU, self).__init__(None, bigdl_type, lower, upper, inplace)
class SpatialSeperableConvolution(Layer):
    '''
    Separable convolution: first performs a depthwise spatial convolution
    (acting on each input channel separately), followed by a pointwise
    convolution mixing the resulting output channels together. The
    depth_multiplier argument controls how many output channels are generated
    per input channel in the depthwise step.
    (NOTE: the class name's spelling is part of the public API and is kept.)

    :param n_input_channel The number of expected input planes in the image given into forward()
    :param n_output_channel The number of output planes the convolution layer will produce.
    :param depth_multiplier how many internal channels are generated per input channel
    :param kernel_w The kernel width of the convolution
    :param kernel_h The kernel height of the convolution
    :param stride_w The step of the convolution in the width dimension.
    :param stride_h The step of the convolution in the height dimension
    :param pad_w The additional zeros added per width to the input planes.
    :param pad_h The additional zeros added per height to the input planes.
    :param with_bias: the optional initial value for if need bias
    :param data_format: a string value of "NHWC" or "NCHW" to specify the input data format
        of this layer. In "NHWC" format data is stored in the order of
        [batch_size, height, width, channels]; in "NCHW" format data is stored
        in the order of [batch_size, channels, height, width].
    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization),
        applied to the depth weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the pointwise bias.
    :param pRegularizer: instance of [[Regularizer]] applied to the pointwise weights.

    >>> conv = SpatialSeperableConvolution(6, 12, 1, 5, 5)
    creating: createSpatialSeperableConvolution
    >>> conv.setWRegularizer(L1Regularizer(0.5))
    creating: createL1Regularizer
    >>> conv.setBRegularizer(L1Regularizer(0.5))
    creating: createL1Regularizer
    >>> conv = SpatialSeperableConvolution(6, 12, 1, 5, 5, 1, 1, 0, 0, True, "NCHW", L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createSpatialSeperableConvolution
    '''

    def __init__(self, n_input_channel, n_output_channel, depth_multiplier,
                 kernel_w, kernel_h, stride_w=1, stride_h=1, pad_w=0, pad_h=0,
                 with_bias=True, data_format="NCHW", w_regularizer=None,
                 b_regularizer=None, p_regularizer=None, bigdl_type="float"):
        super(SpatialSeperableConvolution, self).__init__(
            None, bigdl_type,
            n_input_channel, n_output_channel, depth_multiplier,
            kernel_w, kernel_h, stride_w, stride_h, pad_w, pad_h,
            with_bias, data_format,
            w_regularizer, b_regularizer, p_regularizer)
class ReLU6(Layer):
    '''
    Same as ReLU except that the rectifying function f(x) saturates at x = 6.

    :param inplace: either True = in-place or False = keeping separate state

    >>> reLU6 = ReLU6(True)
    creating: createReLU6
    '''

    def __init__(self, inplace=False, bigdl_type="float"):
        super(ReLU6, self).__init__(None, bigdl_type, inplace)
class SReLU(Layer):
    '''S-shaped Rectified Linear Unit.

    It follows:
    `f(x) = t^r + a^r(x - t^r) for x >= t^r`,
    `f(x) = x for t^r > x > t^l`,
    `f(x) = t^l + a^l(x - t^l) for x <= t^l`.

    # References
    - [Deep Learning with S-shaped Rectified Linear Activation Units](http://arxiv.org/abs/1512.07030)

    :param share_axes: the axes along which to share learnable parameters for
        the activation function. For example, if the incoming feature maps are
        from a 2D convolution with output shape
        `(batch, height, width, channels)`, and you wish to share parameters
        across space so that each filter only has one set of parameters, set
        `share_axes=[1, 2]`.

    >>> srelu = SReLU()
    creating: createSReLU
    >>> srelu = SReLU((1, 2))
    creating: createSReLU
    '''

    def __init__(self, share_axes=None, bigdl_type="float"):
        super(SReLU, self).__init__(None, bigdl_type, share_axes)

    def set_init_method(self, tLeftInit=None, aLeftInit=None,
                        tRightInit=None, aRightInit=None):
        # SReLU has four learnable parameter groups (t/a, left/right), each
        # with its own initializer, configured on the JVM-side layer object.
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      tLeftInit, aLeftInit, tRightInit, aRightInit)
        return self
class ActivityRegularization(Layer):
    '''
    Layer that applies an update to the cost function based on input activity.

    :param l1: L1 regularization factor (positive float).
    :param l2: L2 regularization factor (positive float).

    >>> ar = ActivityRegularization(0.1, 0.02)
    creating: createActivityRegularization
    '''

    def __init__(self, l1=0.0, l2=0.0, bigdl_type="float"):
        super(ActivityRegularization, self).__init__(None, bigdl_type, l1, l2)
class Replicate(Layer):
    '''
    Replicate repeats the input `n_features` times along its `dim` dimension.
    Notice: No memory copy — the stride along the `dim`-th dimension is set to zero.

    :param n_features: replicate times.
    :param dim: dimension to be replicated.
    :param n_dim: specify the number of non-batch dimensions.

    >>> replicate = Replicate(2)
    creating: createReplicate
    '''

    def __init__(self, n_features, dim=1, n_dim=INTMAX, bigdl_type="float"):
        super(Replicate, self).__init__(None, bigdl_type, n_features, dim,
                                        n_dim)
class RoiPooling(Layer):
    '''
    Region of interest pooling.
    RoIPooling uses max pooling to convert the features inside any valid region
    of interest into a small feature map with a fixed spatial extent of
    pooledH * pooledW (e.g., 7 * 7). An RoI is a rectangular window into a conv
    feature map, defined by a four-tuple (x1, y1, x2, y2) specifying its
    top-left corner (x1, y1) and bottom-right corner (x2, y2).
    RoI max pooling divides the h * w RoI window into a pooledH * pooledW grid
    of sub-windows of approximate size h/H * w/W, then max-pools the values in
    each sub-window into the corresponding output grid cell.
    Pooling is applied independently to each feature map channel.

    :param pooled_w: spatial extent in width
    :param pooled_h: spatial extent in height
    :param spatial_scale: spatial scale

    >>> import numpy as np
    >>> input_data = np.random.rand(2,2,6,8)
    >>> input_rois = np.array([0, 0, 0, 7, 5, 1, 6, 2, 7, 5, 1, 3, 1, 6, 4, 0, 3, 3, 3, 3],dtype='float64').reshape(4,5)
    >>> m = RoiPooling(3,2,1.0)
    creating: createRoiPooling
    >>> out = m.forward([input_data,input_rois])
    '''

    def __init__(self, pooled_w, pooled_h, spatial_scale, bigdl_type="float"):
        super(RoiPooling, self).__init__(None, bigdl_type, pooled_w, pooled_h,
                                         spatial_scale)
class Scale(Layer):
    '''
    Scale is the combination of CMul and CAdd.
    Computes the element-wise product of input and weight, with the weight's
    shape "expanded" to match the input's shape; similarly expands the bias and
    performs an element-wise add.

    :param size: size of weight and bias

    >>> scale = Scale([1,2])
    creating: createScale
    '''

    def __init__(self, size, bigdl_type="float"):
        super(Scale, self).__init__(None, bigdl_type, size)
class SelectTable(Layer):
    '''
    Module taking a table as input and outputting the element at index `index`
    (positive or negative). The selected element can be a table or a tensor.
    Gradients of the non-selected elements are zeroed tensors of the same size,
    regardless of the depth of the encapsulated tensor, as the internal zeroing
    function is recursive.

    :param index: the index to be selected

    >>> selectTable = SelectTable(1)
    creating: createSelectTable
    '''

    def __init__(self, index, bigdl_type="float"):
        super(SelectTable, self).__init__(None, bigdl_type, index)
class SoftMax(Layer):
    '''
    Applies the SoftMax function to an n-dimensional input Tensor, rescaling
    elements so that the output lies in the range (0, 1) and sums to 1.
    Softmax is defined as: f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)
    where shift = max_i(x_i).

    >>> softMax = SoftMax()
    creating: createSoftMax
    '''

    def __init__(self, bigdl_type="float"):
        super(SoftMax, self).__init__(None, bigdl_type)
class SoftMin(Layer):
    '''
    Applies the SoftMin function to an n-dimensional input Tensor, rescaling
    elements so that the output lies in the range (0, 1) and sums to 1.
    Softmin is defined as: f_i(x) = exp(-x_i - shift) / sum_j exp(-x_j - shift)
    where shift = max_i(-x_i).

    >>> softMin = SoftMin()
    creating: createSoftMin
    '''

    def __init__(self, bigdl_type="float"):
        super(SoftMin, self).__init__(None, bigdl_type)
class SoftPlus(Layer):
    '''
    Apply the SoftPlus function to an n-dimensional input tensor.
    SoftPlus function: f_i(x) = 1/beta * log(1 + exp(beta * x_i))

    :param beta: controls the sharpness of the transfer function

    >>> softPlus = SoftPlus(1e-5)
    creating: createSoftPlus
    '''

    def __init__(self, beta=1.0, bigdl_type="float"):
        super(SoftPlus, self).__init__(None, bigdl_type, beta)
class SoftShrink(Layer):
    '''
    Apply the soft shrinkage function element-wise to the input Tensor.
    SoftShrinkage operator:
    ```
           | x - lambda, if x >  lambda
    f(x) = | x + lambda, if x < -lambda
           | 0, otherwise
    ```

    :param the_lambda: lambda, default is 0.5

    >>> softShrink = SoftShrink(1e-5)
    creating: createSoftShrink
    '''

    def __init__(self, the_lambda=0.5, bigdl_type="float"):
        super(SoftShrink, self).__init__(None, bigdl_type, the_lambda)
class SoftSign(Layer):
    '''
    Apply the SoftSign function to an n-dimensional input Tensor.
    SoftSign function: f_i(x) = x_i / (1 + |x_i|)

    >>> softSign = SoftSign()
    creating: createSoftSign
    '''

    def __init__(self, bigdl_type="float"):
        super(SoftSign, self).__init__(None, bigdl_type)
class SpatialDilatedConvolution(Layer):
    '''
    Apply a 2D dilated convolution over an input image.
    The input tensor is expected to be a 3D or 4D (with batch) tensor.
    If the input is a 3D tensor nInputPlane x height x width,
    owidth  = floor(width  + 2 * padW - dilationW * (kW-1) - 1) / dW + 1
    oheight = floor(height + 2 * padH - dilationH * (kH-1) - 1) / dH + 1

    Reference Paper: Yu F, Koltun V. Multi-scale context aggregation by dilated
    convolutions[J]. arXiv preprint arXiv:1511.07122, 2015.

    :param n_input_plane: The number of expected input planes in the image given into forward().
    :param n_output_plane: The number of output planes the convolution layer will produce.
    :param kw: The kernel width of the convolution.
    :param kh: The kernel height of the convolution.
    :param dw: The step of the convolution in the width dimension. Default is 1.
    :param dh: The step of the convolution in the height dimension. Default is 1.
    :param pad_w: The additional zeros added per width to the input planes. Default is 0.
    :param pad_h: The additional zeros added per height to the input planes. Default is 0.
    :param dilation_w: The number of pixels to skip. Default is 1.
    :param dilation_h: The number of pixels to skip. Default is 1.
    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.

    >>> spatialDilatedConvolution = SpatialDilatedConvolution(1, 1, 1, 1)
    creating: createSpatialDilatedConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kw, kh,
                 dw=1, dh=1, pad_w=0, pad_h=0, dilation_w=1, dilation_h=1,
                 wRegularizer=None, bRegularizer=None, bigdl_type="float"):
        super(SpatialDilatedConvolution, self).__init__(
            None, bigdl_type, n_input_plane, n_output_plane, kw, kh,
            dw, dh, pad_w, pad_h, dilation_w, dilation_h,
            wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set custom weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class SpatialFullConvolution(Layer):
    '''
    Apply a 2D full convolution over an input image.
    The input tensor is expected to be a 3D or 4D (with batch) tensor. Note that
    instead of setting adjW and adjH, SpatialFullConvolution[Table, T] also
    accepts a table input with two tensors: T(convInput, sizeTensor) where
    convInput is the standard input tensor and the size of sizeTensor is used to
    set the size of the output (the adjW and adjH values used to construct the
    module are then ignored). The module can be used without a bias by setting
    parameter noBias = true when constructing it.

    If the input is a 3D tensor nInputPlane x height x width,
    owidth  = (width  - 1) * dW - 2*padW + kW + adjW
    oheight = (height - 1) * dH - 2*padH + kH + adjH

    Other frameworks call this operation "In-network Upsampling",
    "Fractionally-strided convolution", "Backwards Convolution,"
    "Deconvolution", or "Upconvolution."

    Reference Paper: Long J, Shelhamer E, Darrell T. Fully convolutional
    networks for semantic segmentation[C]//Proceedings of the IEEE Conference
    on Computer Vision and Pattern Recognition. 2015: 3431-3440.

    :param nInputPlane The number of expected input planes in the image given into forward()
    :param nOutputPlane The number of output planes the convolution layer will produce.
    :param kW The kernel width of the convolution.
    :param kH The kernel height of the convolution.
    :param dW The step of the convolution in the width dimension. Default is 1.
    :param dH The step of the convolution in the height dimension. Default is 1.
    :param padW The additional zeros added per width to the input planes. Default is 0.
    :param padH The additional zeros added per height to the input planes. Default is 0.
    :param adjW Extra width to add to the output image. Default is 0.
    :param adjH Extra height to add to the output image. Default is 0.
    :param nGroup Kernel group number.
    :param noBias If bias is needed.
    :param initMethod Init method, Default, Xavier, Bilinear.
    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.

    >>> spatialFullConvolution = SpatialFullConvolution(1, 1, 1, 1)
    creating: createSpatialFullConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kw, kh,
                 dw=1, dh=1, pad_w=0, pad_h=0, adj_w=0, adj_h=0,
                 n_group=1, no_bias=False,
                 wRegularizer=None, bRegularizer=None, bigdl_type="float"):
        super(SpatialFullConvolution, self).__init__(
            None, bigdl_type, n_input_plane, n_output_plane, kw, kh,
            dw, dh, pad_w, pad_h, adj_w, adj_h, n_group, no_bias,
            wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set custom weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class VolumetricFullConvolution(Layer):
    '''
    Apply a 3D full convolution over a 3D input image, a sequence of images,
    or a video, etc. The input tensor is expected to be a 4D or 5D (with batch)
    tensor. Note that instead of setting adjT, adjW and adjH,
    `VolumetricFullConvolution` also accepts a table input with two tensors:
    T(convInput, sizeTensor) where convInput is the standard input tensor and
    the size of sizeTensor is used to set the size of the output (the adjT,
    adjW and adjH values used to construct the module are then ignored). The
    module can be used without a bias by setting parameter noBias = true when
    constructing it.

    If the input is a 4D tensor nInputPlane x depth x height x width,
    odepth  = (depth  - 1) * dT - 2*padt + kT + adjT
    owidth  = (width  - 1) * dW - 2*padW + kW + adjW
    oheight = (height - 1) * dH - 2*padH + kH + adjH

    Other frameworks call this operation "In-network Upsampling",
    "Fractionally-strided convolution", "Backwards Convolution,"
    "Deconvolution", or "Upconvolution."

    Reference Paper: Long J, Shelhamer E, Darrell T. Fully convolutional
    networks for semantic segmentation[C]//Proceedings of the IEEE Conference
    on Computer Vision and Pattern Recognition. 2015: 3431-3440.

    :param nInputPlane The number of expected input planes in the image given into forward()
    :param nOutputPlane The number of output planes the convolution layer will produce.
    :param kT The kernel depth of the convolution.
    :param kW The kernel width of the convolution.
    :param kH The kernel height of the convolution.
    :param dT The step of the convolution in the depth dimension. Default is 1.
    :param dW The step of the convolution in the width dimension. Default is 1.
    :param dH The step of the convolution in the height dimension. Default is 1.
    :param padT The additional zeros added per depth to the input planes. Default is 0.
    :param padW The additional zeros added per width to the input planes. Default is 0.
    :param padH The additional zeros added per height to the input planes. Default is 0.
    :param adjT Extra depth to add to the output image. Default is 0.
    :param adjW Extra width to add to the output image. Default is 0.
    :param adjH Extra height to add to the output image. Default is 0.
    :param nGroup Kernel group number.
    :param noBias If bias is needed.
    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.

    >>> volumetricFullConvolution = VolumetricFullConvolution(1, 1, 1, 1, 1, 1)
    creating: createVolumetricFullConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kt, kw, kh,
                 dt=1, dw=1, dh=1, pad_t=0, pad_w=0, pad_h=0,
                 adj_t=0, adj_w=0, adj_h=0, n_group=1, no_bias=False,
                 wRegularizer=None, bRegularizer=None, bigdl_type="float"):
        super(VolumetricFullConvolution, self).__init__(
            None, bigdl_type, n_input_plane, n_output_plane, kt, kw, kh,
            dt, dw, dh, pad_t, pad_w, pad_h, adj_t, adj_w, adj_h,
            n_group, no_bias, wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set custom weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class SpatialShareConvolution(Layer):
    '''
    A 2D convolution whose weights are shared; constructor arguments mirror
    SpatialConvolution, with optional explicit initial weight/bias (and their
    gradient buffers) passed as ndarrays.

    >>> spatialShareConvolution = SpatialShareConvolution(1, 1, 1, 1)
    creating: createSpatialShareConvolution
    >>> import numpy as np
    >>> init_weight = np.random.randn(1, 12, 6, 5, 5)
    >>> init_bias = np.random.randn(12)
    >>> init_grad_weight = np.zeros([1, 12, 6, 5, 5])
    >>> init_grad_bias = np.zeros([12])
    >>> conv = SpatialShareConvolution(6, 12, 5, 5, 1, 1, 0, 0, 1, True, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias)
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createSpatialShareConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kernel_w, kernel_h,
                 stride_w=1, stride_h=1, pad_w=0, pad_h=0, n_group=1,
                 propagate_back=True, wRegularizer=None, bRegularizer=None,
                 init_weight=None, init_bias=None,
                 init_grad_weight=None, init_grad_bias=None,
                 with_bias=True, bigdl_type="float"):
        # ndarray initializers are wrapped as JTensors for the JVM backend;
        # JTensor.from_ndarray(None) passes through as "use default init".
        super(SpatialShareConvolution, self).__init__(
            None, bigdl_type, n_input_plane, n_output_plane,
            kernel_w, kernel_h, stride_w, stride_h, pad_w, pad_h,
            n_group, propagate_back, wRegularizer, bRegularizer,
            JTensor.from_ndarray(init_weight),
            JTensor.from_ndarray(init_bias),
            JTensor.from_ndarray(init_grad_weight),
            JTensor.from_ndarray(init_grad_bias),
            with_bias)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set custom weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class VolumetricConvolution(Layer):
    '''
    Applies a 3D convolution over an input image composed of several input
    planes. The input tensor in forward(input) is expected to be a 4D tensor
    (nInputPlane x time x height x width).

    :param n_input_plane: The number of expected input planes in the image given into forward()
    :param n_output_plane: The number of output planes the convolution layer will produce.
    :param k_t: The kernel size of the convolution in time
    :param k_w: The kernel width of the convolution
    :param k_h: The kernel height of the convolution
    :param d_t: The step of the convolution in the time dimension. Default is 1
    :param d_w: The step of the convolution in the width dimension. Default is 1
    :param d_h: The step of the convolution in the height dimension. Default is 1
    :param pad_t: Additional zeros added to the input plane data on both sides of time axis. Default is 0. (kT-1)/2 is often used here.
    :param pad_w: The additional zeros added per width to the input planes.
    :param pad_h: The additional zeros added per height to the input planes.
    :param with_bias: whether with bias
    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.

    >>> volumetricConvolution = VolumetricConvolution(6, 12, 5, 5, 5, 1, 1, 1)
    creating: createVolumetricConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, k_t, k_w, k_h,
                 d_t=1, d_w=1, d_h=1, pad_t=0, pad_w=0, pad_h=0,
                 with_bias=True, wRegularizer=None, bRegularizer=None,
                 bigdl_type="float"):
        super(VolumetricConvolution, self).__init__(
            None, bigdl_type, n_input_plane, n_output_plane,
            k_t, k_w, k_h, d_t, d_w, d_h, pad_t, pad_w, pad_h,
            with_bias, wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set custom weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)
        return self
class VolumetricMaxPooling(Layer):
    '''
    Applies 3D max-pooling in kT x kW x kH regions with step size dT x dW x dH.
    The number of output features is equal to the number of input planes / dT.
    The input can optionally be padded with zeros. Padding should be smaller
    than half of the kernel size, i.e. padT < kT/2, padW < kW/2 and padH < kH/2.

    :param k_t: The kernel size
    :param k_w: The kernel width
    :param k_h: The kernel height
    :param d_t: The step in the time dimension
    :param d_w: The step in the width dimension
    :param d_h: The step in the height dimension
    :param pad_t: The padding in the time dimension
    :param pad_w: The padding in the width dimension
    :param pad_h: The padding in the height dimension

    >>> volumetricMaxPooling = VolumetricMaxPooling(5, 5, 5, 1, 1, 1)
    creating: createVolumetricMaxPooling
    '''

    def __init__(self, k_t, k_w, k_h, d_t, d_w, d_h,
                 pad_t=0, pad_w=0, pad_h=0, bigdl_type="float"):
        super(VolumetricMaxPooling, self).__init__(
            None, bigdl_type, k_t, k_w, k_h, d_t, d_w, d_h,
            pad_t, pad_w, pad_h)
class VolumetricAveragePooling(Layer):
    '''
    Applies 3D average-pooling in kT x kW x kH regions with step size
    dT x dW x dH. The number of output features is equal to the number of
    input planes / dT. The input can optionally be padded with zeros. Padding
    should be smaller than half of the kernel size, i.e. padT < kT/2,
    padW < kW/2 and padH < kH/2.

    :param k_t: The kernel size
    :param k_w: The kernel width
    :param k_h: The kernel height
    :param d_t: The step in the time dimension
    :param d_w: The step in the width dimension
    :param d_h: The step in the height dimension
    :param pad_t: The padding in the time dimension
    :param pad_w: The padding in the width dimension
    :param pad_h: The padding in the height dimension
    :param count_include_pad: whether to include padding when dividing the number of elements in the pooling region
    :param ceil_mode: whether the output size is to be ceiled or floored

    >>> volumetricAveragePooling = VolumetricAveragePooling(5, 5, 5, 1, 1, 1)
    creating: createVolumetricAveragePooling
    '''

    def __init__(self, k_t, k_w, k_h, d_t, d_w, d_h,
                 pad_t=0, pad_w=0, pad_h=0,
                 count_include_pad=True, ceil_mode=False, bigdl_type="float"):
        super(VolumetricAveragePooling, self).__init__(
            None, bigdl_type, k_t, k_w, k_h, d_t, d_w, d_h,
            pad_t, pad_w, pad_h, count_include_pad, ceil_mode)
class SpatialZeroPadding(Layer):
    '''
    Pads each feature map of the input with the specified number of zeros.
    If a padding value is negative, the input is cropped on that side instead.

    :param padLeft: pad left position
    :param padRight: pad right position
    :param padTop: pad top position
    :param padBottom: pad bottom position

    >>> spatialZeroPadding = SpatialZeroPadding(1, 1, 1, 1)
    creating: createSpatialZeroPadding
    '''

    def __init__(self, pad_left, pad_right, pad_top, pad_bottom,
                 bigdl_type="float"):
        super(SpatialZeroPadding, self).__init__(
            None, bigdl_type, pad_left, pad_right, pad_top, pad_bottom)
class SplitTable(Layer):
    '''
    Creates a module that takes a Tensor as input and outputs several tables,
    splitting the Tensor along the specified dimension `dimension`. Note that
    the dimension starts from 1.

    The input to this layer is expected to be a tensor, or a batch of tensors;
    when using mini-batch, a batch of sample tensors will be passed to the
    layer and the user needs to specify the number of dimensions of each
    sample tensor in a batch using `nInputDims`.

    :param dimension: to be split along this dimension
    :param n_input_dims: specify the number of dimensions that this module will receive. If it is more than the dimension of input tensors, the first dimension would be considered as batch size

    >>> splitTable = SplitTable(1, 1)
    creating: createSplitTable
    '''

    def __init__(self, dimension, n_input_dims=-1, bigdl_type="float"):
        super(SplitTable, self).__init__(
            None, bigdl_type, dimension, n_input_dims)
class Sqrt(Layer):
    '''
    Apply an element-wise sqrt operation.

    >>> sqrt = Sqrt()
    creating: createSqrt
    '''

    def __init__(self, bigdl_type="float"):
        super(Sqrt, self).__init__(None, bigdl_type)
class Square(Layer):
    '''
    Apply an element-wise square operation.

    >>> square = Square()
    creating: createSquare
    '''

    def __init__(self, bigdl_type="float"):
        super(Square, self).__init__(None, bigdl_type)
class Squeeze(Layer):
    '''
    Delete all singleton dimensions, or a specific singleton dim.

    :param dim: Optional. The dimension to be deleted. Default: delete all singleton dimensions.
    :param num_input_dims: Optional. If in a batch model, set to the inputDims.

    >>> squeeze = Squeeze(1)
    creating: createSqueeze
    '''

    def __init__(self, dim, num_input_dims=INTMIN, bigdl_type="float"):
        super(Squeeze, self).__init__(
            None, bigdl_type, dim, num_input_dims)
class Sum(Layer):
    '''
    A simple layer that applies a sum operation over the given dimension.
    When nInputDims is provided, the input will be considered as batches, and
    the sum operation will be applied in (dimension + 1).

    The input to this layer is expected to be a tensor, or a batch of tensors;
    when using mini-batch, a batch of sample tensors will be passed to the
    layer and the user needs to specify the number of dimensions of each
    sample tensor in the batch using `nInputDims`.

    :param dimension: the dimension to be applied sum operation
    :param n_input_dims: specify the number of dimensions that this module will receive. If it is more than the dimension of input tensors, the first dimension would be considered as batch size
    :param size_average: default is false; if it is true, it will return the mean instead
    :param squeeze: default is true, which will squeeze the sum dimension; set it to false to keep the sum dimension

    >>> sum = Sum(1, 1, True, True)
    creating: createSum
    '''

    def __init__(self, dimension=1, n_input_dims=-1, size_average=False,
                 squeeze=True, bigdl_type="float"):
        # NOTE: the backend constructor takes `squeeze` before `size_average`,
        # i.e. the reverse of this Python signature's order.
        super(Sum, self).__init__(
            None, bigdl_type, dimension, n_input_dims, squeeze, size_average)
class TanhShrink(Layer):
    '''
    A simple layer that, for each element of the input tensor, computes the
    tanh-shrink function during the forward pass:

    [f(x) = x - tanh(x)]

    (The previous doc stated tanh(x) - 1, which does not match the standard
    TanhShrink definition.)

    >>> tanhShrink = TanhShrink()
    creating: createTanhShrink
    '''

    def __init__(self, bigdl_type="float"):
        super(TanhShrink, self).__init__(None, bigdl_type)
class Threshold(Layer):
    '''
    Threshold the input Tensor: values smaller than `th` are replaced with `v`.

    :param th: the threshold to compare with
    :param v: the value to replace with
    :param ip: inplace mode

    >>> threshold = Threshold(1e-5, 1e-5, True)
    creating: createThreshold
    '''

    def __init__(self, th=1e-6, v=0.0, ip=False, bigdl_type="float"):
        super(Threshold, self).__init__(None, bigdl_type, th, v, ip)
class Negative(Layer):
    '''
    Create a Negative layer, computing the negative value of each element of
    the input tensor.

    :param inplace: if the output tensor reuses the input tensor's storage. Default value is false

    >>> negative = Negative(False)
    creating: createNegative
    '''

    def __init__(self, inplace=False, bigdl_type="float"):
        super(Negative, self).__init__(None, bigdl_type, inplace)
class Unsqueeze(Layer):
    '''
    Create an Unsqueeze layer: insert a singleton dim (i.e. dimension 1) at
    position pos. For an input with dim = input.dim(), there are dim + 1
    possible positions to insert the singleton dimension.

    :param pos: The position where the singleton will be inserted.
    :param num_input_dims: Optional. If in a batch model, set to the inputDim

    >>> unsqueeze = Unsqueeze(1, 1)
    creating: createUnsqueeze
    '''

    def __init__(self, pos, num_input_dims=INTMIN, bigdl_type="float"):
        super(Unsqueeze, self).__init__(
            None, bigdl_type, pos, num_input_dims)
class Reshape(Layer):
    '''
    forward(input) reshapes the input tensor into a size(0) * size(1) * ...
    tensor, taking the elements row-wise.

    :param size: the reshape size

    >>> reshape = Reshape([1, 28, 28])
    creating: createReshape
    >>> reshape = Reshape([1, 28, 28], False)
    creating: createReshape
    '''

    def __init__(self, size, batch_mode=None, bigdl_type="float"):
        super(Reshape, self).__init__(None, bigdl_type, size, batch_mode)
class BiRecurrent(Container):
    '''
    Create a bidirectional recurrent layer.

    :param merge: merge layer

    >>> biRecurrent = BiRecurrent(CAddTable())
    creating: createCAddTable
    creating: createBiRecurrent
    >>> biRecurrent = BiRecurrent()
    creating: createBiRecurrent
    '''

    def __init__(self, merge=None, bigdl_type="float"):
        super(BiRecurrent, self).__init__(None, bigdl_type, merge)
class ConcatTable(Container):
    '''
    ConcatTable is a container module like Concat. It applies the input to
    each member module; the input can be a tensor or a table.

    ConcatTable usually works with CAddTable and CMulTable to implement
    element-wise add/multiply on the outputs of two modules.

    >>> concatTable = ConcatTable()
    creating: createConcatTable
    '''

    def __init__(self, bigdl_type="float"):
        super(ConcatTable, self).__init__(None, bigdl_type)
class Identity(Layer):
    '''
    Identity simply returns the input as output.
    It is useful in e.g. parallel containers to obtain the original input.

    >>> identity = Identity()
    creating: createIdentity
    '''

    def __init__(self, bigdl_type="float"):
        super(Identity, self).__init__(None, bigdl_type)
class Reverse(Layer):
    '''
    Reverse the input w.r.t. the given dimension.
    The input can be a Tensor or a Table.

    :param dim:

    >>> reverse = Reverse()
    creating: createReverse
    >>> reverse = Reverse(1, False)
    creating: createReverse
    '''

    def __init__(self, dimension=1, is_inplace=False, bigdl_type="float"):
        super(Reverse, self).__init__(
            None, bigdl_type, dimension, is_inplace)
class Transpose(Layer):
    '''
    Transpose the input along the specified dimensions.

    :param permutations: dimension pairs that need to swap

    >>> transpose = Transpose([(1,2)])
    creating: createTranspose
    '''

    def __init__(self, permutations, bigdl_type="float"):
        super(Transpose, self).__init__(None, bigdl_type, permutations)
class SpatialContrastiveNormalization(Layer):
    '''
    Subtractive + divisive contrast normalization.

    :param n_input_plane:
    :param kernel:
    :param threshold:
    :param thresval:

    >>> kernel = np.ones([9,9]).astype("float32")
    >>> spatialContrastiveNormalization = SpatialContrastiveNormalization(1, kernel)
    creating: createSpatialContrastiveNormalization
    >>> spatialContrastiveNormalization = SpatialContrastiveNormalization()
    creating: createSpatialContrastiveNormalization
    '''

    def __init__(self, n_input_plane=1, kernel=None, threshold=1e-4,
                 thresval=1e-4, bigdl_type="float"):
        # kernel ndarray is wrapped as a JTensor; None passes through.
        super(SpatialContrastiveNormalization, self).__init__(
            None, bigdl_type, n_input_plane,
            JTensor.from_ndarray(kernel), threshold, thresval)
class SpatialConvolutionMap(Layer):
    '''
    This class is a generalization of SpatialConvolution.
    It uses a generic connection table between input and output features;
    SpatialConvolution is equivalent to using a full connection table.

    When padW and padH are both -1, a padding algorithm similar to
    TensorFlow's "SAME" padding is used. That is:
    outHeight = Math.ceil(inHeight.toFloat/strideH.toFloat)
    outWidth = Math.ceil(inWidth.toFloat/strideW.toFloat)
    padAlongHeight = Math.max(0, (outHeight - 1) * strideH + kernelH - inHeight)
    padAlongWidth = Math.max(0, (outWidth - 1) * strideW + kernelW - inWidth)
    padTop = padAlongHeight / 2
    padLeft = padAlongWidth / 2

    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.

    >>> ct = np.ones([9,9]).astype("float32")
    >>> spatialConvolutionMap = SpatialConvolutionMap(ct, 9, 9)
    creating: createSpatialConvolutionMap
    '''

    def __init__(self, conn_table, kw, kh, dw=1, dh=1, pad_w=0, pad_h=0,
                 wRegularizer=None, bRegularizer=None, bigdl_type="float"):
        # The connection-table ndarray is wrapped as a JTensor for the backend.
        super(SpatialConvolutionMap, self).__init__(
            None, bigdl_type, JTensor.from_ndarray(conn_table),
            kw, kh, dw, dh, pad_w, pad_h, wRegularizer, bRegularizer)
class SpatialDivisiveNormalization(Layer):
    '''
    Applies a spatial division operation on a series of 2D inputs, using a
    kernel to compute the weighted average in a neighborhood. The neighborhood
    is a local spatial region the size of the kernel, across all features. For
    a single-feature input image the region is purely spatial; for an RGB
    image the weighted average is taken over the RGB channels and a spatial
    region.

    If the kernel is 1D, it is used to construct a separable 2D kernel, which
    is much more efficient.

    The kernel is generally chosen as a Gaussian when the correlation of two
    pixel locations is believed to decrease with distance. On the feature
    dimension a uniform average is used since the weighting across features is
    not known.

    :param nInputPlane: number of input plane, default is 1.
    :param kernel: kernel tensor, default is a 9 x 9 tensor.
    :param threshold: threshold
    :param thresval: threshold value to replace with if data is smaller than threshold

    >>> kernel = np.ones([9,9]).astype("float32")
    >>> spatialDivisiveNormalization = SpatialDivisiveNormalization(2,kernel)
    creating: createSpatialDivisiveNormalization
    >>> spatialDivisiveNormalization = SpatialDivisiveNormalization()
    creating: createSpatialDivisiveNormalization
    '''

    def __init__(self, n_input_plane=1, kernel=None, threshold=1e-4,
                 thresval=1e-4, bigdl_type="float"):
        super(SpatialDivisiveNormalization, self).__init__(
            None, bigdl_type, n_input_plane,
            JTensor.from_ndarray(kernel), threshold, thresval)
class SpatialSubtractiveNormalization(Layer):
    '''
    Applies a spatial subtraction operation on a series of 2D inputs, using a
    kernel to compute the weighted average in a neighborhood. The neighborhood
    is a local spatial region the size of the kernel, across all features. For
    a single-feature input image the region is purely spatial; for an RGB
    image the weighted average is taken over the RGB channels and a spatial
    region.

    If the kernel is 1D, it is used to construct a separable 2D kernel, which
    is much more efficient.

    The kernel is generally chosen as a Gaussian when the correlation of two
    pixel locations is believed to decrease with distance. On the feature
    dimension a uniform average is used since the weighting across features is
    not known.

    :param n_input_plane: number of input plane, default is 1.
    :param kernel: kernel tensor, default is a 9 x 9 tensor.

    >>> kernel = np.ones([9,9]).astype("float32")
    >>> spatialSubtractiveNormalization = SpatialSubtractiveNormalization(2,kernel)
    creating: createSpatialSubtractiveNormalization
    >>> spatialSubtractiveNormalization = SpatialSubtractiveNormalization()
    creating: createSpatialSubtractiveNormalization
    '''

    def __init__(self, n_input_plane=1, kernel=None, bigdl_type="float"):
        super(SpatialSubtractiveNormalization, self).__init__(
            None, bigdl_type, n_input_plane, JTensor.from_ndarray(kernel))
class SpatialWithinChannelLRN(Layer):
    '''
    The local response normalization layer performs a kind of lateral
    inhibition by normalizing over local input regions. The local regions
    extend spatially in separate channels (i.e., they have shape
    1 x local_size x local_size).

    :param size the side length of the square region to sum over
    :param alpha the scaling parameter
    :param beta the exponent

    >>> layer = SpatialWithinChannelLRN()
    creating: createSpatialWithinChannelLRN
    '''

    def __init__(self, size=5, alpha=1.0, beta=0.75, bigdl_type="float"):
        super(SpatialWithinChannelLRN, self).__init__(
            None, bigdl_type, size, alpha, beta)
class Pack(Layer):
    '''
    Stacks a list of n-dimensional tensors into one (n+1)-dimensional tensor.

    >>> layer = Pack(1)
    creating: createPack
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(Pack, self).__init__(None, bigdl_type, dimension)
class ConvLSTMPeephole(Layer):
    '''
    | Convolution Long Short Term Memory architecture with peephole.
    | Ref. A.: https://arxiv.org/abs/1506.04214 (blueprint for this module)
    | B. https://github.com/viorik/ConvLSTM

    :param input_size: number of input planes in the image given into forward()
    :param output_size: number of output planes the convolution layer will produce
    :param kernel_i: Convolutional filter size to convolve input
    :param kernel_c: Convolutional filter size to convolve cell
    :param stride: The step of the convolution, default is 1
    :param padding: The additional zeros added, default is -1
    :param activation: activation function, Tanh by default if not specified.
                       It can also be the name of an existing activation as a string.
    :param inner_activation: activation function for the inner cells, Sigmoid by default if not
                             specified. It can also be the name of an existing activation as a string.
    :param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices
    :param uRegularizer: instance [[Regularizer]] (eg. L1 or L2 regularization), applied to the recurrent weights matrices
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.
    :param cRegularizer: instance of [[Regularizer]] applied to peephole.
    :param with_peephole: whether use last cell status control a gate.

    >>> convlstm = ConvLSTMPeephole(4, 3, 3, 3, 1, -1, Tanh(), HardSigmoid(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
    creating: createTanh
    creating: createHardSigmoid
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createL1Regularizer
    creating: createConvLSTMPeephole
    '''

    def __init__(self, input_size, output_size, kernel_i, kernel_c, stride=1, padding=-1,
                 activation=None, inner_activation=None,
                 wRegularizer=None, uRegularizer=None, bRegularizer=None, cRegularizer=None,
                 with_peephole=True, bigdl_type="float"):
        # Resolve the activations: fall back to the defaults when unset, and
        # look up by name when given as strings. (The default instances are
        # never strings, so the elif is equivalent to two separate checks.)
        if not activation:
            activation = Tanh()
        elif isinstance(activation, six.string_types):
            activation = get_activation_by_name(activation)
        if not inner_activation:
            inner_activation = Sigmoid()
        elif isinstance(inner_activation, six.string_types):
            inner_activation = get_activation_by_name(inner_activation)
        super(ConvLSTMPeephole, self).__init__(
            None, bigdl_type, input_size, output_size, kernel_i, kernel_c,
            stride, padding, activation, inner_activation,
            wRegularizer, uRegularizer, bRegularizer, cRegularizer,
            with_peephole)
class Tile(Layer):
    '''
    Replicate 'copies' copies along the 'dim' dimension.

    >>> layer = Tile(1, 2)
    creating: createTile
    '''

    def __init__(self, dim=1, copies=2, bigdl_type="float"):
        super(Tile, self).__init__(None, bigdl_type, dim, copies)
class BinaryThreshold(Layer):
    '''
    Binary threshold: 1 if value > th, 0 otherwise.

    >>> layer = BinaryThreshold(0.1, False)
    creating: createBinaryThreshold
    '''

    def __init__(self, th=1e-6, ip=False, bigdl_type="float"):
        super(BinaryThreshold, self).__init__(None, bigdl_type, th, ip)
class ConvLSTMPeephole3D(Layer):
'''
:param input_size: number of input planes in the image given into forward()
:param output_size: number of output planes the convolution layer will produce
:param kernel_i Convolutional filter size to convolve input
:param kernel_c Convolutional filter size to convolve cell
:param stride The step of the convolution
:param padding The additional zeros added
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
:param cRegularizer: instance of [[Regularizer]]applied to peephole.
:param with_peephole: whether use last cell status control a gate.
>>> convlstm = ConvLSTMPeephole3D(4, 3, 3, 3, 1, -1, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createConvLSTMPeephole3D
'''
def __init__(self, input_size, output_size, kernel_i, kernel_c, stride=1, padding=-1, wRegularizer=None, uRegularizer=None,
bRegularizer=None, cRegularizer=None, with_peephole=True, bigdl_type="float"):
super(ConvLSTMPeephole3D, self).__init__(None, bigdl_type, input_size, output_size, kernel_i, kernel_c, stride,
padding, wRegularizer, uRegularizer, bRegularizer, cRegularizer, with_peephole)
class MultiRNNCell(Layer):
'''
A cell that enables stack multiple simple rnn cells
>>> cells = []
>>> cells.append(ConvLSTMPeephole3D(4, 3, 3, 3, 1))
creating: createConvLSTMPeephole3D
>>> cells.append(ConvLSTMPeephole3D(4, 3, 3, 3, 1))
creating: createConvLSTMPeephole3D
>>> stacked_convlstm = MultiRNNCell(cells)
creating: createMultiRNNCell
'''
def __init__(self, cells, bigdl_type="float"):
super(MultiRNNCell, self).__init__(None, bigdl_type, cells)
class ResizeBilinear(Layer):
"""
Resize the input image with bilinear interpolation. The input image must be a float tensor with
NHWC layout
:param output_height: output height
:param output_width: output width
:param align_corner: align corner or not
>>> resizeBilinear = ResizeBilinear(10, 20, False)
creating: createResizeBilinear
"""
def __init__(self, output_height, output_width, align_corner, bigdl_type="float"):
super(ResizeBilinear, self).__init__(None, bigdl_type, output_height, output_width, align_corner)
class GaussianSampler(Layer):
"""
Takes {mean, log_variance} as input and samples from the Gaussian distribution
>>> sampler = GaussianSampler()
creating: createGaussianSampler
"""
def __init__(self, bigdl_type="float"):
super(GaussianSampler, self).__init__(None, bigdl_type)
class Masking(Layer):
'''
Use a mask value to skip timesteps for a sequence
```
:param mask_value: mask value
>>> masking = Masking(0.0)
creating: createMasking
'''
def __init__(self,
mask_value,
bigdl_type="float"):
super(Masking, self).__init__(None, bigdl_type,
mask_value)
class Maxout(Layer):
'''
A linear maxout layer Maxout layer select the element-wise maximum value of
maxoutNumber Linear(inputSize, outputSize) layers
```
:param input_size: the size the each input sample
:param output_size: the size of the module output of each sample
:param maxout_number: number of Linear layers to use
:param with_bias: whether use bias in Linear
:param w_regularizer: instance of [[Regularizer]]
(eg. L1 or L2 regularization), applied to the input weights matrices.
:param b_regularizer: instance of [[Regularizer]]
applied to the bias.
:param init_weight: initial weight
:param init_bias: initial bias
>>> maxout = Maxout(2, 5, 3)
creating: createMaxout
'''
def __init__(self,
input_size,
output_size,
maxout_number,
with_bias = True,
w_regularizer=None,
b_regularizer=None,
init_weight=None,
init_bias=None,
bigdl_type="float"):
super(Maxout, self).__init__(None, bigdl_type,
input_size, output_size, maxout_number, with_bias,
w_regularizer, b_regularizer, init_weight, init_bias)
class HardSigmoid(Layer):
"""
Apply Hard-sigmoid function
```
| 0, if x < -2.5
f(x) = | 1, if x > 2.5
| 0.2 * x + 0.5, otherwise
```
>>> hardSigmoid = HardSigmoid()
creating: createHardSigmoid
"""
def __init__(self, bigdl_type="float"):
super(HardSigmoid, self).__init__(None, bigdl_type)
class Highway(Layer):
"""
Densely connected highway network.
Highway layers are a natural extension of LSTMs to feedforward networks.
:param size input size
:param with_bias whether to include a bias
:param activation activation function. It can also be the name of an existing activation as a string.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]], applied to the bias.
>>> highway = Highway(2)
creating: createHighway
"""
def __init__(self, size, with_bias=True, activation=None, wRegularizer=None, bRegularizer=None, bigdl_type="float"):
if isinstance(activation, six.string_types):
activation = get_activation_by_name(activation)
super(Highway, self).__init__(None, bigdl_type, size, with_bias, activation, wRegularizer, bRegularizer)
class UpSampling3D(Layer):
"""
Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by size[0], size[1] and size[2] respectively.
The input data is assumed to be of the form `minibatch x channels x depth x height x width`.
:param size Repeats the depth, height, width dimensions of the data by
>>> upsample3d = UpSampling3D([1, 2, 3])
creating: createUpSampling3D
"""
def __init__(self, size, bigdl_type="float"):
super(UpSampling3D, self).__init__(None, bigdl_type, size)
class PriorBox(Layer):
"""
Generate the prior boxes of designated sizes and aspect ratios across
all dimensions (H * W)
Intended for use with MultiBox detection method to generate prior
:param min_sizes minimum box size in pixels. can be multiple. required!
:param max_sizes maximum box size in pixels. can be ignored or same as the # of min_size.
:param aspect_ratios optional aspect ratios of the boxes. can be multiple
:param is_flip optional bool, default true. if set, flip the aspect ratio.
:param is_clip whether to clip the prior's coordidate such that it is within [0, 1]
>>> layer = PriorBox([0.1])
creating: createPriorBox
"""
def __init__(self, min_sizes,
max_sizes=None,
aspect_ratios=None,
is_flip=True,
is_clip=False,
variances=None,
offset = 0.5,
img_h=0,
img_w=0,
img_size=0,
step_h=0.0,
step_w=0.0,
step=0.0,
bigdl_type="float"):
super(PriorBox, self).__init__(None, bigdl_type,
min_sizes,
max_sizes,
aspect_ratios,
is_flip,
is_clip,
variances,
offset,
img_h,
img_w,
img_size,
step_h,
step_w,
step)
class NormalizeScale(Layer):
"""
NormalizeScale is conposed of normalize and scale, this is equal to caffe Normalize layer
:param p L_p norm
:param eps smoothing parameter
:param scale scale parameter
:param size size of scale input
:param w_regularizer weight regularizer
>>> layer = NormalizeScale(2.0, scale = 20.0, size = [1, 5, 1, 1])
creating: createNormalizeScale
"""
def __init__(self, p, scale, size, w_regularizer=None, eps=1e-10,
bigdl_type="float"):
super(NormalizeScale, self).__init__(None, bigdl_type, p, eps, scale, size, w_regularizer)
class Proposal(Layer):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
rois: holds R regions of interest, each is a 5-tuple
(n, x1, y1, x2, y2) specifying an image batch index n and a rectangle (x1, y1, x2, y2)
scores: holds scores for R regions of interest
>>> layer = Proposal(1000, 200, [0.1, 0.2], [2.0, 3.0])
creating: createProposal
"""
def __init__(self, pre_nms_topn, post_nms_topn, ratios, scales,
rpn_pre_nms_topn_train=12000, rpn_post_nms_topn_train=2000,
bigdl_type="float"):
super(Proposal, self).__init__(None, bigdl_type,
pre_nms_topn,
post_nms_topn,
ratios,
scales,
rpn_pre_nms_topn_train,
rpn_post_nms_topn_train)
class DetectionOutputSSD(Layer):
"""
Layer to Post-process SSD output
:param n_classes number of classes
:param share_location whether to share location, default is true
:param bg_label background label
:param nms_thresh nms threshold
:param nms_topk nms topk
:param keep_top_k result topk
:param conf_thresh confidence threshold
:param variance_encoded_in_target if variance is encoded in target,
we simply need to retore the offset predictions,
else if variance is encoded in bbox,
we need to scale the offset accordingly.
:param conf_post_process whether add some additional post process to confidence prediction
>>> layer = DetectionOutputSSD()
creating: createDetectionOutputSSD
"""
def __init__(self, n_classes=21,
share_location=True,
bg_label=0,
nms_thresh=0.45,
nms_topk=400,
keep_top_k=200,
conf_thresh=0.01,
variance_encoded_in_target=False,
conf_post_process=True,
bigdl_type="float"):
super(DetectionOutputSSD, self).__init__(None,
bigdl_type,
n_classes,
share_location,
bg_label,
nms_thresh,
nms_topk,
keep_top_k,
conf_thresh,
variance_encoded_in_target,
conf_post_process)
class DetectionOutputFrcnn(Layer):
"""
Post process Faster-RCNN models
:param nms_thresh nms threshold
:param n_classes number of classes
:param bbox_vote whether to vote for detections
:param max_per_image limit max number of detections per image
:param thresh score threshold
>>> layer = DetectionOutputFrcnn(21, True)
creating: createDetectionOutputFrcnn
"""
def __init__(self, n_classes, bbox_vote, nms_thresh = 0.3,
max_per_image=100, thresh=0.05,
bigdl_type="float"):
super(DetectionOutputFrcnn, self).__init__(None, bigdl_type, nms_thresh,
n_classes,
bbox_vote,
max_per_image,
thresh)
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import layer
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = layer.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test layer",
conf=create_spark_conf())
globs['sc'] = sc
init_engine()
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| luchy0120/BigDL | pyspark/bigdl/nn/layer.py | Python | apache-2.0 | 184,483 | [
"Gaussian"
] | 8b305479a027ec0174889d8444eea9751c2697d7a96de06cf88d3079e99fa8b0 |
from __future__ import print_function
import unittest
import IMP.rmf
import IMP.test
import IMP.container
import RMF
from IMP.algebra import *
class Tests(IMP.test.TestCase):
def _create(self, m):
pdbname = self.get_input_file_name("simple.pdb")
full = IMP.atom.read_pdb(pdbname, m)
chain = IMP.atom.get_by_type(full, IMP.atom.CHAIN_TYPE)[0]
rep = IMP.atom.Representation.setup_particle(full)
for i in range(1, 3):
simp = IMP.atom.create_simplified_along_backbone(chain, i)
simp.set_name("A " + str(i))
rep.add_representation(simp, IMP.atom.BALLS)
return full
def _create_custom(self, m):
pdbname = self.get_input_file_name("simple.pdb")
full = IMP.atom.read_pdb(pdbname, m)
chain = IMP.atom.get_by_type(full, IMP.atom.CHAIN_TYPE)[0]
rep = IMP.atom.Representation.setup_particle(full, 0)
for i in range(1, 3):
simp = IMP.atom.create_simplified_along_backbone(chain, i)
simp.set_name("A " + str(i))
rep.add_representation(simp, IMP.atom.BALLS, i + 1)
return full
def test_0(self):
"""Test writing mult resolutions"""
m = IMP.Model()
h = self._create(m)
IMP.atom.show_molecular_hierarchy(h)
resolutions = IMP.atom.Representation(h).get_resolutions()
rmfname = self.get_tmp_file_name("multires.rmfz")
fh = RMF.create_rmf_file(rmfname)
IMP.rmf.add_hierarchy(fh, h)
IMP.rmf.save_frame(fh, "frame")
del fh
fh = RMF.open_rmf_file_read_only(rmfname)
RMF.show_hierarchy(fh.get_root_node())
IMP.rmf.link_hierarchies(fh, [h])
del fh
fh = RMF.open_rmf_file_read_only(rmfname)
h2 = IMP.rmf.create_hierarchies(fh, m)
rd = IMP.atom.Representation(h2[0])
back_resolutions = rd.get_resolutions()
print(back_resolutions)
for p in zip(resolutions, back_resolutions):
self.assertAlmostEqual(p[0], p[1], delta=.1)
def test_custom_resolutions(self):
"""Test writing RMF file w/ explicit resolutions is read correctly"""
m = IMP.Model()
h = self._create_custom(m)
IMP.atom.show_molecular_hierarchy(h)
resolutions = IMP.atom.Representation(h).get_resolutions()
rmfname = self.get_tmp_file_name("multires.rmfz")
fh = RMF.create_rmf_file(rmfname)
IMP.rmf.add_hierarchy(fh, h)
IMP.rmf.save_frame(fh, "frame")
del fh
fh = RMF.open_rmf_file_read_only(rmfname)
RMF.show_hierarchy(fh.get_root_node())
IMP.rmf.link_hierarchies(fh, [h])
del fh
fh = RMF.open_rmf_file_read_only(rmfname)
h2 = IMP.rmf.create_hierarchies(fh, m)
rd = IMP.atom.Representation(h2[0])
back_resolutions = rd.get_resolutions()
print(back_resolutions)
for p in zip(resolutions, back_resolutions):
self.assertAlmostEqual(p[0], p[1], delta=.1)
def test_multi_type(self):
"""Test using same particle for two representations"""
m = IMP.Model()
p = IMP.Particle(m)
center = IMP.algebra.Vector3D(0,0,0)
rad = 1.0
IMP.core.XYZR.setup_particle(p,IMP.algebra.Sphere3D(center,rad))
IMP.atom.Mass.setup_particle(p,1.0)
trans = IMP.algebra.Transformation3D(IMP.algebra.get_identity_rotation_3d(),center)
shape = IMP.algebra.Gaussian3D(IMP.algebra.ReferenceFrame3D(trans),[rad]*3)
g = IMP.core.Gaussian.setup_particle(p,shape)
root = IMP.atom.Hierarchy(IMP.Particle(m))
rep = IMP.atom.Representation.setup_particle(root, 0)
# particle is BALLS resolution 0 and DENSITIES resolution 0
root.add_child(IMP.atom.Hierarchy(p))
rep.add_representation(g,IMP.atom.DENSITIES,0)
rmfname = self.get_tmp_file_name("multitype.rmfz")
fh = RMF.create_rmf_file(rmfname)
IMP.rmf.add_hierarchy(fh, root)
IMP.rmf.save_frame(fh, "frame")
del fh
# check upon reading you get the same particle as both BALLS and DENSITIES
fh = RMF.open_rmf_file_read_only(rmfname)
h2 = IMP.rmf.create_hierarchies(fh, m)
selA = IMP.atom.Selection(h2)
selD = IMP.atom.Selection(h2,representation_type=IMP.atom.DENSITIES)
self.assertEqual(selA.get_selected_particles()[0],
selD.get_selected_particles()[0])
del fh
fh = RMF.open_rmf_file_read_only(rmfname)
IMP.rmf.link_hierarchies(fh, h2)
if __name__ == '__main__':
IMP.test.main()
| shanot/imp | modules/rmf/test/test_representations.py | Python | gpl-3.0 | 4,625 | [
"Gaussian"
] | 54892d1ea760ccf680632dfd3ecfebf89a626e5550d325f379882c9ef5887695 |
## INFO ########################################################################
## ##
## COUBLET ##
## ======= ##
## ##
## Cross-platform desktop client to follow posts from COUB ##
## Version: 0.6.93.172 (20140814) ##
## ##
## File: views/window.py ##
## ##
## Designed and written by Peter Varo. Copyright (c) 2014 ##
## License agreement is provided in the LICENSE file ##
## For more info visit: https://github.com/petervaro/coub ##
## ##
## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ##
## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ##
## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ##
## http://coub.com ##
## ##
######################################################################## INFO ##
# Import PyQt5 modules
from PyQt5.QtCore import Qt, QTimer, QElapsedTimer
from PyQt5.QtWidgets import (QWidget,
QFrame,
QHBoxLayout,
QVBoxLayout,
QScrollArea,
QDesktopWidget)
# Import Coublet modules
from views.vars import *
from models.cache import CACHE
from models.api import CoubAPI
from widgets.handler import CoubletMouseEventHandler
from widgets.button import (CoubletButtonWidget,
VERTICAL,
HORIZONTAL,
ICON_AND_LABEL,
LABEL_AND_ICON)
#------------------------------------------------------------------------------#
class CoubletWindowView(QWidget):
SCROLL_POSITIVE = 30
SCROLL_NEGATIVE = -SCROLL_POSITIVE
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, presenter, title):
super().__init__(None)
# Store static values
self._presenter = presenter
self._title = title.upper()
self.setWindowTitle(title)
self._buttons = []
self._stream = None
# Build GUI
self._build_gui()
# Overload closing and scrolling event, and rename it just
# for the sake of under-scored names ;)
self.closeEvent = self.on_exit
self.wheelEvent = self.on_mouse_scroll
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_exit(self, event):
# Get current dimension and store in cache
dim = self.geometry()
CACHE['dimension'] = dim.x(), dim.y(), dim.width(), dim.height()
# TODO: this call is at the wrong place
CACHE.save()
# Exit
event.accept()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_mouse_scroll(self, event):
# Get the "stregth" of scroll
dy = event.pixelDelta().y()
# If "hard" enoough downward
if dy < self.SCROLL_NEGATIVE:
self._presenter.load_posts()
# If "hard" enough upward
elif dy > self.SCROLL_POSITIVE:
self._presenter.sync_posts()
# Kill posts in stream which are not visible
self._presenter.reset_unseen_posts()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_menu_button_pressed(self, index):
# Report event to presenter
self._presenter.set_active_stream(index)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def remove_stream(self, index):
# Set button deselected
self._buttons[index].deselect()
# Remove stream from layout
self._posts.takeAt(1)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def set_stream(self, index, stream):
# Set button selected
self._buttons[index].select()
# Indicate change in window title too
self.setWindowTitle(
'{} | {}'.format(self._title, CoubAPI.STREAM_NAMES[index].upper()))
# Set stream to layout
self._posts.insertLayout(1, stream)
self._stream = stream
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def get_scroll_position(self):
# Get position of scroll bar
return self._scroll_area.verticalScrollBar().sliderPosition()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def set_scroll_position(self, value):
# Set position of scroll bar
self._scroll_area.verticalScrollBar().setSliderPosition(value)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def show_scroll_indicators(self, up=False, down=False):
# Place scroll indicators
up_space = down_space = POST_SPACING_FULL
if up:
self._scroll_up.show()
up_space = 0
if down:
self._scroll_down.show()
down_space = 0
# Set leading and trailing padding
self._posts.setContentsMargins(0, up_space, 0, down_space)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def hide_scroll_indicators(self, up=False, down=False):
# Remove scroll indicators
up_space = down_space = 0
if up:
self._scroll_up.hide()
up_space = POST_SPACING_FULL
if down:
self._scroll_down.hide()
down_space = POST_SPACING_FULL
# Set leading and trailing padding
self._posts.setContentsMargins(0, up_space, 0, down_space)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _build_gui(self):
# Storages
buttons = self._buttons
# Unpack dimension data
x, y, width, height = CACHE['dimension']
# If position have not been set before
if x is NotImplemented:
screen = QDesktopWidget().screenGeometry()
x, y = (screen.width() - width) / 2, (screen.height() - height) / 2
# Set window position and dimension
self.setGeometry(x, y, width, height)
self.setFixedWidth(width)
# Create layout for the entire application and zero-out
self.layout = main_layout = QVBoxLayout()
main_layout.setSpacing(0)
main_layout.setContentsMargins(0, 0, 0, 0)
# Create and add scrollable area for streams
self._scroll_area = posts = QScrollArea()
posts.setWidgetResizable(True)
posts.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
posts.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
posts.setFrameShape(QFrame.NoFrame)
# Create a main-stream widget
main_stream = QWidget()
main_stream.setFixedWidth(width)
# TODO: rename self._posts to something meaningful
self._posts = posts_layout = QVBoxLayout()
posts_layout.setSpacing(POST_SPACING_FULL)
posts_layout.setContentsMargins(0, 0, 0, 0)
# HACK: in both scroll arrows the 'padding_left' value is a hack.
# The reason why the arrows are not aligned to the horizontal
# center is unknown as it looks like everything is set up properly
# Add scroll-up icon and text
self._scroll_up = CoubletButtonWidget(icon=CONSTANTS['icon_scroll_up'],
label='SCROLL UP TO REFRESH',
font=CONSTANTS['text_font_generic'],
palette=CONSTANTS['text_color_light'],
order=ICON_AND_LABEL,
orientation=VERTICAL,
spacing=SMALL_PADDING,
padding_top=POST_SPACING_FULL,
padding_left=8)
posts_layout.addWidget(self._scroll_up, alignment=Qt.AlignHCenter)
# Dynamic space
posts_layout.addStretch(0)
# Add scroll-down icon and text
self._scroll_down = CoubletButtonWidget(icon=CONSTANTS['icon_scroll_down'],
label='SCROLL DOWN TO LOAD MORE',
font=CONSTANTS['text_font_generic'],
palette=CONSTANTS['text_color_light'],
order=LABEL_AND_ICON,
orientation=VERTICAL,
spacing=SMALL_PADDING,
padding_bottom=POST_SPACING_FULL,
padding_left=8)
posts_layout.addWidget(self._scroll_down, alignment=Qt.AlignHCenter)
# Set posts' layout to stream, add stream to main layout
main_stream.setLayout(posts_layout)
posts.setWidget(main_stream)
main_layout.addWidget(posts)
# Create menu-bar
menu_bar = QWidget()
menu_bar.setPalette(CONSTANTS['panel_color_darker'])
menu_bar.setAutoFillBackground(True)
# Create layout for menu-bar and zero-out
menu_bar_layout = QVBoxLayout()
menu_bar_layout.setSpacing(0)
menu_bar_layout.setContentsMargins(0, 0, 0, 0)
# Create layout for menu buttons and zero-out
menu_buttons_layout = QHBoxLayout()
menu_buttons_layout.setSpacing(0)
menu_buttons_layout.setContentsMargins(0, 0, 0, 0)
# Add menu-buttons to menu-bar
menu_bar_layout.addSpacing(2*SMALL_PADDING)
menu_bar_layout.addLayout(menu_buttons_layout)
menu_bar_layout.addSpacing(2*SMALL_PADDING)
# Assign layout and add menu-bar to app
menu_bar.setLayout(menu_bar_layout)
main_layout.addWidget(menu_bar)
# Add buttons and spacess to menu-buttons layout
menu_buttons_layout.addSpacing(2*SMALL_PADDING)
# get default double-click interval
for i, menu_item in enumerate(CoubAPI.STREAM_NAMES):
# If not the first item, add
# auto-stretching before it
if i:
menu_buttons_layout.addStretch(0)
# Add menu item
icon_name = 'icon_' + menu_item
click = CoubletMouseEventHandler(l_single=lambda n=i: self.on_menu_button_pressed(n))
menu_button = CoubletButtonWidget(icon=CONSTANTS[icon_name],
icon_selected=CONSTANTS[icon_name + '_selected'],
label=menu_item.upper(),
order=ICON_AND_LABEL,
orientation=HORIZONTAL,
font=CONSTANTS['text_font_generic'],
palette=CONSTANTS['text_color_light'],
palette_selected=CONSTANTS['text_color_light_selected'],
spacing=SMALL_PADDING,
mouse_event_handler=click)
buttons.append(menu_button)
menu_buttons_layout.addWidget(menu_button)
# Tail padding
menu_buttons_layout.addSpacing(2*SMALL_PADDING)
self.setLayout(main_layout)
self.setPalette(CONSTANTS['panel_color_dark'])
| petervaro/coublet | views/window.py | Python | mit | 12,351 | [
"VisIt"
] | a2ba127c6d70fea1159e504655dbfbe8f11a34dd62366a5a54fd7635920bb91b |
import itertools
import os
import random
from collections import defaultdict
from datetime import datetime
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple
import bmemcached
import orjson
from django.conf import settings
from django.contrib.sessions.models import Session
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandParser
from django.db.models import F, Max
from django.utils.timezone import now as timezone_now
from django.utils.timezone import timedelta as timezone_timedelta
from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path
from zerver.lib.actions import (
STREAM_ASSIGNMENT_COLORS,
build_message_send_dict,
check_add_realm_emoji,
do_change_user_role,
do_send_messages,
do_update_user_custom_profile_data_if_changed,
try_add_realm_custom_profile_field,
try_add_realm_default_custom_profile_field,
)
from zerver.lib.bulk_create import bulk_create_streams
from zerver.lib.cache import cache_set
from zerver.lib.generate_test_data import create_test_data, generate_topics
from zerver.lib.onboarding import create_if_missing_realm_internal_bots
from zerver.lib.push_notifications import logger as push_notifications_logger
from zerver.lib.server_initialization import create_internal_realm, create_users
from zerver.lib.storage import static_path
from zerver.lib.types import ProfileFieldData
from zerver.lib.url_preview.preview import CACHE_NAME as PREVIEW_CACHE_NAME
from zerver.lib.user_groups import create_user_group
from zerver.lib.users import add_service
from zerver.lib.utils import generate_api_key
from zerver.models import (
AlertWord,
Client,
CustomProfileField,
DefaultStream,
Huddle,
Message,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
Recipient,
Service,
Stream,
Subscription,
UserMessage,
UserPresence,
UserProfile,
get_client,
get_huddle,
get_realm,
get_stream,
get_user,
get_user_by_delivery_email,
get_user_profile_by_id,
)
settings.USING_TORNADO = False
# Disable using memcached caches to avoid 'unsupported pickle
# protocol' errors if `populate_db` is run with a different Python
# from `run-dev.py`.
default_cache = settings.CACHES['default']
settings.CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
DEFAULT_EMOJIS = [
('+1', '1f44d'),
('smiley', '1f603'),
('eyes', '1f440'),
('crying_cat_face', '1f63f'),
('arrow_up', '2b06'),
('confetti_ball', '1f38a'),
('hundred_points', '1f4af'),
]
def clear_database() -> None:
# Hacky function only for use inside populate_db. Designed to
# allow running populate_db repeatedly in series to work without
# flushing memcached or clearing the database manually.
# With `zproject.test_settings`, we aren't using real memcached
# and; we only need to flush memcached if we're populating a
# database that would be used with it (i.e. zproject.dev_settings).
if default_cache['BACKEND'] == 'django_bmemcached.memcached.BMemcached':
bmemcached.Client(
(default_cache['LOCATION'],), **default_cache['OPTIONS'],
).flush_all()
model: Any = None # Hack because mypy doesn't know these are model classes
for model in [Message, Stream, UserProfile, Recipient,
Realm, Subscription, Huddle, UserMessage, Client,
DefaultStream]:
model.objects.all().delete()
Session.objects.all().delete()
# Suppress spammy output from the push notifications logger
push_notifications_logger.disabled = True
def subscribe_users_to_streams(realm: Realm, stream_dict: Dict[str, Dict[str, Any]]) -> None:
subscriptions_to_add = []
event_time = timezone_now()
all_subscription_logs = []
profiles = UserProfile.objects.select_related().filter(realm=realm)
for i, stream_name in enumerate(stream_dict):
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
for profile in profiles:
# Subscribe to some streams.
s = Subscription(
recipient=recipient,
user_profile=profile,
color=STREAM_ASSIGNMENT_COLORS[i % len(STREAM_ASSIGNMENT_COLORS)])
subscriptions_to_add.append(s)
log = RealmAuditLog(realm=profile.realm,
modified_user=profile,
modified_stream=stream,
event_last_message_id=0,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time)
all_subscription_logs.append(log)
Subscription.objects.bulk_create(subscriptions_to_add)
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def create_alert_words(realm_id: int) -> None:
user_ids = UserProfile.objects.filter(
realm_id=realm_id,
is_bot=False,
is_active=True,
).values_list('id', flat=True)
alert_words = [
'algorithms',
'complexity',
'founded',
'galaxy',
'grammar',
'illustrious',
'natural',
'objective',
'people',
'robotics',
'study',
]
recs: List[AlertWord] = []
for user_id in user_ids:
random.shuffle(alert_words)
for i in range(4):
recs.append(
AlertWord(
realm_id=realm_id,
user_profile_id=user_id,
word = alert_words[i],
)
)
AlertWord.objects.bulk_create(recs)
# Django management command (`./manage.py populate_db`): build a complete
# development/test database -- realms, users, bots, streams, subscriptions,
# custom profile fields -- and then generate a message corpus via
# generate_and_send_messages().
class Command(BaseCommand):
help = "Populate a test database"
# Declare the command-line knobs; defaults give a small but usable dev data set.
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('-n', '--num-messages',
type=int,
default=500,
help='The number of messages to create.')
parser.add_argument('-b', '--batch-size',
type=int,
default=1000,
help='How many messages to process in a single batch')
parser.add_argument('--extra-users',
type=int,
default=0,
help='The number of extra users to create')
parser.add_argument('--extra-bots',
type=int,
default=0,
help='The number of extra bots to create')
parser.add_argument('--extra-streams',
type=int,
default=0,
help='The number of extra streams to create')
parser.add_argument('--max-topics',
type=int,
help='The number of maximum topics to create')
parser.add_argument('--huddles',
dest='num_huddles',
type=int,
default=3,
help='The number of huddles to create.')
parser.add_argument('--personals',
dest='num_personals',
type=int,
default=6,
help='The number of personal pairs to create.')
parser.add_argument('--threads',
type=int,
default=1,
help='The number of threads to use.')
parser.add_argument('--percent-huddles',
type=float,
default=15,
help='The percent of messages to be huddles.')
parser.add_argument('--percent-personals',
type=float,
default=15,
help='The percent of messages to be personals.')
parser.add_argument('--stickyness',
type=float,
default=20,
help='The percent of messages to repeat recent folks.')
parser.add_argument('--nodelete',
action="store_false",
dest='delete',
help='Whether to delete all the existing messages.')
parser.add_argument('--test-suite',
action="store_true",
help='Configures populate_db to create a deterministic '
'data set for the backend tests.')
# Main entry point; `options` is the parsed argparse namespace as a dict.
def handle(self, **options: Any) -> None:
if options["percent_huddles"] + options["percent_personals"] > 100:
self.stderr.write("Error! More than 100% of messages allocated.\n")
return
# Get consistent data for backend tests: a fixed RNG seed makes every
# downstream random.choice/sample/shuffle call deterministic.
if options["test_suite"]:
random.seed(0)
# If max_topics is not set, we set it proportional to the
# number of messages.
if options["max_topics"] is None:
options["max_topics"] = 1 + options["num_messages"] // 100
if options["delete"]:
# Start by clearing all the data in our database
clear_database()
# Create our three default realms
# Could in theory be done via zerver.lib.actions.do_create_realm, but
# welcome-bot (needed for do_create_realm) hasn't been created yet
create_internal_realm()
zulip_realm = Realm.objects.create(
string_id="zulip", name="Zulip Dev", emails_restricted_to_domains=False,
email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
description="The Zulip development environment default organization."
" It's great for testing!",
invite_required=False, org_type=Realm.CORPORATE)
RealmDomain.objects.create(realm=zulip_realm, domain="zulip.com")
# The "zephyr" (MIT) and "lear" realms only exist for the backend tests.
if options["test_suite"]:
mit_realm = Realm.objects.create(
string_id="zephyr", name="MIT", emails_restricted_to_domains=True,
invite_required=False, org_type=Realm.CORPORATE)
RealmDomain.objects.create(realm=mit_realm, domain="mit.edu")
lear_realm = Realm.objects.create(
string_id="lear", name="Lear & Co.", emails_restricted_to_domains=False,
invite_required=False, org_type=Realm.CORPORATE)
# Default to allowing all members to send mentions in
# large streams for the test suite to keep
# mention-related tests simple.
zulip_realm.wildcard_mention_policy = Realm.WILDCARD_MENTION_POLICY_MEMBERS
zulip_realm.save(update_fields=['wildcard_mention_policy'])
# Create test Users (UserProfiles are automatically created,
# as are subscriptions to the ability to receive personals).
names = [
("Zoe", "ZOE@zulip.com"),
("Othello, the Moor of Venice", "othello@zulip.com"),
("Iago", "iago@zulip.com"),
("Prospero from The Tempest", "prospero@zulip.com"),
("Cordelia Lear", "cordelia@zulip.com"),
("King Hamlet", "hamlet@zulip.com"),
("aaron", "AARON@zulip.com"),
("Polonius", "polonius@zulip.com"),
("Desdemona", "desdemona@zulip.com"),
]
# For testing really large batches:
# Create extra users with semi realistic names to make search
# functions somewhat realistic. We'll still create 1000 users
# like Extra222 User for some predictability.
num_names = options['extra_users']
num_boring_names = 300
for i in range(min(num_names, num_boring_names)):
full_name = f'Extra{i:03} User'
names.append((full_name, f'extrauser{i}@zulip.com'))
if num_names > num_boring_names:
fnames = ['Amber', 'Arpita', 'Bob', 'Cindy', 'Daniela', 'Dan', 'Dinesh',
'Faye', 'François', 'George', 'Hank', 'Irene',
'James', 'Janice', 'Jenny', 'Jill', 'John',
'Kate', 'Katelyn', 'Kobe', 'Lexi', 'Manish', 'Mark', 'Matt', 'Mayna',
'Michael', 'Pete', 'Peter', 'Phil', 'Phillipa', 'Preston',
'Sally', 'Scott', 'Sandra', 'Steve', 'Stephanie',
'Vera']
mnames = ['de', 'van', 'von', 'Shaw', 'T.']
lnames = ['Adams', 'Agarwal', 'Beal', 'Benson', 'Bonita', 'Davis',
'George', 'Harden', 'James', 'Jones', 'Johnson', 'Jordan',
'Lee', 'Leonard', 'Singh', 'Smith', 'Patel', 'Towns', 'Wall']
# Compose "First [middle] Last"-style names; the numeric suffix on the
# first name keeps generated emails unique.
for i in range(num_boring_names, num_names):
fname = random.choice(fnames) + str(i)
full_name = fname
if random.random() < 0.7:
if random.random() < 0.5:
full_name += ' ' + random.choice(mnames)
full_name += ' ' + random.choice(lnames)
email = fname.lower() + '@zulip.com'
names.append((full_name, email))
create_users(zulip_realm, names, tos_version=settings.TOS_VERSION)
# Promote a few well-known accounts to special roles used by tests.
iago = get_user_by_delivery_email("iago@zulip.com", zulip_realm)
do_change_user_role(iago, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
iago.is_staff = True
iago.save(update_fields=['is_staff'])
desdemona = get_user_by_delivery_email("desdemona@zulip.com", zulip_realm)
do_change_user_role(desdemona, UserProfile.ROLE_REALM_OWNER, acting_user=None)
guest_user = get_user_by_delivery_email("polonius@zulip.com", zulip_realm)
guest_user.role = UserProfile.ROLE_GUEST
guest_user.save(update_fields=['role'])
# These bots are directly referenced from code and thus
# are needed for the test suite.
zulip_realm_bots = [
("Zulip Error Bot", "error-bot@zulip.com"),
("Zulip Default Bot", "default-bot@zulip.com"),
]
for i in range(options["extra_bots"]):
zulip_realm_bots.append((f'Extra Bot {i}', f'extrabot{i}@zulip.com'))
create_users(zulip_realm, zulip_realm_bots, bot_type=UserProfile.DEFAULT_BOT)
zoe = get_user_by_delivery_email("zoe@zulip.com", zulip_realm)
zulip_webhook_bots = [
("Zulip Webhook Bot", "webhook-bot@zulip.com"),
]
# If a stream is not supplied in the webhook URL, the webhook
# will (in some cases) send the notification as a PM to the
# owner of the webhook bot, so bot_owner can't be None
create_users(zulip_realm, zulip_webhook_bots,
bot_type=UserProfile.INCOMING_WEBHOOK_BOT, bot_owner=zoe)
aaron = get_user_by_delivery_email("AARON@zulip.com", zulip_realm)
zulip_outgoing_bots = [
("Outgoing Webhook", "outgoing-webhook@zulip.com"),
]
create_users(zulip_realm, zulip_outgoing_bots,
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT, bot_owner=aaron)
outgoing_webhook = get_user("outgoing-webhook@zulip.com", zulip_realm)
add_service("outgoing-webhook", user_profile=outgoing_webhook, interface=Service.GENERIC,
base_url="http://127.0.0.1:5002", token=generate_api_key())
# Add the realm internal bots to each realm.
create_if_missing_realm_internal_bots()
# Create public streams.
stream_list = ["Verona", "Denmark", "Scotland", "Venice", "Rome"]
stream_dict: Dict[str, Dict[str, Any]] = {
"Verona": {"description": "A city in Italy"},
"Denmark": {"description": "A Scandinavian country"},
"Scotland": {"description": "Located in the United Kingdom"},
"Venice": {"description": "A northeastern Italian city"},
"Rome": {"description": "Yet another Italian city", "is_web_public": True},
}
bulk_create_streams(zulip_realm, stream_dict)
recipient_streams: List[int] = [
Stream.objects.get(name=name, realm=zulip_realm).id
for name in stream_list
]
# Create subscriptions to streams. The following
# algorithm will give each of the users a different but
# deterministic subset of the streams (given a fixed list
# of users). For the test suite, we have a fixed list of
# subscriptions to make sure test data is consistent
# across platforms.
subscriptions_list: List[Tuple[UserProfile, Recipient]] = []
profiles: Sequence[UserProfile] = UserProfile.objects.select_related().filter(
is_bot=False).order_by("email")
if options["test_suite"]:
subscriptions_map = {
'AARON@zulip.com': ['Verona'],
'cordelia@zulip.com': ['Verona'],
'hamlet@zulip.com': ['Verona', 'Denmark'],
'iago@zulip.com': ['Verona', 'Denmark', 'Scotland'],
'othello@zulip.com': ['Verona', 'Denmark', 'Scotland'],
'prospero@zulip.com': ['Verona', 'Denmark', 'Scotland', 'Venice'],
'ZOE@zulip.com': ['Verona', 'Denmark', 'Scotland', 'Venice', 'Rome'],
'polonius@zulip.com': ['Verona'],
'desdemona@zulip.com': ['Verona', 'Denmark', 'Venice'],
}
# Every non-bot user must be covered by the map above; failing loudly
# here catches forgetting to update it when adding a test user.
for profile in profiles:
email = profile.delivery_email
if email not in subscriptions_map:
raise Exception(f'Subscriptions not listed for user {email}')
for stream_name in subscriptions_map[email]:
stream = Stream.objects.get(name=stream_name)
r = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
subscriptions_list.append((profile, r))
else:
# Non-test-suite: user i (ordered by email) subscribes to a prefix of
# the stream list whose length grows with i.
num_streams = len(recipient_streams)
num_users = len(profiles)
for i, profile in enumerate(profiles):
# Subscribe to some streams.
fraction = float(i) / num_users
num_recips = int(num_streams * fraction) + 1
for type_id in recipient_streams[:num_recips]:
r = Recipient.objects.get(type=Recipient.STREAM, type_id=type_id)
subscriptions_list.append((profile, r))
subscriptions_to_add: List[Subscription] = []
event_time = timezone_now()
all_subscription_logs: (List[RealmAuditLog]) = []
i = 0
# Build Subscription rows plus matching RealmAuditLog entries, cycling
# through the stock stream colors.
for profile, recipient in subscriptions_list:
i += 1
color = STREAM_ASSIGNMENT_COLORS[i % len(STREAM_ASSIGNMENT_COLORS)]
s = Subscription(
recipient=recipient,
user_profile=profile,
color=color)
subscriptions_to_add.append(s)
log = RealmAuditLog(realm=profile.realm,
modified_user=profile,
modified_stream_id=recipient.type_id,
event_last_message_id=0,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time)
all_subscription_logs.append(log)
Subscription.objects.bulk_create(subscriptions_to_add)
RealmAuditLog.objects.bulk_create(all_subscription_logs)
# Create custom profile field data
phone_number = try_add_realm_custom_profile_field(zulip_realm, "Phone number",
CustomProfileField.SHORT_TEXT,
hint='')
biography = try_add_realm_custom_profile_field(zulip_realm, "Biography",
CustomProfileField.LONG_TEXT,
hint='What are you known for?')
favorite_food = try_add_realm_custom_profile_field(zulip_realm, "Favorite food",
CustomProfileField.SHORT_TEXT,
hint="Or drink, if you'd prefer")
field_data: ProfileFieldData = {
'vim': {'text': 'Vim', 'order': '1'},
'emacs': {'text': 'Emacs', 'order': '2'},
}
favorite_editor = try_add_realm_custom_profile_field(zulip_realm,
"Favorite editor",
CustomProfileField.CHOICE,
field_data=field_data)
birthday = try_add_realm_custom_profile_field(zulip_realm, "Birthday",
CustomProfileField.DATE)
favorite_website = try_add_realm_custom_profile_field(zulip_realm, "Favorite website",
CustomProfileField.URL,
hint="Or your personal blog's URL")
mentor = try_add_realm_custom_profile_field(zulip_realm, "Mentor",
CustomProfileField.USER)
github_profile = try_add_realm_default_custom_profile_field(zulip_realm, "github")
# Fill in values for Iago and Hamlet
hamlet = get_user_by_delivery_email("hamlet@zulip.com", zulip_realm)
do_update_user_custom_profile_data_if_changed(iago, [
{"id": phone_number.id, "value": "+1-234-567-8901"},
{"id": biography.id, "value": "Betrayer of Othello."},
{"id": favorite_food.id, "value": "Apples"},
{"id": favorite_editor.id, "value": "emacs"},
{"id": birthday.id, "value": "2000-01-01"},
{"id": favorite_website.id, "value": "https://zulip.readthedocs.io/en/latest/"},
{"id": mentor.id, "value": [hamlet.id]},
{"id": github_profile.id, "value": 'zulip'},
])
do_update_user_custom_profile_data_if_changed(hamlet, [
{"id": phone_number.id, "value": "+0-11-23-456-7890"},
{
"id": biography.id,
"value": "I am:\n* The prince of Denmark\n* Nephew to the usurping Claudius",
},
{"id": favorite_food.id, "value": "Dark chocolate"},
{"id": favorite_editor.id, "value": "vim"},
{"id": birthday.id, "value": "1900-01-01"},
{"id": favorite_website.id, "value": "https://blog.zulig.org"},
{"id": mentor.id, "value": [iago.id]},
{"id": github_profile.id, "value": 'zulipbot'},
])
else:
# --nodelete run: reuse whatever already exists in the database.
zulip_realm = get_realm("zulip")
recipient_streams = [klass.type_id for klass in
Recipient.objects.filter(type=Recipient.STREAM)]
# Extract a list of all users
user_profiles: List[UserProfile] = list(UserProfile.objects.filter(is_bot=False))
# Create a test realm emoji.
IMAGE_FILE_PATH = static_path('images/test-images/checkbox.png')
with open(IMAGE_FILE_PATH, 'rb') as fp:
check_add_realm_emoji(zulip_realm, 'green_tick', iago, fp)
if not options["test_suite"]:
# Populate users with some presence data
for user in user_profiles:
status: int = UserPresence.ACTIVE
date = timezone_now()
client = get_client("website")
# Roughly half the users (full name starting A-H) present via Android.
if user.full_name[0] <= 'H':
client = get_client("ZulipAndroid")
UserPresence.objects.get_or_create(user_profile=user,
realm_id=user.realm_id,
client=client,
timestamp=date,
status=status)
user_profiles_ids = [user_profile.id for user_profile in user_profiles]
# Create several initial huddles
for i in range(options["num_huddles"]):
get_huddle(random.sample(user_profiles_ids, random.randint(3, 4)))
# Create several initial pairs for personals
personals_pairs = [random.sample(user_profiles_ids, 2)
for i in range(options["num_personals"])]
create_alert_words(zulip_realm.id)
# Generate a new set of test data.
create_test_data()
# prepopulate the URL preview/embed data for the links present
# in the config.generate_data.json data set. This makes it
# possible for populate_db to run happily without Internet
# access.
with open("zerver/tests/fixtures/docs_url_preview_data.json", "rb") as f:
urls_with_preview_data = orjson.loads(f.read())
for url in urls_with_preview_data:
cache_set(url, urls_with_preview_data[url], PREVIEW_CACHE_NAME)
# The remaining realm/stream setup only applies to a freshly-cleared DB.
if options["delete"]:
if options["test_suite"]:
# Create test users; the MIT ones are needed to test
# the Zephyr mirroring codepaths.
testsuite_mit_users = [
("Fred Sipb (MIT)", "sipbtest@mit.edu"),
("Athena Consulting Exchange User (MIT)", "starnine@mit.edu"),
("Esp Classroom (MIT)", "espuser@mit.edu"),
]
create_users(mit_realm, testsuite_mit_users, tos_version=settings.TOS_VERSION)
testsuite_lear_users = [
("King Lear", "king@lear.org"),
("Cordelia Lear", "cordelia@zulip.com"),
]
create_users(lear_realm, testsuite_lear_users, tos_version=settings.TOS_VERSION)
if not options["test_suite"]:
# To keep the messages.json fixtures file for the test
# suite fast, don't add these users and subscriptions
# when running populate_db for the test suite
zulip_stream_dict: Dict[str, Dict[str, Any]] = {
"devel": {"description": "For developing"},
"all": {"description": "For **everything**"},
"announce": {"description": "For announcements",
'stream_post_policy': Stream.STREAM_POST_POLICY_ADMINS},
"design": {"description": "For design"},
"support": {"description": "For support"},
"social": {"description": "For socializing"},
"test": {"description": "For testing `code`"},
"errors": {"description": "For errors"},
"sales": {"description": "For sales discussion"},
}
# Calculate the maximum number of digits in any extra stream's
# number, since a stream with name "Extra Stream 3" could show
# up after "Extra Stream 29". (Used later to pad numbers with
# 0s).
maximum_digits = len(str(options['extra_streams'] - 1))
for i in range(options['extra_streams']):
# Pad the number with 0s based on `maximum_digits`.
number_str = str(i).zfill(maximum_digits)
extra_stream_name = 'Extra Stream ' + number_str
zulip_stream_dict[extra_stream_name] = {
"description": "Auto-generated extra stream.",
}
bulk_create_streams(zulip_realm, zulip_stream_dict)
# Now that we've created the notifications stream, configure it properly.
zulip_realm.notifications_stream = get_stream("announce", zulip_realm)
zulip_realm.save(update_fields=['notifications_stream'])
# Add a few default streams
for default_stream_name in ["design", "devel", "social", "support"]:
DefaultStream.objects.create(realm=zulip_realm,
stream=get_stream(default_stream_name, zulip_realm))
# Now subscribe everyone to these streams
subscribe_users_to_streams(zulip_realm, zulip_stream_dict)
if not options["test_suite"]:
# Update pointer of each user to point to the last message in their
# UserMessage rows with sender_id=user_profile_id.
users = list(UserMessage.objects.filter(
message__sender_id=F('user_profile_id')).values(
'user_profile_id').annotate(pointer=Max('message_id')))
for user in users:
UserProfile.objects.filter(id=user['user_profile_id']).update(
pointer=user['pointer'])
create_user_groups()
if not options["test_suite"]:
# We populate the analytics database here for
# development purpose only
call_command('populate_analytics_db')
# Split num_messages evenly across the requested threads; the first
# (num_messages % threads) jobs get one extra message each.
threads = options["threads"]
jobs: List[Tuple[int, List[List[int]], Dict[str, Any], Callable[[str], int], int]] = []
for i in range(threads):
count = options["num_messages"] // threads
if i < options["num_messages"] % threads:
count += 1
jobs.append((count, personals_pairs, options, self.stdout.write, random.randint(0, 10**10)))
# NOTE(review): despite the --threads option, jobs are run sequentially
# here, not in a thread pool -- confirm whether that is intended.
for job in jobs:
generate_and_send_messages(job)
if options["delete"]:
if not options['test_suite']:
# These bots are not needed by the test suite
# Also, we don't want interacting with each other
# in dev setup.
internal_zulip_users_nosubs = [
("Zulip Commit Bot", "commit-bot@zulip.com"),
("Zulip Trac Bot", "trac-bot@zulip.com"),
("Zulip Nagios Bot", "nagios-bot@zulip.com"),
]
create_users(zulip_realm, internal_zulip_users_nosubs, bot_type=UserProfile.DEFAULT_BOT)
mark_all_messages_as_read()
self.stdout.write("Successfully populated test database.\n")
def mark_all_messages_as_read() -> None:
    """Set the `read` flag on every UserMessage row.

    Message generation leaves the has_alert_word and is_private flags
    intact; this only ORs in the read bit, so users of the populated
    database do not start with a wall of unread messages.
    """
    read_bit = UserMessage.flags.read
    UserMessage.objects.all().update(flags=F('flags').bitor(read_bit))
# Cache of Recipient rows keyed by id, so repeated lookups during message
# generation hit the database only once per recipient.
recipient_hash: Dict[int, Recipient] = {}

def get_recipient_by_id(rid: int) -> Recipient:
    """Return the Recipient with the given id, memoizing the lookup.

    Bug fix: the original checked recipient_hash but never wrote to it,
    so the cache was dead and every call issued a fresh query; a miss
    now populates the cache before returning.
    """
    if rid not in recipient_hash:
        recipient_hash[rid] = Recipient.objects.get(id=rid)
    return recipient_hash[rid]
# Create some test messages, including:
# - multiple streams
# - multiple subjects per stream
# - multiple huddles
# - multiple personals conversations
# - multiple messages per subject
# - both single and multi-line content
# Generate `tot_messages` messages (stream/personal/huddle mix controlled by
# the --percent-* options) and send them in batches of options['batch_size'].
# `data` is one job tuple built by Command.handle: (message count, personals
# pairs, parsed options, an output writer, and a per-job RNG seed).
def generate_and_send_messages(data: Tuple[int, Sequence[Sequence[int]], Mapping[str, Any],
Callable[[str], Any], int]) -> int:
(tot_messages, personals_pairs, options, output, random_seed) = data
random.seed(random_seed)
# Message bodies cycle through a shuffled fixture file of sample texts.
with open(os.path.join(get_or_create_dev_uuid_var_path('test-backend'),
"test_messages.json"), "rb") as infile:
dialog = orjson.loads(infile.read())
random.shuffle(dialog)
texts = itertools.cycle(dialog)
recipient_streams: List[int] = [
klass.id for klass in Recipient.objects.filter(type=Recipient.STREAM)
]
recipient_huddles: List[int] = [h.id for h in Recipient.objects.filter(type=Recipient.HUDDLE)]
# Precompute who belongs to each huddle so senders can be drawn from members.
huddle_members: Dict[int, List[int]] = {}
for h in recipient_huddles:
huddle_members[h] = [s.user_profile.id for s in
Subscription.objects.filter(recipient_id=h)]
# Generate different topics for each stream
possible_topics = {}
for stream_id in recipient_streams:
possible_topics[stream_id] = generate_topics(options["max_topics"])
message_batch_size = options['batch_size']
num_messages = 0
random_max = 1000000
# recipients[i] remembers (type, recipient id, per-type saved state) for
# message i, so the "stickyness" branch can reuse the previous recipient.
recipients: Dict[int, Tuple[int, int, Dict[str, Any]]] = {}
messages: List[Message] = []
while num_messages < tot_messages:
saved_data: Dict[str, Any] = {}
message = Message()
message.sending_client = get_client('populate_db')
message.content = next(texts)
randkey = random.randint(1, random_max)
# With probability --stickyness%, repeat the previous message's recipient.
if (num_messages > 0 and
random.randint(1, random_max) * 100. / random_max < options["stickyness"]):
# Use an old recipient
message_type, recipient_id, saved_data = recipients[num_messages - 1]
if message_type == Recipient.PERSONAL:
personals_pair = saved_data['personals_pair']
random.shuffle(personals_pair)
elif message_type == Recipient.STREAM:
message.subject = saved_data['subject']
message.recipient = get_recipient_by_id(recipient_id)
elif message_type == Recipient.HUDDLE:
message.recipient = get_recipient_by_id(recipient_id)
# Otherwise pick the type fresh: huddle, then personal, then stream
# (the final branch is the catch-all since randkey <= random_max always).
elif (randkey <= random_max * options["percent_huddles"] / 100.):
message_type = Recipient.HUDDLE
message.recipient = get_recipient_by_id(random.choice(recipient_huddles))
elif (randkey <= random_max * (options["percent_huddles"] + options["percent_personals"]) / 100.):
message_type = Recipient.PERSONAL
personals_pair = random.choice(personals_pairs)
random.shuffle(personals_pair)
elif (randkey <= random_max * 1.0):
message_type = Recipient.STREAM
message.recipient = get_recipient_by_id(random.choice(recipient_streams))
# Fill in sender (and, for streams, the topic) based on the chosen type.
if message_type == Recipient.HUDDLE:
sender_id = random.choice(huddle_members[message.recipient.id])
message.sender = get_user_profile_by_id(sender_id)
elif message_type == Recipient.PERSONAL:
message.recipient = Recipient.objects.get(type=Recipient.PERSONAL,
type_id=personals_pair[0])
message.sender = get_user_profile_by_id(personals_pair[1])
saved_data['personals_pair'] = personals_pair
elif message_type == Recipient.STREAM:
# Pick a random subscriber to the stream
message.sender = random.choice(Subscription.objects.filter(
recipient=message.recipient)).user_profile
message.subject = random.choice(possible_topics[message.recipient.id])
saved_data['subject'] = message.subject
message.date_sent = choose_date_sent(num_messages, tot_messages, options['threads'])
messages.append(message)
recipients[num_messages] = (message_type, message.recipient.id, saved_data)
num_messages += 1
if (num_messages % message_batch_size) == 0:
# Send the batch and empty the list:
send_messages(messages)
messages = []
if len(messages) > 0:
# If there are unsent messages after exiting the loop, send them:
send_messages(messages)
return tot_messages
def send_messages(messages: List[Message]) -> None:
    """Persist one batch of generated messages and sprinkle reactions on them.

    We disable USING_RABBITMQ for the duration of the call so that
    deferred work runs inline in do_send_messages rather than being
    queued.  This matters because if run-dev.py wasn't running when
    populate_db was run, a developer could otherwise end up with queued
    events referencing objects from a previous life of the database,
    which naturally throws exceptions.
    """
    settings.USING_RABBITMQ = False
    message_dict_list = [
        build_message_send_dict({'message': message}) for message in messages
    ]
    do_send_messages(message_dict_list)
    bulk_create_reactions(messages)
    settings.USING_RABBITMQ = True
def get_message_to_users(message_ids: List[int]) -> Dict[int, List[int]]:
    """Map each message id to the user ids holding a UserMessage row for it.

    Message ids with no UserMessage rows simply get an empty list via the
    defaultdict.
    """
    receivers: Dict[int, List[int]] = defaultdict(list)
    rows = UserMessage.objects.filter(
        message_id__in=message_ids,
    ).values("message_id", "user_profile_id")
    for entry in rows:
        receivers[entry["message_id"]].append(entry["user_profile_id"])
    return receivers
def bulk_create_reactions(all_messages: List[Message]) -> None:
    """Attach emoji reactions to a random ~20% of the given messages.

    For each sampled message that has recipients, between 1 and 7 of
    them react, each with 1-3 distinct default emoji.  (Ideally exactly
    one reaction would be more common than this algorithm makes it.)
    All Reaction rows are inserted with a single bulk_create.
    """
    pending: List[Reaction] = []
    sample_size = int(0.2 * len(all_messages))
    sampled = random.sample(all_messages, sample_size)
    sampled_ids = [message.id for message in sampled]
    users_by_message = get_message_to_users(sampled_ids)
    for message_id in sampled_ids:
        recipients = users_by_message[message_id]
        if not recipients:
            continue
        # Between 1 and 7 reacting users, capped by how many recipients exist.
        cap = min(7, len(recipients))
        reactor_count = random.randrange(1, cap + 1)
        reactor_ids = random.sample(recipients, reactor_count)
        for user_id in reactor_ids:
            # Each reacting user adds between 1 and 3 emoji.
            emoji_count = random.choice([1, 2, 3])
            for emoji_name, emoji_code in random.sample(DEFAULT_EMOJIS, emoji_count):
                pending.append(Reaction(
                    user_profile_id=user_id,
                    message_id=message_id,
                    emoji_name=emoji_name,
                    emoji_code=emoji_code,
                    reaction_type=Reaction.UNICODE_EMOJI,
                ))
    Reaction.objects.bulk_create(pending)
def choose_date_sent(num_messages: int, tot_messages: int, threads: int) -> datetime:
    """Pick a spoofed send time for the num_messages-th of tot_messages messages.

    The first 80% of messages are spread over a 3-day window starting
    5 days ago; the remaining 20% over the past 24 hours.  Each message
    gets a uniformly random offset within its own slot of the window,
    so timestamps increase roughly monotonically with message index.
    """
    if threads != 1:
        # Spoofing time is not supported with threading.
        return timezone_now()
    first_chunk = int(tot_messages * 0.8)
    second_chunk = tot_messages - first_chunk
    if num_messages < first_chunk:
        # 3-day window beginning 5 days ago.
        base = timezone_now() - timezone_timedelta(days=5)
        slot = 3 * 24 * 60 * 60 / first_chunk
        index = num_messages
    else:
        # Last 20% of messages: 24-hour window beginning 1 day ago.
        base = timezone_now() - timezone_timedelta(days=1)
        slot = 24 * 60 * 60 / second_chunk
        index = num_messages - first_chunk
    jitter = random.uniform(slot * index, slot * (index + 1))
    return base + timezone_timedelta(seconds=jitter)
def create_user_groups() -> None:
    """Create the stock "hamletcharacters" user group in the zulip realm."""
    realm = get_realm('zulip')
    characters = [
        get_user_by_delivery_email(email, realm)
        for email in ('cordelia@zulip.com', 'hamlet@zulip.com')
    ]
    create_user_group("hamletcharacters", characters, realm,
                      description="Characters of Hamlet")
| kou/zulip | zilencer/management/commands/populate_db.py | Python | apache-2.0 | 41,025 | [
"Amber",
"Galaxy"
] | a7094855b5af6be01bbbf6da5fa207bca05e72f8fc60f381826b9b4b3b4a454c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.