import os
import unittest
import pysal
import numpy as np
class Testuser(unittest.TestCase):
def setUp(self):
self.wq = pysal.queen_from_shapefile(
pysal.examples.get_path("columbus.shp"))
self.wr = pysal.rook_from_shapefile(
pysal.examples.get_path("columbus.shp"))
def test_queen_from_shapefile(self):
self.assertAlmostEquals(self.wq.pct_nonzero, 9.82923781757601)
def test_rook_from_shapefile(self):
self.assertAlmostEquals(self.wr.pct_nonzero, 8.329862557267806)
    def test_knnW_from_array(self):
x, y = np.indices((5, 5))
x.shape = (25, 1)
y.shape = (25, 1)
data = np.hstack([x, y])
wnn2 = pysal.knnW_from_array(data, k=2)
wnn4 = pysal.knnW_from_array(data, k=4)
self.assertEquals(set(wnn4.neighbors[0]), set([1, 5, 6, 2]))
self.assertEquals(set(wnn4.neighbors[5]), set([0, 6, 10, 1]))
self.assertEquals(set(wnn2.neighbors[0]), set([1, 5]))
self.assertEquals(set(wnn2.neighbors[5]), set([0, 6]))
self.assertAlmostEquals(wnn2.pct_nonzero, 8.0)
self.assertAlmostEquals(wnn4.pct_nonzero, 16.0)
wnn4 = pysal.knnW_from_array(data, k=4)
self.assertEquals(set(wnn4.neighbors[0]), set([1, 5, 6, 2]))
'''
wnn3e = pysal.knnW(data, p=2, k=3)
self.assertEquals(set(wnn3e.neighbors[0]),set([1, 5, 6]))
wnn3m = pysal.knnW(data, p=1, k=3)
self.assertEquals(set(wnn3m.neighbors[0]), set([1, 5, 2]))
'''
def test_knnW_from_shapefile(self):
wc = pysal.knnW_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.assertAlmostEquals(wc.pct_nonzero, 4.081632653061225)
wc3 = pysal.knnW_from_shapefile(pysal.examples.get_path(
"columbus.shp"), k=3)
self.assertEquals(wc3.weights[1], [1, 1, 1])
self.assertEquals(set(wc3.neighbors[1]), set([3, 0, 7]))
self.assertEquals(set(wc.neighbors[0]), set([2, 1]))
w = pysal.knnW_from_shapefile(pysal.examples.get_path('juvenile.shp'))
self.assertAlmostEquals(w.pct_nonzero, 1.1904761904761904)
w1 = pysal.knnW_from_shapefile(
pysal.examples.get_path('juvenile.shp'), k=1)
self.assertAlmostEquals(w1.pct_nonzero, 0.5952380952380952)
def test_threshold_binaryW_from_array(self):
points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
w = pysal.threshold_binaryW_from_array(points, threshold=11.2)
self.assertEquals(w.weights, {0: [1, 1], 1: [1, 1], 2: [],
3: [1, 1], 4: [1], 5: [1]})
self.assertEquals(w.neighbors, {0: [1, 3], 1: [0, 3], 2: [
], 3: [1, 0], 4: [5], 5: [4]})
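    def test_threshold_distance_reasoning(self):
        # Added cross-check (not part of the original suite): the 11.2
        # threshold used above just exceeds the (10, 10)-(15, 20) distance
        # of sqrt(125) ~= 11.18, while (40, 10) lies farther than 11.2 from
        # every other point and therefore ends up with no neighbors.
        pts = np.array([(10, 10), (20, 10), (40, 10), (15, 20), (30, 20),
                        (30, 30)], dtype=float)
        dists = np.sqrt(((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1))
        self.assertTrue(dists[0, 3] < 11.2)
        np.fill_diagonal(dists, np.inf)
        self.assertTrue(dists[2].min() > 11.2)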
def test_threshold_binaryW_from_shapefile(self):
w = pysal.threshold_binaryW_from_shapefile(pysal.examples.get_path(
"columbus.shp"), 0.62, idVariable="POLYID")
self.assertEquals(w.weights[1], [1, 1])
def test_threshold_continuousW_from_array(self):
points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
wid = pysal.threshold_continuousW_from_array(points, 11.2)
self.assertEquals(wid.weights[0], [0.10000000000000001,
0.089442719099991588])
wid2 = pysal.threshold_continuousW_from_array(points, 11.2, alpha=-2.0)
self.assertEquals(wid2.weights[0], [0.01, 0.0079999999999999984])
def test_threshold_continuousW_from_shapefile(self):
w = pysal.threshold_continuousW_from_shapefile(pysal.examples.get_path(
"columbus.shp"), 0.62, idVariable="POLYID")
self.assertEquals(
w.weights[1], [1.6702346893743334, 1.7250729841938093])
def test_kernelW(self):
points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
kw = pysal.kernelW(points)
self.assertEquals(kw.weights[0], [1.0, 0.50000004999999503,
0.44098306152674649])
self.assertEquals(kw.neighbors[0], [0, 1, 3])
np.testing.assert_array_almost_equal(
kw.bandwidth, np.array([[20.000002],
[20.000002],
[20.000002],
[20.000002],
[20.000002],
[20.000002]]))
def test_min_threshold_dist_from_shapefile(self):
f = pysal.examples.get_path('columbus.shp')
min_d = pysal.min_threshold_dist_from_shapefile(f)
self.assertAlmostEquals(min_d, 0.61886415807685413)
def test_kernelW_from_shapefile(self):
kw = pysal.kernelW_from_shapefile(pysal.examples.get_path(
'columbus.shp'), idVariable='POLYID')
self.assertEquals(set(kw.weights[1]), set([0.0070787731484506233,
0.2052478782400463,
0.23051223027663237,
1.0
]))
np.testing.assert_array_almost_equal(
kw.bandwidth[:3], np.array([[0.75333961], [0.75333961],
[0.75333961]]))
def test_adaptive_kernelW(self):
points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
bw = [25.0, 15.0, 25.0, 16.0, 14.5, 25.0]
kwa = pysal.adaptive_kernelW(points, bandwidths=bw)
self.assertEqual(kwa.weights[0], [1.0, 0.59999999999999998,
0.55278640450004202,
0.10557280900008403])
self.assertEqual(kwa.neighbors[0], [0, 1, 3, 4])
np.testing.assert_array_almost_equal(kwa.bandwidth,
np.array([[25.], [15.], [25.],
[16.], [14.5], [25.]]))
kweag = pysal.adaptive_kernelW(points, function='gaussian')
self.assertEqual(
kweag.weights[0], [0.3989422804014327, 0.26741902915776961,
0.24197074871621341])
np.testing.assert_array_almost_equal(kweag.bandwidth,
np.array([[11.18034101],
[11.18034101],
[20.000002],
[11.18034101],
[14.14213704],
[18.02775818]]))
def test_adaptive_kernelW_from_shapefile(self):
kwa = pysal.adaptive_kernelW_from_shapefile(
pysal.examples.get_path('columbus.shp'))
self.assertEquals(kwa.weights[0], [1.0, 0.03178906767736345,
9.9999990066379496e-08])
np.testing.assert_array_almost_equal(kwa.bandwidth[:3],
np.array([[0.59871832],
[0.59871832],
[0.56095647]]))
def test_build_lattice_shapefile(self):
of = "lattice.shp"
pysal.build_lattice_shapefile(20, 20, of)
w = pysal.rook_from_shapefile(of)
self.assertEquals(w.n, 400)
os.remove('lattice.shp')
os.remove('lattice.shx')
suite = unittest.TestLoader().loadTestsFromTestCase(Testuser)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
# ============================================================================
# End of source file: chhao91/pysal :: pysal/weights/tests/test_user.py
# (Python, bsd-3-clause)
# ============================================================================
#!/usr/bin/env python
# encoding: utf-8
'''
Created by Brian Cherinka on 2016-04-08 14:31:34
Licensed under a 3-clause BSD license.
Revision History:
Initial Version: 2016-04-08 14:31:34 by Brian Cherinka
Last Modified On: 2016-04-08 14:31:34 by Brian
'''
from __future__ import print_function
from __future__ import division
from flask import Blueprint, render_template, session as current_session, request, jsonify
from flask_classy import FlaskView, route
from brain.api.base import processRequest
from marvin import marvindb
from marvin.utils.general.general import (convertImgCoords, parseIdentifier, getDefaultMapPath,
getDapRedux, _db_row_to_dict, get_plot_params)
from brain.utils.general.general import convertIvarToErr
from marvin.core.exceptions import MarvinError
from marvin.tools.cube import Cube
from marvin.tools.maps import _get_bintemps, _get_bintype, _get_template_kin
from marvin.utils.dap.datamodel import get_dap_maplist, get_default_mapset
from marvin.web.web_utils import parseSession
from marvin.web.controllers import BaseWebView
from marvin.api.base import arg_validate as av
from marvin.core.caching_query import FromCache
from marvin.core import marvin_pickle
from collections import OrderedDict
import os
import numpy as np
try:
from sdss_access.path import Path
except ImportError as e:
Path = None
galaxy = Blueprint("galaxy_page", __name__)
def getWebSpectrum(cube, x, y, xyorig=None, byradec=False):
''' Get and format a spectrum for the web '''
webspec = None
try:
if byradec:
spaxel = cube.getSpaxel(ra=x, dec=y, xyorig=xyorig, modelcube=True, properties=False)
else:
spaxel = cube.getSpaxel(x=x, y=y, xyorig=xyorig, modelcube=True, properties=False)
except Exception as e:
specmsg = 'Could not get spaxel: {0}'.format(e)
else:
# get error and wavelength
error = convertIvarToErr(spaxel.spectrum.ivar)
wave = spaxel.spectrum.wavelength
# try to get the model flux
try:
modelfit = spaxel.model.flux
except Exception as e:
modelfit = None
# make input array for Dygraph
        if modelfit is not None:
webspec = [[wave[i], [s, error[i]], [modelfit[i], 0.0]] for i, s in enumerate(spaxel.spectrum.flux)]
else:
webspec = [[wave[i], [s, error[i]]] for i, s in enumerate(spaxel.spectrum.flux)]
specmsg = "Spectrum in Spaxel ({2},{3}) at RA, Dec = ({0}, {1})".format(x, y, spaxel.x, spaxel.y)
return webspec, specmsg
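# The structure handed to Dygraph above looks like this (illustrative numbers
# only, not real data): one row per wavelength bin, with a [flux, error] pair
# and, when a model fit is available, a [model_flux, 0.0] pair, e.g.
#   [[3621.6, [1.02, 0.05], [1.00, 0.0]],
#    [3622.4, [0.98, 0.05], [0.99, 0.0]], ...]
# so the JS plotter can draw the error band alongside the model curve.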
def getWebMap(cube, parameter='emline_gflux', channel='ha_6564',
bintype=None, template_kin=None, template_pop=None):
''' Get and format a map for the web '''
name = '{0}_{1}'.format(parameter.lower(), channel)
webmap = None
try:
maps = cube.getMaps(plateifu=cube.plateifu, mode='local',
bintype=bintype, template_kin=template_kin)
data = maps.getMap(parameter, channel=channel)
except Exception as e:
mapmsg = 'Could not get map: {0}'.format(e)
else:
vals = data.value
ivar = data.ivar
mask = data.mask
webmap = {'values': [it.tolist() for it in data.value],
'ivar': [it.tolist() for it in data.ivar] if data.ivar is not None else None,
'mask': [it.tolist() for it in data.mask] if data.mask is not None else None}
mapmsg = "{0}: {1}-{2}".format(name, maps.bintype, maps.template_kin)
return webmap, mapmsg
def buildMapDict(cube, params, dapver, bintemp=None):
    ''' Build a list of dictionaries of maps

    params - list of string parameter names in the form category:channel
    NOT GENERALIZED
    '''
# split the bintemp
if bintemp:
bintype, temp = bintemp.split('-', 1)
else:
bintype, temp = (None, None)
mapdict = []
    params = params if isinstance(params, list) else [params]
for param in params:
param = str(param)
try:
parameter, channel = param.split(':')
except ValueError as e:
parameter, channel = (param, None)
webmap, mapmsg = getWebMap(cube, parameter=parameter, channel=channel,
bintype=bintype, template_kin=temp)
plotparams = get_plot_params(dapver=dapver, prop=parameter)
mapdict.append({'data': webmap, 'msg': mapmsg, 'plotparams': plotparams})
anybad = [m['data'] is None for m in mapdict]
if any(anybad):
raise MarvinError('Could not get map for one of supplied parameters')
return mapdict
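# Hypothetical usage sketch (identifiers below are illustrative, not from a
# real session); note the 'parameter:channel' form of the map names:
#   mapdict = buildMapDict(cube, ['emline_gflux:ha_6564', 'stellar_vel'],
#                          dapver, bintemp='SPX-GAU-MILESHC')
#   mapdict[0]['data']        # JSON-ready values / ivar / mask arrays
#   mapdict[0]['plotparams']  # plotting parameters for the web heatmap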
def make_nsa_dict(nsa, cols=None):
''' Make/rearrange the nsa dictionary of values '''
# get columns
if not cols:
cols = [k for k in nsa.keys() if 'stokes' not in k]
cols.sort()
# make dictionary
nsadict = {c: nsa[c] for c in cols}
nsadict.update({'elpetro_absmag_i': nsadict['elpetro_absmag'][5]})
nsadict.update({'elpetro_mtol_i': nsadict['elpetro_mtol'][5]})
cols.append('elpetro_absmag_i')
cols.append('elpetro_mtol_i')
cols.sort()
return nsadict, cols
def get_nsa_dict(name, drpver):
''' Gets a NSA dictionary from a pickle or a query '''
nsapath = os.environ.get('MANGA_SCRATCH_DIR', None)
if nsapath and os.path.isdir(nsapath):
nsapath = nsapath
else:
nsapath = os.path.expanduser('~')
nsaroot = os.path.join(nsapath, 'nsa_pickles')
if not os.path.isdir(nsaroot):
os.makedirs(nsaroot)
picklename = '{0}.pickle'.format(name)
nsapickle_file = os.path.join(nsaroot, picklename)
if os.path.isfile(nsapickle_file):
nsadict = marvin_pickle.restore(nsapickle_file)
else:
# make from scratch from db
#nsacache = 'nsa_mpl5' if drpver == 'v2_0_1' else 'nsa_mpl4' if drpver == 'v1_5_1' else None
session = marvindb.session
sampledb = marvindb.sampledb
allnsa = session.query(sampledb.NSA, marvindb.datadb.Cube.plateifu).\
join(sampledb.MangaTargetToNSA, sampledb.MangaTarget,
marvindb.datadb.Cube, marvindb.datadb.PipelineInfo,
marvindb.datadb.PipelineVersion, marvindb.datadb.IFUDesign).\
filter(marvindb.datadb.PipelineVersion.version == drpver).options(FromCache(name)).all()
nsadict = [(_db_row_to_dict(n[0], remove_columns=['pk', 'catalogue_pk']), n[1]) for n in allnsa]
return nsadict
def remove_nans(datadict):
''' Removes objects with nan values from the NSA sample dictionary '''
# collect total unique indices of nan objects
allnans = np.array([])
for key, vals in datadict.items():
if key != 'plateifu':
naninds = np.where(np.isnan(vals))[0]
allnans = np.append(allnans, naninds)
allnans = list(set(allnans))
# delete those targets from all items in the dictionary
for key, vals in datadict.items():
datadict[key] = np.delete(np.asarray(vals), allnans).tolist()
return datadict
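# Example with hypothetical data: a NaN in any non-'plateifu' column drops
# that target from every column, keeping the sample dictionary aligned:
#   remove_nans({'z': [0.02, float('nan')],
#                'plateifu': ['8485-1901', '7443-12701']})
#   -> {'z': [0.02], 'plateifu': ['8485-1901']}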
class Galaxy(BaseWebView):
route_base = '/galaxy/'
def __init__(self):
''' Initialize the route '''
super(Galaxy, self).__init__('marvin-galaxy')
self.galaxy = self.base.copy()
self.galaxy['cube'] = None
self.galaxy['image'] = ''
self.galaxy['spectra'] = 'null'
self.galaxy['maps'] = None
self.galaxy['specmsg'] = None
self.galaxy['mapmsg'] = None
self.galaxy['toggleon'] = 'false'
self.galaxy['nsamsg'] = None
self.galaxy['nsachoices'] = {'1': {'y': 'z', 'x': 'elpetro_logmass', 'xtitle': 'Stellar Mass',
'ytitle': 'Redshift', 'title': 'Redshift vs Stellar Mass'},
'2': {'y': 'elpetro_absmag_g_r', 'x': 'elpetro_absmag_i', 'xtitle': 'AbsMag_i',
'ytitle': 'Abs. g-r', 'title': 'Abs. g-r vs Abs. Mag i'}
}
# self.galaxy['nsachoices'] = {'1': {'y': 'z', 'x': 'sersic_mass', 'xtitle': 'Stellar Mass',
# 'ytitle': 'Redshift', 'title': 'Redshift vs Stellar Mass'}}
# cols = ['z', 'sersic_logmass', 'sersic_n', 'sersic_absmag', 'elpetro_mag_g_r', 'elpetro_th50_r']
self.galaxy['nsaplotcols'] = ['z', 'elpetro_logmass', 'sersic_n', 'elpetro_absmag_i', 'elpetro_absmag_g_r',
'elpetro_th50_r', 'elpetro_absmag_u_r', 'elpetro_absmag_i_z', 'elpetro_ba',
'elpetro_phi', 'elpetro_mtol_i', 'elpetro_th90_r']
def before_request(self, *args, **kwargs):
''' Do these things before a request to any route '''
super(Galaxy, self).before_request(*args, **kwargs)
self.reset_dict(self.galaxy, exclude=['nsachoices', 'nsaplotcols'])
def index(self):
''' Main galaxy page '''
self.galaxy['error'] = 'Not all there are you... Try adding a plate-IFU or manga-ID to the end of the address.'
return render_template("galaxy.html", **self.galaxy)
def get(self, galid):
''' Retrieve info for a given cube '''
# determine type of galid
args = av.manual_parse(self, request, use_params='galaxy')
self.galaxy['id'] = args['galid']
idtype = parseIdentifier(galid)
if idtype in ['plateifu', 'mangaid']:
# set plateifu or mangaid
self.galaxy['idtype'] = idtype
galaxyid = {self.galaxy['idtype']: galid, 'release': self._release}
# Get cube
try:
cube = Cube(**galaxyid)
except MarvinError as e:
self.galaxy['cube'] = None
self.galaxy['error'] = 'MarvinError: {0}'.format(e)
return render_template("galaxy.html", **self.galaxy)
else:
self.galaxy['cube'] = cube
self.galaxy['daplink'] = getDapRedux(release=self._release)
# get SAS url links to cube, rss, maps, image
if Path:
sdss_path = Path()
self.galaxy['image'] = sdss_path.url('mangaimage', drpver=cube._drpver, plate=cube.plate, ifu=cube.ifu, dir3d=cube.dir3d)
cubelink = sdss_path.url('mangacube', drpver=cube._drpver, plate=cube.plate, ifu=cube.ifu)
rsslink = sdss_path.url('mangarss', drpver=cube._drpver, plate=cube.plate, ifu=cube.ifu)
maplink = getDefaultMapPath(release=self._release, plate=cube.plate, ifu=cube.ifu, daptype='SPX-GAU-MILESHC', mode='MAPS')
self.galaxy['links'] = {'cube': cubelink, 'rss': rsslink, 'map': maplink}
else:
self.galaxy['image'] = cube.data.image
# Get the initial spectrum
if cube:
daplist = get_dap_maplist(self._dapver, web=True)
self.galaxy['cube'] = cube
self.galaxy['toggleon'] = current_session.get('toggleon', 'false')
self.galaxy['cubehdr'] = cube.header
self.galaxy['quality'] = cube.qualitybit
self.galaxy['mngtarget'] = cube.targetbit
# make the nsa dictionary
hasnsa = cube.nsa is not None
self.galaxy['hasnsa'] = hasnsa
if hasnsa:
cols = self.galaxy.get('nsaplotcols')
nsadict, nsacols = make_nsa_dict(cube.nsa)
nsatmp = [nsacols.pop(nsacols.index(i)) for i in cols]
nsatmp.extend(nsacols)
self.galaxy['nsacols'] = nsatmp
self.galaxy['nsadict'] = nsadict
self.galaxy['dapmaps'] = daplist
self.galaxy['dapbintemps'] = _get_bintemps(self._dapver)
current_session['bintemp'] = '{0}-{1}'.format(_get_bintype(self._dapver), _get_template_kin(self._dapver))
# TODO - make this general - see also search.py for querystr
self.galaxy['cubestr'] = ("<html><samp>from marvin.tools.cube import Cube<br>cube = \
Cube(plateifu='{0}')<br># access the header<br>cube.header<br># get NSA data<br>\
cube.nsa<br></samp></html>".format(cube.plateifu))
self.galaxy['spaxelstr'] = ("<html><samp>from marvin.tools.cube import Cube<br>cube = \
Cube(plateifu='{0}')<br># get a spaxel<br>spaxel=cube[16, 16]<br>spec = \
spaxel.spectrum<br>wave = spectrum.wavelength<br>flux = spectrum.flux<br>ivar = \
spectrum.ivar<br>mask = spectrum.mask<br>spec.plot()<br></samp></html>".format(cube.plateifu))
self.galaxy['mapstr'] = ("<html><samp>from marvin.tools.maps import Maps<br>maps = \
Maps(plateifu='{0}')<br>print(maps)<br># get an emission \
line map<br>haflux = maps.getMap('emline_gflux', channel='ha_6564')<br>values = \
haflux.value<br>ivar = haflux.ivar<br>mask = haflux.mask<br>haflux.plot()<br>\
</samp></html>".format(cube.plateifu))
else:
            self.galaxy['error'] = 'Error: Galaxy ID {0} must be either a Plate-IFU or MaNGA-ID designation.'.format(galid)
return render_template("galaxy.html", **self.galaxy)
return render_template("galaxy.html", **self.galaxy)
@route('/initdynamic/', methods=['POST'], endpoint='initdynamic')
def initDynamic(self):
''' Route to run when the dynamic toggle is initialized
This creates the web spectrum and dap heatmaps
'''
# get the form parameters
args = av.manual_parse(self, request, use_params='galaxy', required='plateifu')
#self._drpver, self._dapver, self._release = parseSession()
# turning toggle on
current_session['toggleon'] = args.get('toggleon')
# get the cube
cubeinputs = {'plateifu': args.get('plateifu'), 'release': self._release}
cube = Cube(**cubeinputs)
output = {'specstatus': -1, 'mapstatus': -1}
# get web spectrum
webspec, specmsg = getWebSpectrum(cube, cube.ra, cube.dec, byradec=True)
daplist = get_dap_maplist(self._dapver, web=True)
dapdefaults = get_default_mapset(self._dapver)
# build the uber map dictionary
try:
mapdict = buildMapDict(cube, dapdefaults, self._dapver)
mapmsg = None
except Exception as e:
mapdict = [{'data': None, 'msg': 'Error', 'plotparams': None} for m in dapdefaults]
mapmsg = 'Error getting maps: {0}'.format(e)
else:
output['mapstatus'] = 1
if not webspec:
output['error'] = 'Error: {0}'.format(specmsg)
else:
output['specstatus'] = 1
sdss_path = Path()
output['image'] = sdss_path.url('mangaimage', drpver=cube._drpver, plate=cube.plate, ifu=cube.ifu, dir3d=cube.dir3d)
output['spectra'] = webspec
output['specmsg'] = specmsg
output['maps'] = mapdict
output['mapmsg'] = mapmsg
output['dapmaps'] = daplist
output['dapbintemps'] = _get_bintemps(self._dapver)
current_session['bintemp'] = '{0}-{1}'.format(_get_bintype(self._dapver), _get_template_kin(self._dapver))
return jsonify(result=output)
@route('/getspaxel/', methods=['POST'], endpoint='getspaxel')
def getSpaxel(self):
args = av.manual_parse(self, request, use_params='galaxy', required=['plateifu', 'type'], makemulti=True)
#self._drpver, self._dapver, self._release = parseSession()
cubeinputs = {'plateifu': args.get('plateifu'), 'release': self._release}
maptype = args.get('type', None)
if maptype == 'optical':
# for now, do this, but TODO - general processRequest to handle lists and not lists
try:
mousecoords = args.getlist('mousecoords[]', type=float)
except Exception as e:
mousecoords = None
if mousecoords:
pixshape = (args.get('imwidth', type=int), args.get('imheight', type=int))
if (mousecoords[0] < 0 or mousecoords[0] > pixshape[0]) or (mousecoords[1] < 0 or mousecoords[1] > pixshape[1]):
output = {'specmsg': 'Error: requested pixel coords are outside the image range.', 'status': -1}
self.galaxy['error'] = output['specmsg']
else:
# TODO - generalize image file sas_url to filesystem switch, maybe in sdss_access
infile = os.path.join(os.getenv('MANGA_SPECTRO_REDUX'), args.get('image').split('redux/')[1])
arrcoords = convertImgCoords(mousecoords, infile, to_radec=True)
cube = Cube(**cubeinputs)
webspec, specmsg = getWebSpectrum(cube, arrcoords[0], arrcoords[1], byradec=True)
if not webspec:
self.galaxy['error'] = 'Error: {0}'.format(specmsg)
status = -1
else:
status = 1
msg = 'gettin some spaxel at RA/Dec {0}'.format(arrcoords)
output = {'message': msg, 'specmsg': specmsg, 'spectra': webspec, 'status': status}
else:
output = {'specmsg': 'Error getting mouse coords', 'status': -1}
self.galaxy['error'] = output['specmsg']
elif maptype == 'heatmap':
# grab spectrum based on (x, y) coordinates
x = args.get('x', None, type=int)
y = args.get('y', None, type=int)
if all([x, y]):
cube = Cube(**cubeinputs)
webspec, specmsg = getWebSpectrum(cube, x, y, xyorig='lower')
msg = 'gettin some spaxel with (x={0}, y={1})'.format(x, y)
if not webspec:
self.galaxy['error'] = 'Error: {0}'.format(specmsg)
status = -1
else:
status = 1
output = {'message': msg, 'specmsg': specmsg, 'spectra': webspec, 'status': status}
else:
output = {'specmsg': 'Error: X or Y not specified for map', 'status': -1}
self.galaxy['error'] = output['specmsg']
else:
output = {'specmsg': 'Error: No maptype specified in request', 'status': -1}
self.galaxy['error'] = output['specmsg']
return jsonify(result=output)
@route('/updatemaps/', methods=['POST'], endpoint='updatemaps')
def updateMaps(self):
args = av.manual_parse(self, request, use_params='galaxy', required=['plateifu', 'bintemp', 'params[]'], makemulti=True)
#self._drpver, self._dapver, self._release = parseSession()
cubeinputs = {'plateifu': args.get('plateifu'), 'release': self._release}
params = args.getlist('params[]', type=str)
bintemp = args.get('bintemp', None, type=str)
current_session['bintemp'] = bintemp
# get cube (self.galaxy['cube'] does not work)
try:
cube = Cube(**cubeinputs)
except Exception as e:
cube = None
# Try to make the web maps
if not cube:
output = {'mapmsg': 'No cube found', 'maps': None, 'status': -1}
elif not params:
output = {'mapmsg': 'No parameters selected', 'maps': None, 'status': -1}
else:
try:
mapdict = buildMapDict(cube, params, self._dapver, bintemp=bintemp)
except Exception as e:
                output = {'mapmsg': str(e), 'status': -1, 'maps': None}
else:
output = {'mapmsg': None, 'status': 1, 'maps': mapdict}
return jsonify(result=output)
@route('/initnsaplot/', methods=['POST'], endpoint='initnsaplot')
def init_nsaplot(self):
args = av.manual_parse(self, request, use_params='galaxy', required='plateifu')
#self._drpver, self._dapver, self._release = parseSession()
print('args', args)
cubeinputs = {'plateifu': args.get('plateifu'), 'release': self._release}
# get the default nsa choices
nsachoices = self.galaxy.get('nsachoices', None)
if not nsachoices:
nsachoices = {'1': {'y': 'z', 'x': 'elpetro_logmass', 'xtitle': 'Stellar Mass',
'ytitle': 'Redshift', 'title': 'Redshift vs Stellar Mass'},
'2': {'y': 'elpetro_absmag_g_r', 'x': 'elpetro_absmag_i', 'xtitle': 'AbsMag_i',
'ytitle': 'Abs. g-r', 'title': 'Abs. g-r vs Abs. Mag i'}}
# get cube (self.galaxy['cube'] does not work)
try:
cube = Cube(**cubeinputs)
except Exception as e:
cube = None
# get some nsa params
if not cube:
output = {'nsamsg': 'No cube found', 'nsa': None, 'status': -1}
else:
# get the galaxy nsa parameters
cols = self.galaxy.get('nsaplotcols')
try:
nsadict, nsacols = make_nsa_dict(cube.nsa)
nsa = {args.get('plateifu'): nsadict}
except Exception as e:
                output = {'nsamsg': str(e), 'status': -1, 'nsa': None}
else:
# get the sample nsa parameters
try:
nsacache = 'nsa_mpl5' if self._drpver == 'v2_0_1' else 'nsa_mpl4' if self._drpver == 'v1_5_1' else None
nsadict = get_nsa_dict(nsacache, self._drpver)
except Exception as e:
output = {'nsamsg': 'Failed to retrieve sample NSA: {0}'.format(e), 'status': -1, 'nsa': nsa, 'nsachoices': nsachoices}
else:
#nsadict = [(_db_row_to_dict(n[0], remove_columns=['pk', 'catalogue_pk']), n[1]) for n in allnsa]
nsasamp = {c: [n[0][c.split('_i')[0]][5] if 'absmag_i' in c or 'mtol_i' in c else n[0][c] for n in nsadict] for c in cols}
nsasamp['plateifu'] = [n[1] for n in nsadict]
nsasamp = remove_nans(nsasamp)
nsa['sample'] = nsasamp
output = {'nsamsg': None, 'status': 1, 'nsa': nsa, 'nsachoices': nsachoices, 'nsaplotcols': cols}
return jsonify(result=output)
Galaxy.register(galaxy)
# ============================================================================
# End of source file: bretthandrews/marvin ::
# python/marvin/web/controllers/galaxy.py (Python, bsd-3-clause)
# ============================================================================
from unittest.mock import MagicMock
import numpy as np
import pytest
from emukit.bayesian_optimization.acquisitions.expected_improvement import (
ExpectedImprovement,
MeanPluginExpectedImprovement,
)
from emukit.core.interfaces import IModel, IModelWithNoise
from emukit.model_wrappers import GPyModelWrapper
class MockIModel(IModel):
def __init__(self, X, Y):
self._X = X
self._Y = Y
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
def deterministic_test_func(x: np.ndarray) -> np.ndarray:
return np.sin(x * 30 + x ** 2).sum(axis=-1, keepdims=True)
class MockNoiselessModel(MockIModel, IModelWithNoise):
"""
A mock model with zero observation noise (predict() and predict_noiseless() will return the
same predictive distribution).
This model mocks predictions for the deterministic_test_func() (the mean prediction will
be the same as function output).
"""
@staticmethod
def _mean_func(X):
return deterministic_test_func(X)
@staticmethod
def _var_func(X):
return (np.cos(X * 10) + 1.2).sum(axis=-1, keepdims=True)
def predict(self, X):
return self._mean_func(X), self._var_func(X)
def predict_noiseless(self, X):
return self.predict(X)
class MockConstantModel(MockIModel, IModelWithNoise):
"""Model the predicts the same output distribution everywhere"""
def predict(self, X):
# Return mean 1 and variance 8
return np.ones([X.shape[0], 1]), 8 * np.ones([X.shape[0], 1])
def predict_noiseless(self, X):
# Return mean 1 and variance 1
return np.ones([X.shape[0], 1]), np.ones([X.shape[0], 1])
def test_mean_plugin_ei_same_as_standard_on_noiseless():
np.random.seed(42)
X = np.random.randn(10, 3)
Y = deterministic_test_func(X)
model = MockNoiselessModel(X, Y)
mean_plugin_ei = MeanPluginExpectedImprovement(model)
standard_ei = ExpectedImprovement(model)
x_new = np.random.randn(100, 3)
    # Assert that the two expected improvements are equal
assert pytest.approx(standard_ei.evaluate(x_new)) == mean_plugin_ei.evaluate(x_new)
def test_mean_plugin_expected_improvement_returns_expected():
np.random.seed(43)
X = np.random.randn(10, 3)
Y = np.random.randn(10, 1)
model = MockConstantModel(X, Y)
mean_plugin_ei = MeanPluginExpectedImprovement(model)
x_new = np.random.randn(100, 3)
acquisition_values = mean_plugin_ei.evaluate(x_new)
# The mean at every previously observed point will be 1, hence y_minimum will be 1.0.
# The predicted values in the batch should all have mean 1 and variance 1
# The correct expected improvement for Gaussian Y ~ Normal(1, 1), and y_minimum = 1.0 is 0.3989422804014327
assert pytest.approx(0.3989422804014327, abs=0, rel=1e-7) == acquisition_values
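def test_ei_constant_is_standard_normal_density_at_zero():
    # Numerical cross-check of the constant used above (this test is an
    # addition, not part of the original suite): for Y ~ Normal(mu, 1) and
    # y_minimum == mu, the standard EI formula
    #   EI = (y_min - mu) * Phi(z) + sigma * phi(z),  z = (y_min - mu) / sigma
    # collapses to phi(0) = 1 / sqrt(2 * pi).
    assert pytest.approx(0.3989422804014327) == 1.0 / np.sqrt(2 * np.pi)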
# ============================================================================
# End of source file: EmuKit/emukit ::
# tests/emukit/bayesian_optimization/test_mean_plugin_expected_improvement.py
# (Python, apache-2.0)
# ============================================================================
# -*- coding: utf-8 -*-
#
# Copyright 2008 - 2019 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
_TRY_PYSIDE = True
#_TRY_PYSIDE = False
# ============================================================================
# End of source file: dursobr/Pythics :: pythics/settings.py
# (Python, gpl-3.0)
# ============================================================================
import ispyb.model.__future__
import ispyb.model.datacollection
import ispyb.model.sample
def test_dc_no_sample(testdb, testconfig):
ispyb.model.__future__.enable(testconfig)
dc = testdb.get_data_collection(1002287)
assert dc.sample is None
def test_dc_sample_groups(testdb, testconfig):
ispyb.model.__future__.enable(testconfig)
dc = testdb.get_data_collection(1066786)
assert dc.sample.id == 398810
def test_sample_group_no_linked_dcids(testdb, testconfig):
ispyb.model.__future__.enable(testconfig)
sample = ispyb.model.sample.Sample(398816, testdb)
assert str(sample) == "Sample #398816 (not yet loaded from database)"
sample.reload()
assert len(sample.dcids) == 0
assert (
str(sample)
== """\
Sample #398816
Name : thau88
Crystal id : 310037
Container id : 34874
DCIDs : None\
"""
)
assert sample.container.containerid == 34874
def test_sample_group_linked_dcids(testdb, testconfig):
ispyb.model.__future__.enable(testconfig)
sample = ispyb.model.sample.Sample(398810, testdb)
sample.reload()
assert sample.name == "thau8"
assert sample.dcids == [1066786]
assert (
str(sample)
== """\
Sample #398810
Name : thau8
Crystal id : 333301
Container id : 34864
DCIDs : 1066786\
"""
)
def test_get_sample(testdb, testconfig):
sample = testdb.get_sample(398810)
assert isinstance(sample, ispyb.model.sample.Sample)
assert sample.id == 398810
# ============================================================================
# End of source file: DiamondLightSource/ispyb-api :: tests/model/test_sample.py
# (Python, apache-2.0)
# ============================================================================
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss (jonasgliss@gmail.com)
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License a
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""Pyplis module contains the following processing classes and methods.
1. :class:`ImgStack`: Object for storage of 3D image data
#. :class:`PixelMeanTimeSeries`: storage and post analysis of time\
series of average pixel intensities
"""
from __future__ import (absolute_import, division)
from numpy import (vstack, empty, ones, asarray, sum, dstack, float32, zeros,
poly1d, polyfit, argmin, where, logical_and, rollaxis,
delete, hstack)
from scipy.ndimage.filters import gaussian_filter1d, median_filter
from copy import deepcopy
from datetime import datetime, timedelta
from matplotlib.pyplot import subplots
from matplotlib.dates import date2num, DateFormatter
from pandas import Series, concat, DatetimeIndex
from cv2 import pyrDown, pyrUp
from os.path import join, exists, dirname, basename, isdir, abspath
from astropy.io import fits
import six
from pyplis import logger
from .image import Img
from .setupclasses import Camera
from .helpers import to_datetime, make_circular_mask
from .glob import DEFAULT_ROI
class ImgStack(object):
"""Image stack object.
The images are stacked into a 3D numpy array, note, that for large datasets
this may cause MemoryErrors. This object is for instance used to perform
a DOAS field of view search (see also :mod:`doascalib`).
It provides basic image processing functionality, for instance changing
the pyramid level, time merging with other time series data (e.g. DOAS
CD time series vector).
The most important attributes (data objects) are:
1. ``self.stack``: 3D numpy array containing stacked images. The first
axis corresponds to the time axis, allowing for easy image access,
e.g. ``self.stack[10]`` would yield the 11th image in the time series.
2. ``self.start_acq``: 1D array containing acquisition time stamps
(datetime objects)
3. ``self.texps``: 1D array conaining exposure times in s for each
image
4. ``self.add_data``: 1D array which can be used to store additional
data for each image (e.g. DOAS CD vector)
Todo
----
1. Include optical flow routine for emission rate retrieval
Parameters
----------
height : int
height of images to be stacked
width : int
width of images to be stacked
    img_num : int
        number of images to be stacked
dtype :
numerical data type (e.g. uint8, makes the necessary space smaller,
default: float32)
stack_id : str
string ID of this object ("")
img_prep : dict
additional information about the preparation state of the images
(e.g. roi, gauss pyramid level, dark corrected?, blurred?)
**stack_data
can be used to pass stack data directly
"""
def __init__(self, height=0, width=0, img_num=0, dtype=float32,
stack_id="", img_prep=None, camera=None, **stack_data):
self.stack_id = stack_id
self.dtype = dtype
self.current_index = 0
self.stack = None
self.start_acq = None
self.texps = None
self.add_data = None
self._access_mask = None
if img_prep is None:
img_prep = {"pyrlevel": 0}
self.img_prep = img_prep
self.roi_abs = DEFAULT_ROI
self._cam = Camera()
self.init_stack_array(height, width, img_num)
if "stack" in stack_data:
self.set_stack_data(**stack_data)
if isinstance(camera, Camera):
self.camera = camera
def init_stack_array(self, height=0, width=0, img_num=0):
"""Initialize the actual stack data array.
Note
----
All current data stored in :attr:`stack`, :attr:`start_acq`,
:attr:`texps`, :attr:`add_data` will be deleted.
Parameters
----------
height : int
height of images to be stacked
width : int
width of images to be stacked
        img_num : int
            number of images to be stacked
"""
try:
self.stack = empty((int(img_num), int(height), int(width))).\
astype(self.dtype)
except MemoryError:
raise MemoryError("Could not initiate empty 3D numpy array "
"(d, h, w): (%s, %s, %s)" % (img_num, height,
width))
self.start_acq = asarray([datetime(1900, 1, 1)] * img_num)
self.texps = zeros(img_num, dtype=float32)
self.add_data = zeros(img_num, dtype=float32)
self._access_mask = zeros(img_num, dtype=bool)
self.current_index = 0
@property
def last_index(self):
"""Return last index."""
return self.num_of_imgs - 1
@property
def start(self):
"""Return start time stamp of first image."""
try:
i = self.start_acq[self._access_mask][0]
add = timedelta(self.texps[self._access_mask][0] / 86400.)
return i + add
except IndexError:
raise IndexError("Stack is empty...")
except BaseException:
raise ValueError("Start acquisition time could accessed in stack")
@property
def stop(self):
"""Return start time stamp of first image."""
try:
i = self.start_acq[self._access_mask][-1]
add = timedelta(self.texps[self._access_mask][-1] / 86400.)
return i + add
except IndexError:
raise IndexError("Stack is empty...")
except BaseException:
raise ValueError("Start acquisition time could accessed in stack")
@property
def time_stamps(self):
"""Acq. time stamps of all images."""
try:
dts = ([timedelta(x / (2 * 86400.)) for x in self.texps])
return self.start_acq + asarray(dts)
except BaseException:
raise ValueError("Failed to access information about acquisition "
"time stamps and / or exposure times")
@property
def pyrlevel(self):
"""Gauss pyramid level of images in stack."""
return self.img_prep["pyrlevel"]
@property
def camera(self):
"""Camera object assigned to stack."""
return self._cam
@camera.setter
def camera(self, value):
if isinstance(value, Camera):
self._cam = value
else:
raise TypeError("Need Camera object...")
@property
def num_of_imgs(self):
"""Depth of stack."""
return self.stack.shape[0]
def check_index(self, idx=0):
if 0 <= idx <= self.last_index:
return
elif idx == self.num_of_imgs:
self._extend_stack_array()
else:
raise IndexError("Invalid index %d for inserting image in stack "
"with current depth %d" % (idx, self.num_of_imgs))
def _extend_stack_array(self):
"""Extend the first index of the stack array."""
h, w = self.shape[1:]
try:
self.stack = vstack((self.stack, empty((1, h, w))))
except MemoryError:
raise MemoryError("Cannot add more data to stack due to memory "
"overflow...")
self.start_acq = hstack((self.start_acq, [datetime(1900, 1, 1)]))
self.texps = hstack((self.texps, [0.0]))
self.add_data = hstack((self.add_data, [0.0]))
self._access_mask = hstack((self._access_mask, [False]))
def insert_img(self, pos, img_arr, start_acq=datetime(1900, 1, 1),
texp=0.0, add_data=0.0):
"""Insert an image into the stack at provided index.
Parameters
----------
pos : int
Insert position of img in stack
img_arr : array
image data (must have same dimension than ``self.stack.shape[:2]``,
can also be of type :obj:`Img`)
start_acq : datetime
acquisition time stamp of image, defaults to datetime(1900, 1, 1)
texp : float
exposure time of image (in units of s), defaults to 0.0
add_data
arbitrary additional data appended to list :attr:`add_data`
"""
try:
img_arr = img_arr.img
except BaseException:
pass
if sum(self.shape) == 0:
h, w = img_arr.shape
self.init_stack_array(height=h, width=w, img_num=1)
self.check_index(pos)
self.stack[pos] = img_arr
self.start_acq[pos] = to_datetime(start_acq)
self.texps[pos] = texp
self.add_data[pos] = add_data
self._access_mask[pos] = True
def add_img(self, img_arr, start_acq=datetime(1900, 1, 1), texp=0.0,
add_data=0.0):
"""Add image at current index position.
The image is inserted at the current index position ``current_index``
which is increased by 1 afterwards. If the latter exceeds the dimension
of the actual stack data array :attr:`stack`, the stack shape will be
extended by 1.
Parameters
----------
img_arr : array
image data (must have same dimension than ``self.stack.shape[:2]``)
start_acq : datetime
acquisition time stamp of image, defaults to datetime(1900, 1, 1)
texp : float
exposure time of image (in units of s), defaults to 0.0
add_data
arbitrary additional data appended to list :attr:`add_data`
"""
# ==============================================================================
# if self.current_index >= self.last_index:
# print self.last_index
# raise IndexError("Last stack index reached...")
# ==============================================================================
self.insert_img(self.current_index, img_arr, start_acq, texp, add_data)
self.current_index += 1
def make_circular_access_mask(self, cx, cy, radius):
"""Create a circular mask for stack.
Parameters
----------
cx : int
x position of centre
        cy : int
y position of centre
radius : int
radius
Returns
-------
array
circular mask (use e.g. like ``img[mask]`` which will return a
1D vector containing all pixel values of ``img`` that fall into
the mask)
"""
# cx, cy = self.img_prep.map_coordinates(pos_x_abs, pos_y_abs)
h, w = self.stack.shape[1:]
return make_circular_mask(h, w, cx, cy, radius)
def set_stack_data(self, stack, start_acq=None, texps=None):
"""Set the current data based on input.
Parameters
----------
stack : array
3D numpy array containing the image stack data
start_acq : :obj:`array`, optional
array containing acquisition time stamps
texps : obj:`array`, optional
array containing exposure times
"""
num = stack.shape[0]
self.stack = stack
if start_acq is None:
start_acq = asarray([datetime(1900, 1, 1)] * num)
self.start_acq = start_acq
if texps is None:
texps = zeros(num, dtype=float32)
self.texps = texps
self._access_mask = ones(num, dtype=bool)
def get_data(self):
"""Get stack data (containing of stack, acq. and exp. times).
Returns
-------
tuple
3-element tuple containing
- :obj:`array`: stack data
- :obj:`array`: acq. time stamps
- :obj:`array`: exposure times
"""
m = self._access_mask
return (self.stack[m], asarray(self.time_stamps)[m],
asarray(self.texps)[m])
def apply_mask(self, mask):
"""Convolves the stack data with a input mask along time axis.
Parameter
---------
mask : array
2D bool mask for image pixel access
Returns
-------
tuple
3-element tuple containing
- :obj:`array`: 3D numpy array containing convolved stack data
- :obj:`array`: acq. time stamps
- :obj:`array`: exposure times
"""
# mask_norm = boolMask.astype(float32)/sum(boolMask)
d = self.get_data()
# [:, :, newaxis])#, d[1], d[2])
data_conv = (d[0] * mask.astype(float32))
return (data_conv, d[1], d[2])
def get_time_series(self, pos_x=None, pos_y=None, radius=1, mask=None):
"""Get time series in a ROI.
Retrieve time series at a given pixel position *in stack
coordinates* in a circular pixel neighbourhood.
Parameters
----------
pos_x : int
x position of center pixel on detector
pos_y : int
y position of center pixel on detector
radius : float
radius of pixel disk on detector (centered around pos_x, pos_y,
default: 1)
mask : array
mask for image pixel access, default is None, if the mask is
specified and valid (i.e. same shape than images in stack) then
the other three input parameter are ignored
Returns
-------
tuple
2-element tuple containing
- :obj:`Series`: time series data
- :obj:`array`: pixel access mask used to convolve stack images
"""
d = self.get_data()
try:
data_mask, start_acq, texps = self.apply_mask(mask)
except BaseException:
if not radius > 0:
raise ValueError("Invalid input for param radius (3. pos): "
"value must be larger than 0, got %d"
% radius)
if radius == 1:
mask = zeros(self.shape[1:]).astype(bool)
mask[pos_y, pos_x] = True
s = Series(d[0][self._access_mask, pos_y, pos_x], d[1])
return s, mask
mask = self.make_circular_access_mask(pos_x, pos_y, radius)
data_mask, start_acq, texps = self.apply_mask(mask)
values = data_mask.sum((1, 2)) / float(sum(mask))
return Series(values, start_acq), mask
def merge_with_time_series(self, time_series, method="average",
**kwargs):
"""High level wrapper for data merging.
Choose from either of three methods to perform an index merging based
on time stamps of stack and of other time series data (provided on
input).
Parameters
----------
time_series : Series
time series data supposed to be merged with stack data
method : str
merge method, currently available methods are:
- average: determine new stack containing images averaged based
on start / stop time stamps of each datapoint in input
``time_series`` (requires corresponding data to be available
in input, i.e. ``time_series`` must be of type
:class:`DoasResults` of ``pydoas`` library).
- nearest: perform merging based on nearest datapoint per image
- interpolation: perform cross interpolation onto unified time
index array from stack and time series data
**kwargs
additional keyword args specifying additional merge settings (e.g.
``itp_type=quadratic`` in case ``method=interpolation`` is used)
Returns
-------
tuple
2-element tuple containing
- :obj:`ImgStack`: new stack containing merged data
- :obj:`Series`: merged time series data
"""
if not isinstance(time_series, Series):
raise TypeError("Could not merge stack data with input data: "
"wrong type: %s" % type(time_series))
if method == "average":
try:
return self._merge_tseries_average(time_series, **kwargs)
except BaseException:
logger.info("Failed to merge data using method average, trying "
"method nearest instead")
method = "nearest"
if method == "nearest":
return self._merge_tseries_nearest(time_series, **kwargs)
elif method == "interpolation":
return self._merge_tseries_cross_interpolation(time_series,
**kwargs)
else:
raise TypeError("Unkown merge type: %s. Choose from "
"[nearest, average, interpolation]")
def _merge_tseries_nearest(self, time_series):
"""Find nearest in time image for each time stamp in input series.
Find indices (and time differences) in input time series of nearest
data point for each image in this stack. Then, get rid of all indices
showing double occurences using time delta information.
"""
stack, time_stamps, texps = self.get_data()
nearest_idxs, del_ts = self.get_nearest_indices(time_series.index)
img_idxs = []
spec_idxs_final = []
del_ts_abs = []
for idx in range(min(nearest_idxs), max(nearest_idxs) + 1):
logger.info("Current tseries index %s" % idx)
matches = where(nearest_idxs == idx)[0]
if len(matches) > 0:
del_ts_temp = del_ts[matches]
spec_idxs_final.append(idx)
del_ts_abs.append(min(del_ts_temp))
img_idxs.append(matches[argmin(del_ts_temp)])
series_new = time_series[spec_idxs_final]
try:
series_new.fit_errs = time_series.fit_errs[spec_idxs_final]
except BaseException:
pass
stack_new = self.stack[img_idxs]
texps_new = asarray(self.texps[img_idxs])
start_acq_new = asarray(self.start_acq[img_idxs])
stack_obj_new = ImgStack(stack_id=self.stack_id,
img_prep=self.img_prep, stack=stack_new,
start_acq=start_acq_new, texps=texps_new)
stack_obj_new.roi_abs = self.roi_abs
stack_obj_new.add_data = series_new
return (stack_obj_new, series_new)
def _merge_tseries_cross_interpolation(self, time_series,
itp_type="linear"):
"""Merge this stack with input data using interpolation.
:param Series time_series_data: pandas Series object containing time
series data (e.g. DOAS column densities)
:param str itp_type: interpolation type (passed to
:class:`pandas.DataFrame` which does the interpolation, default is
linear)
"""
h, w = self.shape[1:]
stack, time_stamps, _ = self.get_data()
# first crop time series data based on start / stop time stamps
time_series = self.crop_other_tseries(time_series)
time_series.name = None
if not len(time_series) > 0:
raise IndexError("Time merging failed, data does not overlap")
# interpolate exposure times
s0 = Series(self.texps, time_stamps)
try:
errs = Series(time_series.fit_errs, time_series.index)
df0 = concat([s0, time_series, errs], axis=1).\
interpolate(itp_type).dropna()
except BaseException:
df0 = concat([s0, time_series], axis=1).\
interpolate(itp_type).dropna()
new_num = len(df0[0])
if not new_num >= self.num_of_imgs:
raise ValueError("Unexpected error, length of merged data "
"array does not exceed length of inital image "
"stack...")
# create new arrays for the merged stack
new_stack = empty((new_num, h, w))
new_acq_times = df0[0].index
new_texps = df0[0].values
for i in range(h):
for j in range(w):
logger.info("Stack interpolation active...: current img row (y):"
"%s (%s)" % (i, j))
# get series from stack at current pixel
series_stack = Series(stack[:, i, j], time_stamps)
# create a dataframe
df = concat([series_stack, df0[1]], axis=1).\
interpolate(itp_type).dropna()
# throw all N/A values
# df = df.dropna()
new_stack[:, i, j] = df[0].values
stack_obj = ImgStack(new_num, h, w,
stack_id=self.stack_id,
img_prep=self.img_prep)
stack_obj.roi_abs = self.roi_abs
# print new_stack.shape, new_acq_times.shape, new_texps.shape
stack_obj.set_stack_data(new_stack, new_acq_times, new_texps)
new_series = df[1]
try:
new_series.fit_errs = df0[2].values
except BaseException:
logger.info("Failed to access / process errors on time series data")
return (stack_obj, new_series)
def _merge_tseries_average(self, time_series):
"""Make new stack of averaged images based on input start / stop arrays.
The averaging is based on the start / stop time stamps (e.g. of
measured spectra) specified by two input arrays.
These arrays must have the same length.
The method loops over these arrays indices and at each iteration step
        k, all images (within this stack) falling into the corresponding
start / stop interval are averaged and added to a new stack of averaged
images. Indices k (of the input arrays) for which
no images can be found are added to the list ``bad_indices`` (second
return parameter) and have to be removed from the corresponding data
in case, these data (e.g. DOAS SO2 CD time series) is supposed to be
compared with the averaged stack.
Parameters
----------
time_series : DoasResults
Time series containing DOAS results, including arrays
for start / stop acquisition time stamps (required for averaging)
Returns
-------
tuple
2-element tuple containing
- :class:`ImgStack`: new stack object with averaged images
- :obj:`list`: list of bad indices (where no overlap was found)
"""
try:
if not time_series.has_start_stop_acqtamps():
raise ValueError("No start / stop acquisition time stamps "
"available in input data...")
start_acq = asarray(time_series.start_acq)
stop_acq = asarray(time_series.stop_acq)
except BaseException:
raise
stack, times, texps = self.get_data()
h, w = stack.shape[1:]
num = len(start_acq)
# new_stack = empty((h, w, self.num_of_imgs))
new_acq_times = []
new_texps = []
bad_indices = []
counter = 0
for k in range(num):
i = start_acq[k]
f = stop_acq[k]
texp = (f - i).total_seconds()
cond = (times >= i) & (times < f)
if sum(cond) > 0:
# ==============================================================================
# print ("Found %s images for spectrum #%s (of %s)"
# %(sum(cond), k, num))
# ==============================================================================
im = stack[cond].mean(axis=0)
if counter == 0:
new_stack = im
else:
new_stack = dstack((new_stack, im))
new_acq_times.append(i + (f - i) / 2)
# img_avg_info.append(sum(cond))
new_texps.append(texp)
counter += 1
else:
bad_indices.append(k)
new_stack = rollaxis(new_stack, 2)
stack_obj = ImgStack(len(new_texps), h, w,
stack_id=self.stack_id,
img_prep=self.img_prep)
stack_obj.roi_abs = self.roi_abs
stack_obj.set_stack_data(new_stack, asarray(new_acq_times),
asarray(new_texps))
tseries = time_series.drop(time_series.index[bad_indices])
try:
errs = delete(time_series.fit_errs, bad_indices)
tseries.fit_errs = errs
except BaseException:
pass
return (stack_obj, tseries)
"""Helpers
"""
def crop_other_tseries(self, time_series):
"""Crops other time series object based on start / stop time stamps."""
# ==============================================================================
# start = self.start - self.total_time_period_in_seconds() * tol_borders
# stop = self.stop + self.total_time_period_in_seconds() * tol_borders
# ==============================================================================
cond = logical_and(time_series.index >= self.start,
time_series.index <= self.stop)
new = time_series[cond]
try:
new.fit_errs = new.fit_errs[cond]
except BaseException:
pass
return new
def total_time_period_in_seconds(self):
"""Return start time stamp of first image."""
return (self.stop - self.start).total_seconds()
def get_nearest_indices(self, tstamps_other):
"""Find indices of time stamps nearest to img acq. time stamps.
Parameters
----------
tstamps_other :
datetime, or datetime array of other time series for which closest
index / indices are searched
"""
idx = []
delt = []
img_stamps = self.time_stamps[self._access_mask]
for tstamp in img_stamps:
diff = [x.total_seconds() for x in abs(tstamps_other - tstamp)]
delt.append(min(diff))
idx.append(argmin(diff))
return asarray(idx), asarray(delt)
def get_nearest_img(self, time_stamp):
"""Return stack image which is nearest to input timestamp.
Searches the nearest image(s) with respect to input datetime(s)
:param (datetime, ndarray) time_stamps: the actual time stamp(s) (for
instance from another time series object)
"""
raise NotImplementedError
def has_data(self):
"""Return bool.""" # fixme: improve this doc
return bool(sum(self._access_mask))
def sum(self, *args, **kwargs):
"""Sum over all pixels of stack.
Parameters
----------
*args
non-keyword arguments passed to :func:`sum` of numpy array
**kwargs
keyword arguments passed to :func:`sum` of numpy array
Returns
-------
float
result of summation operation
"""
return self.stack.sum(*args, **kwargs)
def mean(self, *args, **kwargs):
"""Apply numpy.mean function to stack data.
:param *args: non keyword arguments passed to :func:`numpy.mean`
applied to stack data
:param **kwargs: keyword arguments passed to :func:`numpy.mean`
applied to stack data
"""
return self.stack.mean(*args, **kwargs)
def std(self, *args, **kwargs):
"""Apply numpy.std function to stack data.
:param *args: non keyword arguments passed to :func:`numpy.std`
applied to stack data
:param **kwargs: keyword arguments passed to :func:`numpy.std`
applied to stack data
"""
return self.stack.std(*args, **kwargs)
@property
def shape(self):
"""Return stack shape."""
return self.stack.shape
@property
def ndim(self):
"""Return stack dimension."""
return self.stack.ndim
"""Plots / visualisation"""
def show_img(self, index=0):
"""Show image at input index.
Parameters
----------
index : int
index of image in stack
"""
stack, ts, _ = self.get_data()
im = Img(stack[index], start_acq=ts[index], texp=self.texps[index])
im.edit_log.update(self.img_prep)
im.roi_abs = self.roi_abs
return im.show()
def pyr_down(self, steps=0):
"""Reduce the stack image size using gaussian pyramid.
Parameters
----------
steps : int
steps down in the pyramide
Returns
-------
ImgStack
new, downscaled image stack object
"""
if not steps:
return
h, w = Img(self.stack[0]).pyr_down(steps).shape
prep = deepcopy(self.img_prep)
new_stack = ImgStack(height=h, width=w, img_num=self.num_of_imgs,
stack_id=self.stack_id, img_prep=prep)
for i in range(self.shape[0]):
im = self.stack[i]
for k in range(steps):
im = pyrDown(im)
new_stack.add_img(img_arr=im, start_acq=self.start_acq[i],
texp=self.texps[i], add_data=self.add_data[i])
new_stack._format_check()
new_stack.img_prep["pyrlevel"] += steps
return new_stack
def pyr_up(self, steps):
"""Increasing the image size using gaussian pyramide.
:param int steps: steps down in the pyramide
Algorithm used: :func:`cv2.pyrUp`
"""
if not steps:
return
h, w = Img(self.stack[0]).pyr_up(steps).shape
prep = deepcopy(self.img_prep)
new_stack = ImgStack(height=h, width=w, img_num=self.num_of_imgs,
stack_id=self.stack_id, img_prep=prep)
for i in range(self.shape[0]):
im = self.stack[i]
for k in range(steps):
im = pyrUp(im)
new_stack.add_img(img_arr=im, start_acq=self.start_acq[i],
texp=self.texps[i], add_data=self.add_data[i])
new_stack._format_check()
new_stack.img_prep["pyrlevel"] -= steps
return new_stack
def to_pyrlevel(self, final_state=0):
"""Down / upscale image to a given pyramide level."""
steps = final_state - self.img_prep["pyrlevel"]
if steps > 0:
return self.pyr_down(steps)
elif steps < 0:
return self.pyr_up(-steps)
else:
return self
def duplicate(self):
"""Return deepcopy of this object."""
return deepcopy(self)
def _format_check(self):
"""Check if all relevant data arrays have the same length."""
if not all([len(x) == self.num_of_imgs for x in [self.add_data,
self.texps,
self._access_mask,
self.start_acq]]):
raise ValueError("Mismatch in array lengths of stack data, check"
"add_data, texps, start_acq, _access_mask")
def load_stack_fits(self, file_path):
"""Load stack object (fits).
Note
----
FITS stores in Big-endian and needs to be converted into little-endian
(see `this issue <https://github.com/astropy/astropy/issues/1156>`__).
We follow the suggested fix and use::
byteswap().newbyteorder()
on any loaded data array.
Parameters
----------
file_path : str
file path of stack
"""
if not exists(file_path):
raise IOError("ImgStack could not be loaded, path does not exist")
hdu = fits.open(file_path)
self.set_stack_data(hdu[0].data.byteswap().newbyteorder().
astype(self.dtype))
prep = Img().edit_log
for key, val in six.iteritems(hdu[0].header):
if key.lower() in prep.keys():
self.img_prep[key.lower()] = val
self.stack_id = hdu[0].header["stack_id"]
try:
times = hdu[1].data["start_acq"].byteswap().newbyteorder()
self.start_acq = asarray([datetime.strptime(x, "%Y%m%d%H%M%S%f")
for x in times])
except BaseException:
logger.warning("Failed to import acquisition times")
try:
self.texps = asarray(
hdu[1].data["texps"].byteswap().newbyteorder())
except BaseException:
logger.warning("Failed to import exposure times")
try:
self._access_mask = asarray(hdu[1].data["_access_mask"].
byteswap().newbyteorder())
except BaseException:
logger.warning("Failed to import data access mask")
try:
self.add_data = asarray(hdu[1].data["add_data"].byteswap().
newbyteorder())
except BaseException:
logger.warning("Failed to import data additional data")
self.roi_abs = hdu[2].data["roi_abs"].byteswap().\
newbyteorder()
self._format_check()
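    # Standalone illustration of the endianness note in the docstring above
    # (plain numpy, no pyplis objects involved):
    #
    #   import numpy
    #   big = numpy.arange(3, dtype='>f4')      # big-endian, as read from FITS
    #   native = big.byteswap().newbyteorder()  # native byte order, safe to use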
def save_as_fits(self, save_dir=None, save_name=None,
overwrite_existing=True):
"""Save stack as FITS file."""
self._format_check()
# returns abspath of current wkdir if None
save_dir = abspath(save_dir)
if not isdir(save_dir): # save_dir is a file path
save_name = basename(save_dir)
save_dir = dirname(save_dir)
if save_name is None:
save_name = ("pyplis_imgstack_id_%s_%s_%s_%s.fts"
% (self.stack_id,
self.start.strftime("%Y%m%d"),
self.start.strftime("%H%M"),
self.stop.strftime("%H%M")))
else:
save_name = save_name.split(".")[0] + ".fts"
logger.info("DIR: %s" % save_dir)
logger.info("Name: %s" % save_name)
hdu = fits.PrimaryHDU()
start_acq_str = [x.strftime("%Y%m%d%H%M%S%f") for x in self.start_acq]
col1 = fits.Column(name="start_acq", format="25A", array=start_acq_str)
col2 = fits.Column(name="texps", format="D", array=self.texps)
col3 = fits.Column(name="_access_mask", format="L",
array=self._access_mask)
col4 = fits.Column(name="add_data", format="D", array=self.add_data)
cols = fits.ColDefs([col1, col2, col3, col4])
arrays = fits.BinTableHDU.from_columns(cols)
col5 = fits.Column(name="roi_abs", format="I", array=self.roi_abs)
roi_abs = fits.BinTableHDU.from_columns([col5])
hdu.data = self.stack
hdu.header.update(self.img_prep)
hdu.header["stack_id"] = self.stack_id
hdu.header.append()
hdulist = fits.HDUList([hdu, arrays, roi_abs])
path = join(save_dir, save_name)
if exists(path):
logger.info("Stack already exists at %s and will be overwritten"
% path)
try:
hdulist.writeto(path, clobber=overwrite_existing)
except BaseException:
logger.warning("Failed to save stack to FITS File "
"(check previous warnings)")
"""Magic methods"""
def __str__(self):
raise NotImplementedError
def __sub__(self, other):
"""Subtract data.
        :param other: object to be subtracted (e.g. offband stack)
"""
new = self.duplicate()
try:
new.stack = self.stack - other.stack
new.stack_id = "%s - %s" % (self.stack_id, other.stack_id)
except BaseException:
new.stack = self.stack - other
new.stack_id = "%s - %s" % (self.stack_id, other)
return new
def find_registration_shift_optflow(on_img, off_img,
roi_abs=DEFAULT_ROI, **flow_settings):
"""Search average shift between two images using optical flow.
Computes optical flow between two input images and determines the
registration shift based on peaks in two histograms of the orientation
    angle distribution and vector magnitude distribution of the retrieved
flow field. The histogram analysis may be reduced to a certain ROI in the
images.
The default settings used here correspond to the settings suggested by
Peters et al., Use of motion estimation algorithms for improved flux
measurements using SO2 cameras, JVGR, 2015.
Parameters
----------
on_img : Img
onband image containing (preferably fixed) objects in the scene that
can be tracked
off_img : Img
corresponding offband image (ideally recorded at the same time)
roi_abs : list
if specified, the optical flow histogram parameters are retrieved from
the flow field within this ROI (else, the whole image is used)
**flow_settings
additional keyword args specifying the optical flow computation and
post analysis settings (see
:class:`pyplis.plumespeed.FarnebackSettings` for details)
Returns
-------
tuple
2-element tuple containing
- float: shift in x-direction
- float: shift in y-direction
"""
if not on_img.shape == off_img.shape:
raise ValueError("Shape mismatch between input images")
if on_img.pyrlevel != 0:
logger.warning("Input images are at pyramid level %d and registration shift "
"will be computed for this pyramid level")
# from pyplis import OptflowFarneback
# flow = OptflowFarneback(on_img, off_img, **flow_settings)
raise NotImplementedError("Under development")
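# --- Hedged sketch (not part of the pyplis API): one possible implementation
# of the histogram-based registration shift described above. It assumes
# OpenCV (cv2) is available and operates on plain 2D arrays; the full
# FarnebackSettings post-analysis of pyplis is replaced here by simple
# histogram peaks of the flow orientation and magnitude.
def _find_registration_shift_optflow_sketch(on_arr, off_arr, bins=36):
    import cv2
    import numpy as np
    def to_u8(a):
        # Farneback expects 8-bit single channel input
        a = np.asarray(a, dtype=float)
        return (255 * (a - a.min()) / (a.max() - a.min() + 1e-12)).astype("uint8")
    flow = cv2.calcOpticalFlowFarneback(to_u8(on_arr), to_u8(off_arr), None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    dx, dy = flow[..., 0].ravel(), flow[..., 1].ravel()
    mag = np.hypot(dx, dy)
    ang = np.degrees(np.arctan2(dy, dx))
    # peak of the orientation histogram -> predominant shift direction
    cnt_a, edges_a = np.histogram(ang, bins=bins, range=(-180, 180))
    ang_peak = 0.5 * (edges_a[cnt_a.argmax()] + edges_a[cnt_a.argmax() + 1])
    # peak of the magnitude histogram -> predominant shift length
    cnt_m, edges_m = np.histogram(mag, bins=bins)
    mag_peak = 0.5 * (edges_m[cnt_m.argmax()] + edges_m[cnt_m.argmax() + 1])
    # decompose the dominant flow vector into x / y shift components
    return (mag_peak * np.cos(np.radians(ang_peak)),
            mag_peak * np.sin(np.radians(ang_peak)))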
class PixelMeanTimeSeries(Series):
"""A time series of mean pixel values.
This class implements a ``pandas.Series`` object with extended
functionality representing time series data of pixel mean values in a
certain image region.
.. note::
        This object is only used to store results of a mean series analysis
        in a certain ROI; it does not include any algorithms for actually
        calculating the series
"""
std = None
texps = None
img_prep = {}
roi_abs = None
poly_model = None
def __init__(self, data, start_acq, std=None, texps=None, roi_abs=None,
img_prep=None, **kwargs):
"""Initialize pixel mean time series.
:param ndarray data: data array
(is passed into pandas Series init -> ``self.values``)
:param ndarray start_acq: array containing acquisition time stamps
(is passed into pandas Series init -> ``self.index``)
:param ndarray std: array containing standard deviations
:param ndarray texps: array containing exposure times
:param list roi_abs: image area from which data was extracted, list of
shape: ``[x0, y0, x1, y1]``
:param dict img_prep: dictionary containing information about image
preparation settings (e.g. blurring, etc..) or other
important information which may need to be stored
:param **kwargs: additional keyword parameters which are passed to
the initiation of the :class:`pandas.Series` object
"""
super(PixelMeanTimeSeries, self).__init__(data, start_acq, **kwargs)
if img_prep is None:
img_prep = {}
try:
if len(texps) == len(data):
self.texps = texps
except BaseException:
self.texps = zeros(len(data), dtype=float32)
try:
if len(std) == len(data):
self.std = std
except BaseException:
self.std = zeros(len(data), dtype=float32)
self.img_prep = img_prep
self.roi_abs = roi_abs
for key, val in six.iteritems(kwargs):
self[key] = val
@property
def start(self):
return self.index[0]
@property
def stop(self):
return self.index[-1]
def get_data_normalised(self, texp=None):
"""Normalise the mean value to a given exposure time.
:param float texp (None): the exposure time to which all deviating
times will be normalised. If None, the values will be normalised
to the largest available exposure time
:return: A new :class:`PixelMeanTimeSeries`instance with normalised
data
"""
try:
if texp is None:
texp = self.texps.max()
facs = texp / self.texps
ts = self.texps * facs
return PixelMeanTimeSeries(self.values * facs, self.index,
self.std, ts, self.roi_abs,
self.img_prep)
except Exception as e:
logger.info("Failed to normalise data bases on exposure times:\n%s\n\n"
% repr(e))
def fit_polynomial(self, order=2):
"""Fit polynomial to data series.
:param int order: order of polynomial
:returns:
- poly1d, the fitted polynomial
"""
s = self.dropna()
num = len(s)
if num == 1:
raise ValueError("Could not fit polynomial to PixelMeanTimeSeries"
" object: only one data point available")
elif num == 2:
logger.warning("PixelMeanTimeSeries object only contains 2 data points, "
"setting polyfit order to one (default is 2)")
order = 1
x = [date2num(idx) for idx in s.index]
y = s.values
p = poly1d(polyfit(x, y, deg=order))
self.poly_model = p
return p
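    # Hedged usage sketch (variable names are illustrative): fit a background
    # polynomial and evaluate it at the series' own time stamps via
    # get_poly_vals (defined below).
    # >>> mean_series = PixelMeanTimeSeries(data, start_acq)
    # >>> mean_series.fit_polynomial(order=2)        # stores self.poly_model
    # >>> bg_vals = mean_series.get_poly_vals(list(mean_series.index))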
def includes_timestamp(self, time_stamp, ext_border_secs=0.0):
"""Check if input time stamp is included in this dataset.
:param datetime time_stamp: the time stamp to be checked
:param float ext_border_secs: extend start / stop range (default 0 s)
:return:
- bool, True / False (timestamp is within interval)
"""
i = self.start - timedelta(ext_border_secs / 86400.0)
f = self.stop + timedelta(ext_border_secs / 86400.0)
if i <= to_datetime(time_stamp) <= f:
return True
return False
def get_poly_vals(self, time_stamps, ext_border_secs=0.0):
"""Get value of polynomial at input time stamp.
:param datetime time_stamp: poly input value
"""
if not isinstance(self.poly_model, poly1d):
raise AttributeError("No polynomial available, please call"
"function fit_polynomial first")
if isinstance(time_stamps, datetime):
time_stamps = [time_stamps, ]
if not any([isinstance(time_stamps, x)
for x in [list, DatetimeIndex]]):
raise ValueError("Invalid input for time stamps, need list")
if not all([self.includes_timestamp(x, ext_border_secs)
for x in time_stamps]):
raise IndexError("At least one of the time stamps is not included "
"in this series: %s - %s"
% (self.start, self.stop))
values = []
for time_stamp in time_stamps:
values.append(self.poly_model(date2num(time_stamp)))
return asarray(values)
def estimate_noise_amplitude(self, sigma_gauss=1, median_size=3, plot=0):
"""Estimate the amplitude of the noise in the data.
Steps:
1. Determines high frequency variations by applying binomial
filter (sigma = 3) to data and subtract this from data,
resulting in a residual
2. Median filtering of residual signal to get rid of narrow peaks
(i.e. where the original data shows abrupt changes)
3. subtract both signals and determine std
..note::
Beta version: no guarantee it works for all cases
"""
# make bool array of indices considered (initally all)
y0 = median_filter(self.values, 3)
y1 = gaussian_filter1d(y0, sigma_gauss)
res0 = y0 - y1
res1 = median_filter(res0, median_size)
diff = res1 - res0
if plot:
fig, ax = subplots(2, 1)
ax[0].plot(y0, "-c", label="y0")
ax[0].plot(y1, "--xr", label="y1: Smoothed y0")
ax[0].legend(
loc='best',
fancybox=True,
framealpha=0.5,
fontsize=10)
ax[1].plot(res0, "--c", label="res0: y0 - y1")
ax[1].plot(res1, "--r", label="res1: Median(res0)")
ax[1].plot(diff, "--b", label="diff: res1 - res0")
ax[1].legend(
loc='best',
fancybox=True,
framealpha=0.5,
fontsize=10)
return diff.std()
def plot(self, include_tit=True, date_fmt=None, **kwargs):
"""Plot time series.
Parameters
----------
include_tit : bool
Include a title
date_fmt : str
Date / time formatting string for x labels, passed to
:class:`DateFormatter` instance (optional)
**kwargs
Additional keyword arguments passed to pandas Series plot method
Returns
-------
axes
matplotlib axes instance
"""
try:
self.index = self.index.to_pydatetime()
except BaseException:
pass
try:
if "style" not in kwargs:
kwargs["style"] = "--x"
ax = super(PixelMeanTimeSeries, self).plot(**kwargs)
try:
if date_fmt is not None:
ax.xaxis.set_major_formatter(DateFormatter(date_fmt))
except BaseException:
pass
if include_tit:
ax.set_title("Mean value (%s), roi_abs: %s"
% (self.name, self.roi_abs))
ax.grid()
return ax
except Exception as e:
logger.info(repr(e))
fig, ax = subplots(1, 1)
ax.text(.1, .1, "Plot of PixelMeanTimeSeries failed...")
fig.canvas.draw()
return ax
def __setitem__(self, key, value):
"""Update class item."""
logger.info("%s : %s" % (key, value))
if key in self.__dict__:
logger.info("Writing...")
self.__dict__[key] = value
def __call__(self, normalised=False):
"""Return the current data arrays (mean, std)."""
if normalised:
return self.get_data_normalised()
return self.get_data()
# ==============================================================================
# import matplotlib.animation as animation
#
# def animate_stack(img_stack):
#
# fig = figure() # make figure
#
# # make axesimage object
# # the vmin and vmax here are very important to get the color map correct
# im = imshow(sta, cmap=plt.get_cmap('jet'), vmin=0, vmax=255)
#
# # function to update figure
# def updatefig(j):
# # set the data in the axesimage object
# im.set_array(imagelist[j])
# # return the artists set
# return im,
# # kick off the animation
# animation.FuncAnimation(fig, updatefig, frames=range(20),
# interval=50, blit=True)
# plt.show()
# ==============================================================================
| jgliss/pyplis | pyplis/processing.py | Python | gpl-3.0 | 48,843 | [
"Gaussian"
] | b0d958c871ecaf4f762e13b3753291c7f787422efd9cf5e55a49fde68db5dda0 |
from func import *
# ATTENTION! Maybe there are some mistakes in neuron parameters!
logger = logging.getLogger('neuromodulation')
startbuild = datetime.datetime.now()
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True,
'local_num_threads': 8,
'resolution': 0.1})
generate_neurons(500000)
# Init parameters of our synapse models
DOPA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
DOPA_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
SERO_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
SERO_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
NORA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
nest.CopyModel('static_synapse', gen_static_syn, static_syn)
nest.CopyModel('stdp_synapse', glu_synapse, STDP_synparams_Glu)
nest.CopyModel('stdp_synapse', gaba_synapse, STDP_synparams_GABA)
nest.CopyModel('stdp_synapse', ach_synapse, STDP_synparams_ACh)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_ex, DOPA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_in, DOPA_synparams_in)
nest.CopyModel('stdp_serotonin_synapse', sero_synapse_ex, SERO_synparams_ex)
nest.CopyModel('stdp_serotonin_synapse', sero_synapse_in, SERO_synparams_in)
nest.CopyModel('stdp_noradrenaline_synapse', nora_synapse_ex, NORA_synparams_ex)
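# --- Illustrative helper (hedged, never called in this script): NEST's
# neuromodulated STDP models require that the volume transmitter itself
# receives the spikes of the modulatory population, which then drive the
# shared dopamine / serotonin trace of all synapses using that transmitter.
# Here this wiring is assumed to happen inside func.connect(); the sketch
# shows the bare NEST 2.x pattern.
def _example_wire_volume_transmitter(modulatory_neurons, vt):
    """Feed a modulatory population's spikes into a volume transmitter."""
    nest.Connect(modulatory_neurons, vt, syn_spec={'model': 'static_synapse'})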
logger.debug("* * * Start connection initialisation")
####################################################################
# * * * ventral pathway * * *
connect(ldt[ldt_Ach],thalamus[thalamus_Glu], syn_type=ACh, weight_coef=5)
connect(ldt[ldt_Ach], bnst[bnst_Ach], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], prefrontal[pfc_Glu0], syn_type=ACh, weight_coef=0.5)
connect(thalamus[thalamus_Glu], motor[motor_Glu0], syn_type=Glu, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_5HT], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu1], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(prefrontal[pfc_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(prefrontal[pfc_Glu1], bnst[bnst_Glu], syn_type=Glu, weight_coef=0.005)
connect(bnst[bnst_Glu], bnst[bnst_GABA], syn_type=Glu, weight_coef=0.005)
connect(bnst[bnst_Ach], amygdala[amygdala_Ach], syn_type=ACh, weight_coef=0.005)
connect(bnst[bnst_GABA], hypothalamus[hypothalamus_pvn_GABA], syn_type=GABA, weight_coef=0.0005)
connect(amygdala[amygdala_Ach], lc[lc_Ach], syn_type=ACh, weight_coef=0.005)
connect(amygdala[amygdala_GABA], bnst[bnst_GABA], syn_type=GABA, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu, weight_coef=0.005)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu0], syn_type=GABA, weight_coef=0.5)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu1], syn_type=GABA, weight_coef=0.5)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_5HT], syn_type=GABA, weight_coef=0.5)
# inside LC
connect(lc[lc_Ach], lc[lc_GABA], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_Ach], lc[lc_N1], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_D1], lc[lc_N0], syn_type=DA_ex, weight_coef=0.005)
connect(lc[lc_D2], lc[lc_N1], syn_type=DA_in, weight_coef=0.005)
connect(lc[lc_GABA], lc[lc_N0], syn_type=GABA, weight_coef=0.005)
# * * * dorsal pathway * * *
connect(pgi[pgi_Glu], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(pgi[pgi_Glu], lc[lc_N1], syn_type=Glu, weight_coef=0.005)
connect(pgi[pgi_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(prh[prh_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(striatum[striatum_tan], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(vta[vta_DA0], lc[lc_D1], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA0], lc[lc_D2], syn_type=DA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_tan], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_GABA], syn_type=DA_ex, weight_coef=0.005)
wse = 0.1
wsi = 0.0001
#
# * * * NIGROSTRIATAL PATHWAY* * *
connect(motor[motor_Glu0], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], snc[snc_DA], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], striatum[striatum_D2], syn_type=Glu, weight_coef=0.05)
connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu, weight_coef=3) #0.0008
connect(motor[motor_Glu0], prefrontal[pfc_5HT], syn_type=Glu, weight_coef=0.3) ######not in the diagram
connect(motor[motor_Glu0], motor[motor_5HT], syn_type=Glu, weight_coef=0.003) ######not in the diagram
connect(motor[motor_Glu0], stn[stn_Glu], syn_type=Glu, weight_coef=7)
connect(motor[motor_Glu1], striatum[striatum_D1], syn_type=Glu)
connect(motor[motor_Glu1], striatum[striatum_D2], syn_type=Glu)
connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu,weight_coef=5)
connect(motor[motor_Glu1], stn[stn_Glu], syn_type=Glu)
connect(motor[motor_Glu1], nac[nac_GABA0], syn_type=GABA)
connect(striatum[striatum_tan], striatum[striatum_D1], syn_type=GABA)
connect(striatum[striatum_tan], striatum[striatum_D2], syn_type=Glu)
connect(striatum[striatum_D1], snr[snr_GABA], syn_type=GABA, weight_coef=0.001)
connect(striatum[striatum_D1], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.001)
connect(striatum[striatum_D1], gpe[gpe_GABA], syn_type=GABA, weight_coef=0.005)
connect(striatum[striatum_D2], gpe[gpe_GABA], syn_type=GABA, weight_coef=1)
connect(gpe[gpe_GABA], stn[stn_Glu], syn_type=GABA, weight_coef=0.0001)
connect(gpe[gpe_GABA], striatum[striatum_D1], syn_type=GABA, weight_coef=0.001)
connect(gpe[gpe_GABA], striatum[striatum_D2], syn_type=GABA, weight_coef=0.3)
connect(gpe[gpe_GABA], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.0001)
connect(gpe[gpe_GABA], snr[snr_GABA], syn_type=GABA, weight_coef=0.0001)
connect(stn[stn_Glu], snr[snr_GABA], syn_type=Glu, weight_coef=0.2)
connect(stn[stn_Glu], gpi[gpi_GABA], syn_type=Glu, weight_coef=0.2)
connect(stn[stn_Glu], gpe[gpe_GABA], syn_type=Glu, weight_coef=0.3)
connect(stn[stn_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.01)
connect(gpi[gpi_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=0.0001) # weight_coef=3)
connect(snr[snr_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=0.0001) # weight_coef=3)
connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu)
connect(thalamus[thalamus_Glu], stn[stn_Glu], syn_type=Glu, weight_coef=1) #005
connect(thalamus[thalamus_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], nac[nac_GABA0], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_GABA1], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_Ach], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_DA], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_5HT], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_NA], syn_type=Glu)
# * * * INTEGRATED PATHWAY * * *
connect(prefrontal[pfc_Glu0], vta[vta_DA0], syn_type=Glu)
connect(prefrontal[pfc_Glu0], nac[nac_GABA1], syn_type=Glu)
connect(prefrontal[pfc_Glu1], vta[vta_GABA2], syn_type=Glu)
connect(prefrontal[pfc_Glu1], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu)
connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.3)
# * * * MESOCORTICOLIMBIC PATHWAY * * *
connect(nac[nac_Ach], nac[nac_GABA1], syn_type=ACh)
connect(nac[nac_GABA0], nac[nac_GABA1], syn_type=GABA)
connect(nac[nac_GABA1], vta[vta_GABA2], syn_type=GABA)
connect(vta[vta_GABA0], prefrontal[pfc_Glu0], syn_type=GABA, weight_coef=0.0005)
connect(vta[vta_GABA0], pptg[pptg_GABA], syn_type=GABA)
connect(vta[vta_GABA1], vta[vta_DA0], syn_type=GABA)
connect(vta[vta_GABA1], vta[vta_DA1], syn_type=GABA)
connect(vta[vta_GABA2], nac[nac_GABA1], syn_type=GABA)
connect(pptg[pptg_GABA], vta[vta_GABA0], syn_type=GABA)
connect(pptg[pptg_GABA], snc[snc_GABA], syn_type=GABA, weight_coef=0.005)
connect(pptg[pptg_ACh], vta[vta_GABA0], syn_type=ACh)
connect(pptg[pptg_ACh], vta[vta_DA1], syn_type=ACh)
connect(pptg[pptg_Glu], vta[vta_GABA0], syn_type=Glu)
connect(pptg[pptg_Glu], vta[vta_DA1], syn_type=Glu)
connect(pptg[pptg_ACh], striatum[striatum_D1], syn_type=ACh, weight_coef=0.3)
connect(pptg[pptg_ACh], snc[snc_GABA], syn_type=ACh, weight_coef=0.005)
connect(pptg[pptg_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.005)
if noradrenaline_flag:
logger.debug("* * * Making neuromodulating connections...")
#vt_ex = nest.Create('volume_transmitter')
#vt_in = nest.Create('volume_transmitter')
#NORA_synparams_ex['vt'] = vt_ex[0]
#NORA_synparams_in['vt'] = vt_in[0]
connect(nts[nts_a1], lc[lc_N0], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a1], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], lc[lc_N1], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], amygdala[amygdala_Glu], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], amygdala[amygdala_Ach], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], amygdala[amygdala_GABA], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], motor[motor_Glu0], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], motor[motor_Glu1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], prefrontal[pfc_Glu1], syn_type=NA_ex, weight_coef=0.5)
connect(lc[lc_N0], vta[vta_a1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], ldt[ldt_a1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], ldt[ldt_a2], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], rn[rn_a1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], rn[rn_a2], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a1], rn[rn_dr], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a2], rn[rn_mnr], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a2], rn[rn_rpa], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a2], rn[rn_rmg], syn_type=NA_ex, weight_coef=0.005)
#connect(vta[vta_a1], vta[vta_DA1], syn_type=NA_in, weight_coef=0.005)
if serotonin_flag:
    # * * * AFFERENT PROJECTIONS * * *
connect(vta[vta_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
connect(septum[septum_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
connect(septum[septum_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse)
connect(hypothalamus[hypothalamus_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
connect(hypothalamus[hypothalamus_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(bnst[bnst_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(amygdala[amygdala_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(amygdala[amygdala_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
connect(hippocampus[hippocampus_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
# * * * EFFERENT PROJECTIONS * * *
connect(rn[rn_dr], striatum[striatum_5HT], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], striatum[striatum_D2], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], striatum[striatum_GABA], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], striatum[striatum_Ach], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], nac[nac_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_GABA0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_GABA1], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_Ach], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_DA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], snr[snr_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], septum[septum_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) #? tune weights
connect(rn[rn_dr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) #? tune weights
connect(rn[rn_dr], lateral_cortex[lateral_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], entorhinal_cortex[entorhinal_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], prefrontal[pfc_DA], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], prefrontal[pfc_NA], syn_type=SERO_in, weight_coef=wsi) #!!!
connect(rn[rn_dr], lateral_tegmental_area[lateral_tegmental_area_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], lc[lc_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], lc[lc_N0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], lc[lc_N1], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_Glu], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_Ach], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_Glu], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_Ach], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], vta[vta_5HT], syn_type=SERO_in, weight_coef=wsi) #!!! 0.005
connect(rn[rn_mnr], vta[vta_a1], syn_type=SERO_in, weight_coef=wsi) #!!! 0.005
connect(rn[rn_mnr], vta[vta_DA1], syn_type=SERO_in, weight_coef=wsi) #!!! 0.005
connect(rn[rn_mnr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) #?
connect(rn[rn_mnr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) #? tune weights 0.005
connect(rn[rn_mnr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], motor[motor_Glu0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], motor[motor_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], insular_cortex[insular_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], medial_cortex[medial_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], neocortex[neocortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], hypothalamus[hypothalamus_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], hypothalamus[hypothalamus_pvn_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi)
# * * * THALAMOCORTICAL PATHWAY * * *
connect(thalamus[thalamus_5HT], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wse)
connect(thalamus[thalamus_5HT], motor[motor_5HT], syn_type=SERO_ex, weight_coef=wse)
connect(thalamus[thalamus_5HT], motor[motor_Glu0], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005
connect(motor[motor_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005
if dopamine_flag:
logger.debug("* * * Making neuromodulating connections...")
# NIGROSTRIATAL
connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_ex)
connect(snc[snc_DA], gpe[gpe_GABA], syn_type=DA_ex)
connect(snc[snc_DA], stn[stn_Glu], syn_type=DA_ex)
connect(snc[snc_DA], nac[nac_GABA0], syn_type=DA_ex)
connect(snc[snc_DA], nac[nac_GABA1], syn_type=DA_ex)
connect(snc[snc_DA], striatum[striatum_D2], syn_type=DA_in)
connect(snc[snc_DA], striatum[striatum_tan], syn_type=DA_in)
# MESOCORTICOLIMBIC
connect(vta[vta_DA0], striatum[striatum_D1], syn_type=DA_ex)
connect(vta[vta_DA0], striatum[striatum_D2], syn_type=DA_in)
connect(vta[vta_DA0], prefrontal[pfc_Glu0], syn_type=DA_ex,weight_coef=0.5)
connect(vta[vta_DA0], prefrontal[pfc_Glu1], syn_type=DA_ex,weight_coef=0.5)
connect(vta[vta_DA1], nac[nac_GABA0], syn_type=DA_ex)
connect(vta[vta_DA1], nac[nac_GABA1], syn_type=DA_ex)
if dopamine_flag and serotonin_flag and noradrenaline_flag:
# * * * DOPAMINE INTERACTION * * *
connect(prefrontal[pfc_5HT], prefrontal[pfc_DA], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_DA], vta[vta_5HT], syn_type=DA_in, weight_coef=0.005)
connect(prefrontal[pfc_DA], vta[vta_DA1], syn_type=DA_in, weight_coef=0.005)
#connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_in, weight_coef=0.005)
connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_ex, weight_coef=wse)
connect(vta[vta_DA1], prefrontal[pfc_5HT], syn_type=DA_ex, weight_coef=0.5)
connect(vta[vta_DA1], prefrontal[pfc_DA], syn_type=DA_ex, weight_coef=0.5)
#connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DA_ex, weight_coef=0.005)
#connect(vta[vta_DA1], striatum[striatum_DA], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_D1], syn_type=DA_ex, weight_coef=0.005)
#connect(vta[vta_DA1], nac[nac_5HT], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], nac[nac_5HT], syn_type=DA_ex, weight_coef=0.005)
#connect(vta[vta_DA1], nac[nac_DA], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], nac[nac_DA], syn_type=DA_ex, weight_coef=0.005)
#connect(striatum[striatum_5HT], striatum[striatum_DA], syn_type=SERO_in, weight_coef=0.005)
connect(striatum[striatum_5HT], striatum[striatum_D1], syn_type=SERO_ex, weight_coef=wse) #??????????????????????????????????? D1, D2?
#connect(striatum[striatum_DA], snr[snr_GABA], syn_type=DOPA_in, weight_coef=0.005)
connect(striatum[striatum_D1], snr[snr_GABA], syn_type=DA_ex, weight_coef=0.005)
#connect(striatum[striatum_DA], snc[snc_DA], syn_type=DOPA_in, weight_coef=0.005)
# connect(striatum[striatum_D1], snc[snc_GABA], syn_type=DA_ex, weight_coef=0.005)
connect(striatum[striatum_D1], snc[snc_DA], syn_type=DA_ex, weight_coef=0.005)
connect(nac[nac_5HT], nac[nac_DA], syn_type=SERO_ex, weight_coef=wse)
connect(snr[snr_GABA], snc[snc_DA], syn_type=SERO_in, weight_coef=wsi)
connect(snc[snc_GABA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005) #?
connect(snc[snc_DA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005)
connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_in, weight_coef=0.005)
connect(snc[snc_DA], nac[nac_5HT], syn_type=DA_in, weight_coef=0.005)
connect(snc[snc_DA], nac[nac_DA], syn_type=DA_in, weight_coef=0.005)
connect(lc[lc_5HT], lc[lc_D1], syn_type=SERO_ex, weight_coef=0.005)
connect(lc[lc_D1], rn[rn_dr], syn_type=DA_ex, weight_coef=0.005)
# * * * NORADRENALINE INTERACTION * * *
connect(lc[lc_5HT], lc[lc_N0], syn_type=SERO_in, weight_coef=0.005)
connect(lc[lc_5HT], lc[lc_N1], syn_type=SERO_in, weight_coef=0.005)
logger.debug("* * * Attaching spike generators...")
######################distress/anguish
connect_generator(nts[nts_a1], 100., 200., rate=250, coef_part=1)
connect_generator(nts[nts_a2], 100., 200., rate=250, coef_part=1)
connect_generator(prh[prh_GABA], 100., 200., rate=250, coef_part=1)
connect_generator(pgi[pgi_GABA], 100., 200., rate=250, coef_part=1)
connect_generator(pgi[pgi_Glu], 100., 200., rate=250, coef_part=1)
connect_generator(ldt[ldt_a1], 100., 200., rate=250, coef_part=1)
connect_generator(ldt[ldt_a2], 100., 200., rate=250, coef_part=1)
connect_generator(ldt[ldt_Ach], 100., 200., rate=250, coef_part=1)
connect_generator(lc[lc_N0], 100., 200., rate=250, coef_part=1)
#connect_generator(lc[lc_N1], 100., 200., rate=250, coef_part=1)
################################fear/terror
connect_generator(motor[motor_Glu0], 300., 400., rate=250, coef_part=1)
connect_generator(pptg[pptg_GABA], 300., 400., rate=250, coef_part=1)
connect_generator(pptg[pptg_Glu], 300., 400., rate=250, coef_part=1)
connect_generator(pptg[pptg_ACh], 300., 400., rate=250, coef_part=1)
connect_generator(amygdala[amygdala_Glu], 300., 400., rate=250, coef_part=1)
connect_generator(snc[snc_DA], 300., 400., rate=250, coef_part=1)
connect_generator(vta[vta_DA0], 300., 400., rate=250, coef_part=1)
############################anger/rage
connect_generator(nts[nts_a1], 500., 600., rate=250, coef_part=1)
connect_generator(nts[nts_a2], 500., 600., rate=250, coef_part=1)
connect_generator(prh[prh_GABA], 500., 600., rate=250, coef_part=1)
connect_generator(pgi[pgi_GABA], 500., 600., rate=250, coef_part=1)
connect_generator(pgi[pgi_Glu], 500., 600., rate=250, coef_part=1)
connect_generator(ldt[ldt_a1], 500., 600., rate=250, coef_part=1)
connect_generator(ldt[ldt_a2], 500., 600., rate=250, coef_part=1)
connect_generator(ldt[ldt_Ach], 500., 600., rate=250, coef_part=1)
connect_generator(lc[lc_N0], 500., 600., rate=250, coef_part=1)
#connect_generator(lc[lc_N1], 500., 600., rate=250, coef_part=1)
connect_generator(motor[motor_Glu0], 500., 600., rate=250, coef_part=1)
connect_generator(pptg[pptg_GABA], 500., 600., rate=250, coef_part=1)
connect_generator(pptg[pptg_Glu], 500., 600., rate=250, coef_part=1)
connect_generator(pptg[pptg_ACh], 500., 600., rate=250, coef_part=1)
connect_generator(amygdala[amygdala_Glu], 500., 600., rate=250, coef_part=1)
connect_generator(snc[snc_DA], 500., 600., rate=250, coef_part=1)
connect_generator(vta[vta_DA0], 500., 600., rate=250, coef_part=1)
#########################contempt/disgust
connect_generator(prefrontal[pfc_5HT], 700., 800., rate=250, coef_part=1)
connect_generator(motor[motor_5HT], 700., 800., rate=250, coef_part=1)
connect_generator(rn[rn_dr], 700., 800., rate=250, coef_part=1)
connect_generator(rn[rn_mnr], 700., 800., rate=250, coef_part=1)
#################################surprise
connect_generator(nts[nts_a1], 900., 1000., rate=250, coef_part=1)
connect_generator(nts[nts_a2], 900., 1000., rate=250, coef_part=1)
connect_generator(prh[prh_GABA], 900., 1000., rate=250, coef_part=1)
connect_generator(pgi[pgi_GABA], 900., 1000., rate=250, coef_part=1)
connect_generator(pgi[pgi_Glu], 900., 1000., rate=250, coef_part=1)
connect_generator(ldt[ldt_a1], 900., 1000., rate=250, coef_part=1)
connect_generator(ldt[ldt_a2], 900., 1000., rate=250, coef_part=1)
connect_generator(ldt[ldt_Ach], 900., 1000., rate=250, coef_part=1)
connect_generator(lc[lc_N0], 900., 1000., rate=250, coef_part=1)
#connect_generator(lc[lc_N1], 900., 1000., rate=250, coef_part=1)
connect_generator(prefrontal[pfc_5HT], 900., 1000., rate=250, coef_part=1)
connect_generator(motor[motor_5HT], 900., 1000., rate=250, coef_part=1)
connect_generator(rn[rn_dr], 900., 1000., rate=250, coef_part=1)
connect_generator(rn[rn_mnr], 900., 1000., rate=250, coef_part=1)
##############################enjoyment/joy
connect_generator(prefrontal[pfc_5HT], 1100., 1200., rate=250, coef_part=1)
connect_generator(motor[motor_5HT], 1100., 1200., rate=250, coef_part=1)
connect_generator(rn[rn_dr], 1100., 1200., rate=250, coef_part=1)
connect_generator(rn[rn_mnr], 1100., 1200., rate=250, coef_part=1)
connect_generator(motor[motor_Glu0], 1100., 1200., rate=250, coef_part=1)
connect_generator(pptg[pptg_GABA], 1100., 1200., rate=250, coef_part=1)
connect_generator(pptg[pptg_Glu], 1100., 1200., rate=250, coef_part=1)
connect_generator(pptg[pptg_ACh], 1100., 1200., rate=250, coef_part=1)
connect_generator(amygdala[amygdala_Glu], 1100., 1200., rate=250, coef_part=1)
connect_generator(snc[snc_DA], 1100., 1200., rate=250, coef_part=1)
connect_generator(vta[vta_DA0], 1100., 1200., rate=250, coef_part=1)
##############excitement
connect_generator(nts[nts_a1], 1300., 1400., rate=250, coef_part=1)
connect_generator(nts[nts_a2], 1300., 1400., rate=250, coef_part=1)
connect_generator(prh[prh_GABA], 1300., 1400., rate=250, coef_part=1)
connect_generator(pgi[pgi_GABA], 1300., 1400., rate=250, coef_part=1)
connect_generator(pgi[pgi_Glu], 1300., 1400., rate=250, coef_part=1)
connect_generator(ldt[ldt_a1], 1300., 1400., rate=250, coef_part=1)
connect_generator(ldt[ldt_a2], 1300., 1400., rate=250, coef_part=1)
connect_generator(ldt[ldt_Ach], 1300., 1400., rate=250, coef_part=1)
connect_generator(lc[lc_N0], 1300., 1400., rate=250, coef_part=1)
#connect_generator(lc[lc_N1], 1300., 1400., rate=250, coef_part=1)
connect_generator(prefrontal[pfc_5HT], 1300., 1400., rate=250, coef_part=1)
connect_generator(motor[motor_5HT], 1300., 1400., rate=250, coef_part=1)
connect_generator(rn[rn_dr], 1300., 1400., rate=250, coef_part=1)
connect_generator(rn[rn_mnr], 1300., 1400., rate=250, coef_part=1)
connect_generator(motor[motor_Glu0], 1300., 1400., rate=250, coef_part=1)
connect_generator(pptg[pptg_GABA], 1300., 1400., rate=250, coef_part=1)
connect_generator(pptg[pptg_Glu], 1300., 1400., rate=250, coef_part=1)
connect_generator(pptg[pptg_ACh], 1300., 1400., rate=250, coef_part=1)
connect_generator(amygdala[amygdala_Glu], 1300., 1400., rate=250, coef_part=1)
connect_generator(snc[snc_DA], 1300., 1400., rate=250, coef_part=1)
connect_generator(vta[vta_DA0], 1300., 1400., rate=250, coef_part=1)
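# --- Hedged refactor sketch (defined but never called): the per-emotion
# blocks above repeat one call pattern; an equivalent data-driven form could
# drive them from a schedule of (populations, t_start, t_stop) tuples.
def _attach_emotion_generators(schedule, rate=250, coef_part=1):
    for populations, t0, t1 in schedule:
        for pop in populations:
            connect_generator(pop, t0, t1, rate=rate, coef_part=coef_part)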
logger.debug("* * * Attaching spikes detector")
connect_detector(lc[lc_D1])
connect_detector(lc[lc_D2])
connect_detector(lc[lc_Ach])
connect_detector(lc[lc_N0])
connect_detector(lc[lc_N1])
connect_detector(lc[lc_GABA])
connect_detector(lc[lc_5HT])
connect_detector(nts[nts_a1])
connect_detector(nts[nts_a2])
connect_detector(rn[rn_a1])
connect_detector(rn[rn_a2])
connect_detector(rn[rn_rmg])
connect_detector(rn[rn_rpa])
connect_detector(rn[rn_dr])
connect_detector(rn[rn_mnr])
connect_detector(thalamus[thalamus_5HT])
connect_detector(thalamus[thalamus_Glu])
connect_detector(vta[vta_GABA0])
connect_detector(vta[vta_GABA1])
connect_detector(vta[vta_GABA2])
connect_detector(vta[vta_DA0])
connect_detector(vta[vta_DA1])
connect_detector(vta[vta_a1])
connect_detector(vta[vta_5HT])
connect_detector(snc[snc_GABA])
connect_detector(snc[snc_DA])
connect_detector(snr[snr_GABA])
connect_detector(prefrontal[pfc_Glu0])
connect_detector(prefrontal[pfc_Glu1])
connect_detector(prefrontal[pfc_DA])
connect_detector(prefrontal[pfc_5HT])
connect_detector(striatum[striatum_D1])
connect_detector(striatum[striatum_D2])
connect_detector(striatum[striatum_tan])
connect_detector(striatum[striatum_5HT])
connect_detector(striatum[striatum_Ach])
connect_detector(striatum[striatum_GABA])
connect_detector(motor[motor_Glu0])
connect_detector(motor[motor_Glu1])
connect_detector(motor[motor_5HT])
connect_detector(hypothalamus[hypothalamus_5HT])
connect_detector(hypothalamus[hypothalamus_pvn_GABA])
logger.debug("* * * Attaching multimeters")
connect_multimeter(lc[lc_D1])
connect_multimeter(lc[lc_D2])
connect_multimeter(lc[lc_Ach])
connect_multimeter(lc[lc_N0])
connect_multimeter(lc[lc_N1])
connect_multimeter(lc[lc_GABA])
connect_multimeter(lc[lc_5HT])
connect_multimeter(nts[nts_a1])
connect_multimeter(nts[nts_a2])
connect_multimeter(rn[rn_a1])
connect_multimeter(rn[rn_a2])
connect_multimeter(rn[rn_rmg])
connect_multimeter(rn[rn_rpa])
connect_multimeter(rn[rn_dr])
connect_multimeter(rn[rn_mnr])
connect_multimeter(thalamus[thalamus_5HT])
connect_multimeter(thalamus[thalamus_Glu])
connect_multimeter(vta[vta_GABA0])
connect_multimeter(vta[vta_GABA1])
connect_multimeter(vta[vta_GABA2])
connect_multimeter(vta[vta_DA0])
connect_multimeter(vta[vta_DA1])
connect_multimeter(vta[vta_a1])
connect_multimeter(vta[vta_5HT])
connect_multimeter(snc[snc_GABA])
connect_multimeter(snc[snc_DA])
connect_multimeter(snr[snr_GABA])
connect_multimeter(prefrontal[pfc_Glu0])
connect_multimeter(prefrontal[pfc_Glu1])
connect_multimeter(prefrontal[pfc_DA])
connect_multimeter(prefrontal[pfc_5HT])
connect_multimeter(striatum[striatum_D1])
connect_multimeter(striatum[striatum_D2])
connect_multimeter(striatum[striatum_tan])
connect_multimeter(striatum[striatum_5HT])
connect_multimeter(striatum[striatum_Ach])
connect_multimeter(striatum[striatum_GABA])
connect_multimeter(motor[motor_Glu0])
connect_multimeter(motor[motor_Glu1])
connect_multimeter(motor[motor_5HT])
connect_multimeter(hypothalamus[hypothalamus_5HT])
connect_multimeter(hypothalamus[hypothalamus_pvn_GABA])
"""
logger.debug("* * * Attaching spikes detector")
for part in getAllParts():
connect_detector(part)
logger.debug("* * * Attaching multimeters")
for part in getAllParts():
connect_multimeter(part)
"""
del generate_neurons, connect, connect_generator, connect_detector, connect_multimeter
endbuild = datetime.datetime.now()
simulate()
get_log(startbuild, endbuild)
save(GUI=status_gui)
| research-team/NEUCOGAR | NEST/cube/integration/integration-10/run8.py | Python | gpl-2.0 | 32,025 | [
"NEURON"
] | d98169e880541652e3edd278a03c6aa02ca4b2cefac441e55ea1c6eb3f71dcda |
# -*- coding: utf-8 -*-
"""
Some helpful tools
"""
import numpy as np
def minmax(input):
"""
    Calculate the (min, max) range of each input column (feature)
"""
input = np.asfarray(input)
assert input.ndim == 2
min = input.min(axis=0)
max = input.max(axis=0)
out = [x for x in zip(min, max)]
return tuple(out)
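# Hedged usage sketch for minmax (values are illustrative):
# >>> minmax([[1, 10], [3, 30]])
# ((1.0, 3.0), (10.0, 30.0))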
class Norm:
    """
    Column-wise linear normalizer: scales each column of x into [0, 1]
    using the value ranges of the array given at construction
    """
    def __init__(self, x):
x = np.asfarray(x)
if x.ndim != 2:
            raise ValueError('x must have 2 dimensions')
min = np.min(x, axis=0)
dist = np.max(x, axis=0) - min
min.shape = 1, min.size
dist.shape = 1, dist.size
self.min = min
self.dist = dist
def __call__(self, x):
x = np.asfarray(x)
res = (x - self.min) / self.dist
return res
def renorm(self, x):
x = np.asfarray(x)
res = x * self.dist + self.min
return res
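# Hedged usage sketch for Norm (values are illustrative):
# >>> x = np.array([[1.0, 10.0], [3.0, 30.0]])
# >>> norm = Norm(x)
# >>> norm(x).tolist()               # columns scaled into [0, 1]
# [[0.0, 0.0], [1.0, 1.0]]
# >>> norm.renorm(norm(x)).tolist()  # inverse transform
# [[1.0, 10.0], [3.0, 30.0]]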
def load(fname):
from pickle import load
with open(fname, 'rb') as file:
net = load(file)
return net
def save(net, fname):
from pickle import dump
with open(fname, 'wb') as file:
dump(net, file)
def np_size(net):
"""
    Calculate the total count of all network parameters (weights, biases, etc.)
"""
size = 0
for l in net.layers:
for prop in l.np.values():
size += prop.size
return size
def np_get(net):
"""
Get all network parameters in one array
"""
size = np_size(net)
result = np.zeros(size)
start = 0
for l in net.layers:
for prop in l.np.values():
result[start: start + prop.size] = prop.flat[:]
start += prop.size
return result
def np_set(net, np_data):
"""
Set network parameters
:Example:
>>> import neurolab as nl
>>> net = nl.net.newff([[-1, 1]], [3, 1])
>>> x = np_get(net)
>>> x.fill(100)
>>> np_set(net, x)
>>> net.layers[0].np['w'].tolist()
[[100.0], [100.0], [100.0]]
"""
start = 0
for l in net.layers:
for prop in l.np:
size = l.np[prop].size
values = np_data[start: start + size]
values.shape = l.np[prop].shape
l.np[prop][:] = values
start += size
def np_get_ref(net):
"""
    Get all network parameters in one array, as a reference:
    changing the array changes the network
:Example:
>>> import neurolab as nl
>>> net = nl.net.newff([[-1, 1]], [3, 1])
>>> x = np_get_ref(net)
>>> x.fill(10)
>>> net.layers[0].np['w'].tolist()
[[10.0], [10.0], [10.0]]
"""
size = np_size(net)
x = np.empty(size)
st = 0
for l in net.layers:
for k, v in l.np.items():
x[st: st + v.size] = v.flatten()
l.np[k] = x[st: st + v.size]
l.np[k].shape = v.shape
st += v.size
return x
def ff_grad_step(net, out, tar, grad=None):
"""
    Calculate the gradient with the backpropagation method
    for a feed-forward neural network, for a single sample
    :Parameters:
        net: Net
            Feed-forward network
        out: array, size = net.co
            Network output for the current sample
        tar: array, size = net.co
            Train target
        grad: list of dict, default None
            Gradient accumulated on previous steps
:Returns:
grad: list of dict
Gradient of net for each layer,
format:[{'w':..., 'b':...},{'w':..., 'b':...},...]
"""
delt = [None] * len(net.layers)
if grad is None:
grad = []
for i, l in enumerate(net.layers):
grad.append({})
for k, v in l.np.items():
grad[i][k] = np.zeros(v.shape)
# for output layer
ln = len(net.layers) - 1
layer = net.layers[ln]
delt[ln] = net.errorf.deriv(tar, out) * layer.transf.deriv(layer.s, out)
delt[ln] = np.negative(delt[ln])
delt[ln].shape = delt[ln].size, 1
grad[ln]['w'] += delt[ln] * layer.inp
grad[ln]['b'] += delt[ln].reshape(delt[ln].size)
bp = range(len(net.layers) - 2, -1, -1)
for ln in bp:
layer = net.layers[ln]
next = ln + 1
dS = np.sum(net.layers[next].np['w'] * delt[next], axis=0)
delt[ln] = dS * layer.transf.deriv(layer.s, layer.out)
delt[ln].shape = delt[ln].size, 1
grad[ln]['w'] += delt[ln] * layer.inp
grad[ln]['b'] += delt[ln].reshape(delt[ln].size)
return grad
def ff_grad(net, input, target):
"""
    Calculate and accumulate the gradient with the backpropagation
    method for a feed-forward neural network, over a whole batch
    :Parameters:
        net: Net
            Feed-forward network
        input: array, shape = N,net.ci
            Input array
        target: array, shape = N,net.co
            Train target
:Returns:
grad: list of dict
Gradient of net for each layer,
format:[{'w':..., 'b':...},{'w':..., 'b':...},...]
grad_flat: array
            All gradient values in one flat array (a view of grad;
            changing grad changes grad_flat)
output: array
output of network
"""
    # Init grad and link it to grad_flat
grad = []
grad_flat = np.zeros(np_size(net))
st = 0
for i, l in enumerate(net.layers):
grad.append({})
for k, v in l.np.items():
grad[i][k] = grad_flat[st: st + v.size]
grad[i][k].shape = v.shape
st += v.size
output = []
# Calculate grad for all batch
for inp, tar in zip(input, target):
out = net.step(inp)
ff_grad_step(net, out, tar, grad)
output.append(out)
return grad, grad_flat, np.row_stack(output)
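# Hedged usage sketch (illustrative, mirrors the doctests above): batch
# gradient for a small feed-forward net built with neurolab.
# >>> import neurolab as nl
# >>> net = nl.net.newff([[-1, 1]], [3, 1])
# >>> inp = np.array([[0.1], [0.5]])
# >>> tar = np.array([[0.2], [0.4]])
# >>> grad, grad_flat, out = ff_grad(net, inp, tar)
# >>> grad_flat.size == np_size(net)
# True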
def reg_norms(net, ord=2):
"""
    Calculate the norms of weights and biases for computing
    the regularization term.
:Parameters:
net: neurolab net object
:Keywords:
ord: int
order of norm for regularization term. Usually in {1,2}
"""
# Assemble weights and biases into 1D vectors
w = []
b = []
for layer in net.layers:
w.extend(layer.np['w'].reshape(layer.np['w'].size))
b.extend(layer.np['b'].reshape(layer.np['b'].size))
# Calculate norms
w = np.linalg.norm(w, ord=ord)
b = np.linalg.norm(b, ord=ord)
return w, b
def reg_error(e, net, rr):
"""
    Apply the regularization term to the result of the error function
    :Parameters:
        e: float
            current error value
        net: neurolab net object
        rr: float
            regularization rate [0, 1]
    :Return:
        e: float
            error with regularization term added
"""
w, b = reg_norms(net)
e += rr * w + rr * b
return e
def reg_grad(grad, net, rr):
"""
Correction gradient for regularization
:Parameters:
grad: list of dict
grad without regularization
net: neurolab net object
rr: float
regularization rate [0, 1]
:Return:
        grad: list of dict
            Gradient with regularization term added
"""
for i, l in enumerate(net.layers):
grad[i]['w'] += rr * l.np['w']
grad[i]['b'] += rr * l.np['b']
return grad
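# Hedged sketch combining the helpers above into one L2-regularized gradient
# step (net, inp, tar and rr are illustrative; net.errorf is assumed to
# follow the (target, output) convention used in ff_grad_step):
# >>> grad, grad_flat, out = ff_grad(net, inp, tar)
# >>> e = reg_error(net.errorf(tar, out), net, rr=0.01)
# >>> grad = reg_grad(grad, net, rr=0.01)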
| blagasz/python-ann | neurolab/tool.py | Python | gpl-2.0 | 7,250 | [
"NEURON"
] | 88c7e583aad0d4531f7502039d6a546ce7d7f39fafe2b5a1985e0c6fd031ee27 |
#!/usr/bin/env python
'''
uframe endpoints
'''
__author__ = 'Andy Bird'
# base imports
from flask import (jsonify, request, current_app, url_for, Flask, make_response,
url_for, Response)
from ooiservices.app import db, cache, celery
from ooiservices.app.uframe import uframe as api
from ooiservices.app.models import (Array, PlatformDeployment,
InstrumentDeployment, Stream,
StreamParameter, Organization,
Instrumentname, Annotation)
from ooiservices.app.main.authentication import auth,verify_auth
from ooiservices.app.main.errors import internal_server_error
from urllib import urlencode
# data handling imports
from ooiservices.app.uframe.data import get_data, get_simple_data,COSMO_CONSTANT,find_parameter_ids
from ooiservices.app.uframe.plotting import generate_plot
from ooiservices.app.uframe.assetController import _get_events_by_ref_des
from datetime import datetime
from dateutil.parser import parse as parse_date
import requests
# primarily for encoding coordinates to GeoJSON
from geojson import LineString
# additional imports
import json
import datetime
import math
import csv
import io
import numpy as np
import pytz
from ooiservices.app.main.routes import get_display_name_by_rd
from ooiservices.app.main.arrays import get_arrays, get_array
from contextlib import closing
import time
from ooiservices.app.models import PlatformDeployment
import urllib2
requests.adapters.DEFAULT_RETRIES = 2
CACHE_TIMEOUT = 86400
def dfs_streams():
uframe_url, timeout, timeout_read = get_uframe_info()
TOC = uframe_url+'/toc'
streams = []
toc = requests.get(TOC, timeout=(timeout, timeout_read))
toc = toc.json()
for instrument in toc:
parameters_dict = parameters_in_instrument(instrument)
streams = data_streams_in_instrument(instrument, parameters_dict, streams)
return streams
def parameters_in_instrument(instrument):
    parameters_dict = {}
for parameter in instrument['instrument_parameters']:
if parameter['shape'].lower() in ['scalar', 'function']:
if parameter['stream'] not in parameters_dict.iterkeys():
parameters_dict[parameter['stream']] = []
parameters_dict[parameter['stream']+'_variable_type'] = []
parameters_dict[parameter['stream']+'_units'] = []
parameters_dict[parameter['stream']+'_variables_shape'] = []
parameters_dict[parameter['stream']+'_pdId'] = []
parameters_dict[parameter['stream']].append(parameter['particleKey'])
parameters_dict[parameter['stream']+'_variable_type'].append(parameter['type'].lower())
parameters_dict[parameter['stream']+'_units'].append(parameter['units'])
parameters_dict[parameter['stream']+'_variables_shape'].append(parameter['shape'].lower())
parameters_dict[parameter['stream']+'_pdId'].append(parameter['pdId'].lower())
return parameters_dict
def data_streams_in_instrument(instrument, parameters_dict, streams):
for data_stream in instrument['streams']:
stream = (
instrument['platform_code'],
instrument['mooring_code'],
instrument['instrument_code'],
data_stream['method'],
data_stream['stream'],
instrument['reference_designator'],
data_stream['beginTime'],
data_stream['endTime'],
parameters_dict[data_stream['stream']],
parameters_dict[data_stream['stream']+'_variable_type'],
parameters_dict[data_stream['stream']+'_units'],
parameters_dict[data_stream['stream']+'_variables_shape'],
parameters_dict[data_stream['stream']+'_pdId']
)
#current_app.logger.info("GET %s", each['reference_designator'])
streams.append(stream)
return streams
def split_stream_name(ui_stream_name):
'''
    Splits the hyphenated reference designator and stream type into a tuple of
(mooring, platform, instrument, stream_type, stream)
'''
print ui_stream_name
mooring, platform, instrument = ui_stream_name.split('-', 2)
instrument, stream_type, stream = instrument.split('_', 2)
return (mooring, platform, instrument, stream_type, stream)
def combine_stream_name(mooring, platform, instrument, stream_type, stream):
first_part = '-'.join([mooring, platform, instrument])
all_of_it = '_'.join([first_part, stream_type, stream])
return all_of_it
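# Hedged round-trip sketch (the reference designator is illustrative):
# >>> parts = split_stream_name(
# ...     'CP02PMUO-WFP01-03-CTDPFK000_telemetered_ctdpf_ckl_wfp_instrument')
# >>> combine_stream_name(*parts)
# 'CP02PMUO-WFP01-03-CTDPFK000_telemetered_ctdpf_ckl_wfp_instrument'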
def iso_to_timestamp(iso8601):
dt = parse_date(iso8601)
    # NB: the later ``import datetime`` shadows ``from datetime import
    # datetime``, so the module path must be used here
    t = (dt - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds()
return t
def dict_from_stream(mooring, platform, instrument, stream_type, stream, reference_designator, beginTime, endTime, variables, variable_type, units, variables_shape, parameter_id):
HOST = str(current_app.config['HOST'])
PORT = str(current_app.config['PORT'])
SERVICE_LOCATION = 'http://'+HOST+":"+PORT
ref = mooring + "-" + platform + "-" + instrument
stream_name = '_'.join([stream_type, stream])
ref = '-'.join([mooring, platform, instrument])
data_dict = {}
data_dict['start'] = beginTime
data_dict['end'] = endTime
data_dict['reference_designator'] = reference_designator
data_dict['stream_name'] = stream_name
data_dict['variables'] = []
data_dict['variable_types'] = {}
data_dict['units'] = {}
data_dict['variables_shape'] = {}
data_dict['display_name'] = get_display_name_by_rd(ref)
data_dict['download'] = {
"csv":"/".join(['api/uframe/get_csv', stream_name, ref]),
"json":"/".join(['api/uframe/get_json', stream_name, ref]),
"netcdf":"/".join(['api/uframe/get_netcdf', stream_name, ref]),
"profile":"/".join(['api/uframe/get_profiles', stream_name, ref])
}
data_dict['variables'] = variables
data_dict['variable_type'] = variable_type
data_dict['units'] = units
data_dict['variables_shape'] = variables_shape
data_dict['parameter_id'] = parameter_id
return data_dict
@api.route('/stream')
#@auth.login_required
def streams_list():
'''
Accepts stream_name or reference_designator as a URL argument
'''
if request.args.get('stream_name'):
try:
dict_from_stream(request.args.get('stream_name'))
except Exception as e:
current_app.logger.exception('**** (1) exception: ' + e.message)
return jsonify(error=e.message), 500
cached = cache.get('stream_list')
if cached:
retval = cached
else:
try:
streams = dfs_streams()
except Exception as e:
current_app.logger.exception('**** (2) exception: ' + e.message)
return jsonify(error=e.message), 500
retval = []
for stream in streams:
try:
data_dict = dict_from_stream(*stream)
except Exception as e:
current_app.logger.exception('\n**** (3) exception: ' + e.message)
continue
if request.args.get('reference_designator'):
if request.args.get('reference_designator') != data_dict['reference_designator']:
continue
retval.append(data_dict)
cache.set('stream_list', retval, timeout=CACHE_TIMEOUT)
if request.args.get('min') == 'True':
for obj in retval:
try:
del obj['parameter_id']
del obj['units']
del obj['variable_type']
del obj['variable_types']
del obj['variables']
del obj['variables_shape']
except KeyError:
raise
if request.args.get('search') and request.args.get('search') != "":
return_list = []
search_term = request.args.get('search')
for item in retval:
if search_term.lower() in str(item['stream_name']).lower():
return_list.append(item)
if search_term.lower() in str(item['display_name']).lower():
return_list.append(item)
if search_term.lower() in str(item['reference_designator']).lower():
return_list.append(item)
retval = return_list
if request.args.get('startAt'):
start_at = int(request.args.get('startAt'))
count = int(request.args.get('count'))
total = int(len(retval))
retval_slice = retval[start_at:(start_at + count)]
result = jsonify({"count": count,
"total": total,
"startAt": start_at,
"streams": retval_slice})
return result
else:
return jsonify(streams=retval)
#@cache.memoize(timeout=3600)
@auth.login_required
@api.route('/get_glider_track/<string:ref>')
def get_uframe_glider_track(ref):
'''
Given a reference designator, returns a GeoJSON LineString for glider
tracks
'''
# we will always want the telemetered data, and the engineering stream
# data should reside in the same place
res = get_json('telemetered_glider_eng_telemetered', ref)
try:
if res.status_code == 200:
res_arr = json.loads(res.data)['data']
# load the JSON into a shapely LineString.
track = LineString([(pt['m_gps_lon'], pt['m_gps_lat'])
for pt in res_arr if pt['m_gps_lon'] != 'NaN'
and pt['m_gps_lat'] != 'NaN'])
            # serialize the Python object containing the track to GeoJSON
return Response(json.dumps(track),
mimetype='application/json')
else:
# if not a valid response, attempt to return the response as is.
return Response(json.dumps({'type': "LineString",'coordinates':[],'note':'invalid status code'}),
mimetype='application/json')
#return res.text, res.status_code, res.headers.items()
except AttributeError:
#return nothing
return Response(json.dumps({'type': "LineString",'coordinates':[],'note':'AttributeError'}),
mimetype='application/json')
#@cache.memoize(timeout=3600)
def get_uframe_streams(mooring, platform, instrument, stream_type):
'''
Lists all the streams
'''
try:
uframe_url, timeout, timeout_read = get_uframe_info()
url = '/'.join([uframe_url, mooring, platform, instrument, stream_type])
current_app.logger.info("GET %s", url)
response = requests.get(url, timeout=(timeout, timeout_read))
return response
except Exception as e:
return internal_server_error('uframe connection cannot be made.' + str(e.message))
#@cache.memoize(timeout=3600)
def get_uframe_stream(mooring, platform, instrument, stream):
'''
Lists the reference designators for the streams
'''
try:
uframe_url, timeout, timeout_read = get_uframe_info()
url = "/".join([uframe_url, mooring, platform, instrument, stream])
current_app.logger.info("GET %s", url)
response = requests.get(url, timeout=(timeout, timeout_read))
return response
except Exception as e:
#return internal_server_error('uframe connection cannot be made.' + str(e.message))
return _response_internal_server_error()
def get_uframe_toc():
uframe_url = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_TOC']
r = requests.get(uframe_url)
if r.status_code == 200:
d = r.json()
for row in d:
try:
                # uFrame returns mooring and platform codes swapped;
                # swap them back in place
                row['mooring_code'], row['platform_code'] = \
                    row['platform_code'], row['mooring_code']
instrument_display_name = PlatformDeployment._get_display_name(row['reference_designator'])
split_name = instrument_display_name.split(' - ')
row['instrument_display_name'] = split_name[-1]
row['mooring_display_name'] = split_name[0]
row['platform_display_name'] = split_name[1]
            except Exception:
                # display-name lookup or parsing failed; fall back to blanks
row['instrument_display_name'] = ""
row['platform_display_name'] = ""
row['mooring_display_name'] = ""
return d
else:
return []
@api.route('/get_structured_toc')
@cache.memoize(timeout=1600)
def get_structured_toc():
try:
mooring_list = []
mooring_key = []
platform_list = []
platform_key = []
instrument_list = []
instrument_key = []
data = get_uframe_toc()
for d in data:
if d['reference_designator'] not in instrument_key:
instrument_list.append({'array_code':d['reference_designator'][0:2],
'display_name': d['instrument_display_name'],
'mooring_code': d['mooring_code'],
'platform_code': d['platform_code'],
'instrument_code': d['platform_code'],
'streams':d['streams'],
'instrument_parameters':d['instrument_parameters'],
'reference_designator':d['reference_designator']
})
instrument_key.append(d['reference_designator'])
if d['mooring_code'] not in mooring_key:
mooring_list.append({'array_code':d['reference_designator'][0:2],
'mooring_code':d['mooring_code'],
'platform_code':d['platform_code'],
'display_name':d['mooring_display_name'],
'geo_location':[],
'reference_designator':d['mooring_code']
})
mooring_key.append(d['mooring_code'])
if d['mooring_code']+d['platform_code'] not in platform_key:
platform_list.append({'array_code':d['reference_designator'][0:2],
'platform_code':d['platform_code'],
'mooring_code':d['mooring_code'],
'reference_designator':d['reference_designator'],
'display_name': d['platform_display_name']
})
platform_key.append(d['mooring_code']+d['platform_code'])
return jsonify(toc={"moorings":mooring_list,
"platforms":platform_list,
"instruments":instrument_list
})
except Exception as e:
return internal_server_error('uframe connection cannot be made.' + str(e.message))
@api.route('/get_toc')
@cache.memoize(timeout=1600)
def get_toc():
try:
data = get_uframe_toc()
return jsonify(toc=data)
except Exception as e:
return internal_server_error('uframe connection cannot be made.' + str(e.message))
@api.route('/get_instrument_metadata/<string:ref>', methods=['GET'])
#@cache.memoize(timeout=3600)
def get_uframe_instrument_metadata(ref):
'''
Returns the uFrame metadata response for a given stream
'''
try:
mooring, platform, instrument = ref.split('-', 2)
uframe_url, timeout, timeout_read = get_uframe_info()
url = "/".join([uframe_url, mooring, platform, instrument, 'metadata'])
response = requests.get(url, timeout=(timeout, timeout_read))
if response.status_code == 200:
data = response.json()
return jsonify(metadata=data['parameters'])
return jsonify(metadata={}), 404
except Exception as e:
return internal_server_error('uframe connection cannot be made.' + str(e.message))
@api.route('/get_metadata_parameters/<string:ref>', methods=['GET'])
#@cache.memoize(timeout=3600)
def get_uframe_instrument_metadata_parameters(ref):
'''
Returns the uFrame metadata parameters for a given stream
'''
try:
mooring, platform, instrument = ref.split('-', 2)
uframe_url, timeout, timeout_read = get_uframe_info()
url = "/".join([uframe_url, mooring, platform, instrument, 'metadata', 'parameters'])
#current_app.logger.info("GET %s", url)
response = requests.get(url, timeout=(timeout, timeout_read))
return response
except:
return _response_internal_server_error()
def _response_internal_server_error(msg=None):
    message = json.dumps({'error': 'uframe connection cannot be made.'})
    if msg:
        message = json.dumps({'error': msg})
    response = make_response(message)
    response.status_code = 500
    response.headers["Content-Type"] = "application/json"
    return response
@api.route('/get_metadata_times/<string:ref>', methods=['GET'])
@auth.login_required
#@cache.memoize(timeout=3600)
def get_uframe_stream_metadata_times(ref):
'''
Returns the uFrame time bounds response for a given stream
'''
mooring, platform, instrument = ref.split('-', 2)
try:
uframe_url, timeout, timeout_read = get_uframe_info()
url = "/".join([uframe_url, mooring, platform, instrument, 'metadata','times'])
#current_app.logger.info("GET %s", url)
response = requests.get(url, timeout=(timeout, timeout_read))
if response.status_code == 200:
return response
return jsonify(times={}), 200
except Exception as e:
return internal_server_error('uframe connection cannot be made.' + str(e.message))
#@cache.memoize(timeout=3600)
#DEPRECATED
def get_uframe_stream_contents(mooring, platform, instrument, stream_type, stream, start_time, end_time, dpa_flag, provenance='false', annotations='false'):
    """
    Gets the bounded stream contents; start_time and end_time must be
    formatted time strings. Returns a requests Response object.
    """
try:
if dpa_flag == '0':
query = '?beginDT=%s&endDT=%s&include_provenance=%s&include_annotations=%s' % (start_time, end_time, provenance, annotations)
else:
query = '?beginDT=%s&endDT=%s&include_provenance=%s&include_annotations=%s&execDPA=true' % (start_time, end_time, provenance, annotations)
uframe_url, timeout, timeout_read = get_uframe_info()
url = "/".join([uframe_url, mooring, platform, instrument, stream_type, stream + query])
current_app.logger.debug('***** url: ' + url)
response = requests.get(url, timeout=(timeout, timeout_read))
if not response:
raise Exception('No data available from uFrame for this request.')
        if response.status_code != 200:
            raise Exception('(%s) failed to retrieve stream contents from uFrame' % response.status_code)
return response
except Exception as e:
return internal_server_error('uFrame connection cannot be made. ' + str(e.message))
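# Example of the query string built above (timestamps are illustrative):
#   ?beginDT=2015-01-01T00:00:00.000Z&endDT=2015-01-02T00:00:00.000Z
#       &include_provenance=false&include_annotations=false&execDPA=true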
def get_uframe_plot_contents_chunked(mooring, platform, instrument, stream_type, stream, start_time, end_time, dpa_flag, parameter_ids):
    '''
    Gets the bounded stream contents for plotting; start_time and end_time
    must be formatted time strings.
    '''
try:
        if dpa_flag == '0' and len(parameter_ids) < 1:
            query = '?beginDT=%s&endDT=%s&limit=%s' % (start_time, end_time, current_app.config['DATA_POINTS'])
        elif dpa_flag == '1' and len(parameter_ids) < 1:
            query = '?beginDT=%s&endDT=%s&limit=%s&execDPA=true' % (start_time, end_time, current_app.config['DATA_POINTS'])
        elif dpa_flag == '0' and len(parameter_ids) > 0:
            query = '?beginDT=%s&endDT=%s&limit=%s&parameters=%s' % (start_time, end_time, current_app.config['DATA_POINTS'], ','.join(map(str, parameter_ids)))
        elif dpa_flag == '1' and len(parameter_ids) > 0:
            query = '?beginDT=%s&endDT=%s&limit=%s&execDPA=true&parameters=%s' % (start_time, end_time, current_app.config['DATA_POINTS'], ','.join(map(str, parameter_ids)))
GA_URL = current_app.config['GOOGLE_ANALYTICS_URL']+'&ec=plot&ea=%s&el=%s' % ('-'.join([mooring, platform, instrument, stream]), '-'.join([start_time, end_time]))
UFRAME_DATA = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_URL_BASE']
url = "/".join([UFRAME_DATA,mooring, platform, instrument, stream_type, stream + query])
        current_app.logger.debug('***** url: ' + url)
        TOO_BIG = 1024 * 1024 * 15  # 15 MB
        CHUNK_SIZE = 1024 * 32      # 32 KB
        TOTAL_SECONDS = 20
dataBlock = ""
        idx = 0  # chunk counter
t0 = time.time()
with closing(requests.get(url,stream=True)) as response:
content_length = 0
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
content_length = content_length + CHUNK_SIZE
t1 = time.time()
total = t1-t0
idx+=1
                if content_length > TOO_BIG or total > TOTAL_SECONDS:
                    # uframe response too large or too slow: truncate at the
                    # last complete JSON object and re-close the array
                    t00 = time.time()
                    idx_c = dataBlock.rfind('}, {')
                    dataBlock = dataBlock[:idx_c]
                    dataBlock += "}\n]"
                    t11 = time.time()
                    totaln = t11 - t00
                    current_app.logger.debug("size_limit or time reached %s %s %s" % (content_length / 1024, total, idx))
                    return json.loads(dataBlock), 200
                # all the data fits in the response; accumulate and return as normal
#previousBlock = dataBlock
dataBlock+=chunk
#print "transfer complete",content_length/(1024 * 1024),total
#if str(dataBlock[-3:-1]) != '} ]':
# idx_c = dataBlock.rfind('}')
# dataBlock = dataBlock[:idx_c]
# dataBlock+="} ]"
# print 'uFrame appended Error Message to Stream',"\n",dataBlock[-3:-1]
            idx_c = dataBlock.rfind('}\n]')
            current_app.logger.debug('idx_c: %s' % idx_c)
            if idx_c == -1:
                dataBlock += "]"
            urllib2.urlopen(GA_URL)
            return json.loads(dataBlock), 200
    except Exception as e:
        current_app.logger.exception(str(e))
        return internal_server_error('uframe connection unstable. ' + str(e))
def get_uframe_stream_contents_chunked(mooring, platform, instrument, stream_type, stream, start_time, end_time, dpa_flag):
    '''
    Gets the bounded stream contents; start_time and end_time must be
    formatted time strings.
    '''
try:
if dpa_flag == '0':
query = '?beginDT=%s&endDT=%s' % (start_time, end_time)
else:
query = '?beginDT=%s&endDT=%s&execDPA=true' % (start_time, end_time)
UFRAME_DATA = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_URL_BASE']
url = "/".join([UFRAME_DATA,mooring, platform, instrument, stream_type, stream + query])
        current_app.logger.debug('***** url: ' + url)
        TOO_BIG = 1024 * 1024 * 15  # 15 MB
        CHUNK_SIZE = 1024 * 32      # 32 KB
        TOTAL_SECONDS = 20
dataBlock = ""
        idx = 0  # chunk counter
t0 = time.time()
with closing(requests.get(url,stream=True)) as response:
content_length = 0
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
content_length = content_length + CHUNK_SIZE
t1 = time.time()
total = t1-t0
idx+=1
                if content_length > TOO_BIG or total > TOTAL_SECONDS:
                    # uframe response too large or too slow: truncate at the
                    # last complete JSON object and re-close the array
                    t00 = time.time()
                    idx_c = dataBlock.rfind('}, {')
                    dataBlock = dataBlock[:idx_c]
                    dataBlock += "} ]"
                    t11 = time.time()
                    totaln = t11 - t00
                    current_app.logger.debug("size_limit or time reached %s %s %s %s" % (content_length / (1024 * 1024), total, totaln, idx))
                    return json.loads(dataBlock), 200
            # all the data fits in the response; accumulate and return as normal
#previousBlock = dataBlock
dataBlock+=chunk
#print "transfer complete",content_length/(1024 * 1024),total
#if str(dataBlock[-3:-1]) != '} ]':
# idx_c = dataBlock.rfind('}')
# dataBlock = dataBlock[:idx_c]
# dataBlock+="} ]"
# print 'uFrame appended Error Message to Stream',"\n",dataBlock[-3:-1]
idx_c = dataBlock.rfind('} ]')
if idx_c == -1:
dataBlock+="]"
return json.loads(dataBlock),200
    except Exception as e:
        current_app.logger.exception(str(e))
        return internal_server_error('uframe connection unstable. ' + str(e))
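# A minimal sketch of the truncation trick used by both chunked readers
# above, assuming uFrame streams a JSON array of objects ('[ {...}, {...} ]'):
# cut at the last complete object and re-close the array so json.loads()
# succeeds on a partial download.
#
#   partial = '[ {"a": 1}, {"a": 2}, {"a'   # download cut off mid-object
#   idx = partial.rfind('}, {')             # last complete object boundary
#   repaired = partial[:idx] + '} ]'        # '[ {"a": 1}, {"a": 2} ]'
#   json.loads(repaired)                    # -> [{'a': 1}, {'a': 2}]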
def get_uframe_info():
    '''
    Returns uframe configuration information:
    (uframe_url, timeout_connect, timeout_read).
    '''
uframe_url = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_URL_BASE']
timeout = current_app.config['UFRAME_TIMEOUT_CONNECT']
timeout_read = current_app.config['UFRAME_TIMEOUT_READ']
return uframe_url, timeout, timeout_read
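# A minimal sketch of the Flask config keys get_uframe_info() relies on
# (values are illustrative assumptions, not deployed settings):
#   UFRAME_URL = 'http://uframe.example.org:12576'
#   UFRAME_URL_BASE = '/sensor/inv'
#   UFRAME_TIMEOUT_CONNECT = 5   # seconds
#   UFRAME_TIMEOUT_READ = 30     # seconds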
def validate_date_time(start_time, end_time):
    '''
    Placeholder: intended to clamp end_time so that the requested span does
    not exceed the configured UFRAME_DATA_REQUEST_LIMIT. The clamping logic
    below is currently disabled.
uframe_data_request_limit = int(current_app.config['UFRAME_DATA_REQUEST_LIMIT'])/1440
new_end_time_strp = datetime.datetime.strptime(start_time, "%Y-%m-%dT%H:%M:%S.%fZ") + datetime.timedelta(days=uframe_data_request_limit)
old_end_time_strp = datetime.datetime.strptime(end_time, "%Y-%m-%dT%H:%M:%S.%fZ")
new_end_time = datetime.datetime.strftime(new_end_time_strp, "%Y-%m-%dT%H:%M:%S.%fZ")
if old_end_time_strp > new_end_time_strp:
end_time = new_end_time
'''
return end_time
@api.route('/get_csv/<string:stream>/<string:ref>/<string:start_time>/<string:end_time>/<string:dpa_flag>', methods=['GET'])
@auth.login_required
def get_csv(stream, ref, start_time, end_time, dpa_flag):
mooring, platform, instrument = ref.split('-', 2)
stream_type, stream = stream.split('_', 1)
    # clamp the requested time range if needed (currently a no-op)
end_time = validate_date_time(start_time, end_time)
data = get_uframe_stream_contents(mooring, platform, instrument, stream_type, stream, start_time, end_time, dpa_flag)
try:
GA_URL = current_app.config['GOOGLE_ANALYTICS_URL']+'&ec=download_csv&ea=%s&el=%s' % ('-'.join([mooring, platform, instrument, stream]), '-'.join([start_time, end_time]))
urllib2.urlopen(GA_URL)
except KeyError:
pass
if data.status_code != 200:
return data, data.status_code, dict(data.headers)
output = io.BytesIO()
data = data.json()
f = csv.DictWriter(output, fieldnames = data[0].keys())
f.writeheader()
for row in data:
f.writerow(row)
filename = '-'.join([stream, ref])
buf = output.getvalue()
returned_csv = make_response(buf)
returned_csv.headers["Content-Disposition"] = "attachment; filename=%s.csv" % filename
returned_csv.headers["Content-Type"] = "text/csv"
output.close()
return returned_csv
@api.route('/get_json/<string:stream>/<string:ref>/<string:start_time>/<string:end_time>/<string:dpa_flag>/<string:provenance>/<string:annotations>', methods=['GET'])
@auth.login_required
def get_json(stream, ref, start_time, end_time, dpa_flag, provenance, annotations):
mooring, platform, instrument = ref.split('-', 2)
stream_type, stream = stream.split('_', 1)
    # clamp the requested time range if needed (currently a no-op)
end_time = validate_date_time(start_time, end_time)
data = get_uframe_stream_contents(mooring, platform, instrument, stream_type, stream, start_time, end_time, dpa_flag, provenance, annotations)
try:
GA_URL = current_app.config['GOOGLE_ANALYTICS_URL']+'&ec=download_json&ea=%s&el=%s' % ('-'.join([mooring, platform, instrument, stream]), '-'.join([start_time, end_time]))
urllib2.urlopen(GA_URL)
except KeyError:
pass
if data.status_code != 200:
return data, data.status_code, dict(data.headers)
response = '{"data":%s}' % data.content
filename = '-'.join([stream,ref])
returned_json = make_response(response)
returned_json.headers["Content-Disposition"] = "attachment; filename=%s.json"%filename
returned_json.headers["Content-Type"] = "application/json"
return returned_json
@api.route('/get_netcdf/<string:stream>/<string:ref>/<string:start_time>/<string:end_time>/<string:dpa_flag>/<string:provenance>/<string:annotations>', methods=['GET'])
@auth.login_required
def get_netcdf(stream, ref, start_time, end_time, dpa_flag, provenance, annotations):
mooring, platform, instrument = ref.split('-', 2)
stream_type, stream = stream.split('_', 1)
try:
GA_URL = current_app.config['GOOGLE_ANALYTICS_URL']+'&ec=download_netcdf&ea=%s&el=%s' % ('-'.join([mooring, platform, instrument, stream]), '-'.join([start_time, end_time]))
urllib2.urlopen(GA_URL)
except KeyError:
pass
if dpa_flag == '0':
query = '?beginDT=%s&endDT=%s&include_provenance=%s&include_annotations=%s' % (start_time, end_time, provenance, annotations)
else:
query = '?beginDT=%s&endDT=%s&include_provenance=%s&include_annotations=%s&execDPA=true' % (start_time, end_time, provenance, annotations)
query += '&format=application/netcdf'
uframe_url, timeout, timeout_read = get_uframe_info()
url = "/".join([uframe_url, mooring, platform, instrument, stream_type, stream + query])
current_app.logger.debug('***** url: ' + url)
response = requests.get(url, timeout=(timeout, timeout_read))
if response.status_code != 200:
return response.text, response.status_code
filename = '-'.join([stream, ref])
buf = response.content
returned_netcdf = make_response(buf)
returned_netcdf.headers["Content-Disposition"] = "attachment; filename=%s.zip" % filename
returned_netcdf.headers["Content-Type"] = "application/zip"
return returned_netcdf
#@auth.login_required
@api.route('/get_data/<string:instrument>/<string:stream>/<string:yvar>/<string:xvar>', methods=['GET'])
def get_data_api(stream, instrument, yvar, xvar):
# return if error
try:
xvar = xvar.split(',')
yvar = yvar.split(',')
resp_data,units = get_simple_data(stream, instrument, yvar, xvar)
except Exception as err:
return jsonify(error='%s' % str(err.message)), 400
return jsonify(data=resp_data,units=units)
@api.route('/plot/<string:instrument>/<string:stream>', methods=['GET'])
@auth.login_required
def get_svg_plot(instrument, stream):
# from ooiservices.app.uframe.controller import split_stream_name
# Ok first make a list out of stream and instrument
instrument = instrument.split(',')
#instrument.append(instrument[0])
stream = stream.split(',')
#stream.append(stream[0])
plot_format = request.args.get('format', 'svg')
# time series vs profile
plot_layout = request.args.get('plotLayout', 'timeseries')
    xvar = request.args.get('xvar', 'time')
    yvar = request.args.get('yvar', None)
    # a yvar is required; bail out before trying to split it
    if yvar is None:
        return jsonify(error='Error: yvar is required'), 400
    # There can be multiple variables, so split each into a list
    xvar = xvar.split(',')
    yvar = yvar.split(',')
if len(instrument) == len(stream):
pass # everything the same
else:
instrument = [instrument[0]]
stream = [stream[0]]
yvar = [yvar[0]]
xvar = [xvar[0]]
# create bool from request
# use_line = to_bool(request.args.get('line', True))
use_scatter = to_bool(request.args.get('scatter', True))
use_event = to_bool(request.args.get('event', True))
qaqc = int(request.args.get('qaqc', 0))
# Get Events!
events = {}
if use_event:
try:
response = _get_events_by_ref_des(instrument[0])
events = json.loads(response.data)
except Exception as err:
current_app.logger.exception(str(err.message))
return jsonify(error=str(err.message)), 400
    profileid = request.args.get('profileId', None)
    height = float(request.args.get('height', 100))  # px
    width = float(request.args.get('width', 100))    # px
    # convert pixel dimensions to inches for the plot (96 dpi assumed)
    height_in = height / 96.
    width_in = width / 96.
# get the data from uFrame
try:
if plot_layout == "depthprofile":
data = get_process_profile_data(stream[0], instrument[0], yvar[0], xvar[0])
else:
if len(instrument) == 1:
data = get_data(stream[0], instrument[0], yvar, xvar)
elif len(instrument) > 1: # Multiple datasets
data = []
for idx, instr in enumerate(instrument):
stream_data = get_data(stream[idx], instr, [yvar[idx]], [xvar[idx]])
data.append(stream_data)
except Exception as err:
current_app.logger.exception(str(err.message))
return jsonify(error=str(err.message)), 400
if not data:
return jsonify(error='No data returned for %s' % plot_layout), 400
# return if error
if 'error' in data or 'Error' in data:
return jsonify(error=data['error']), 400
    # generate plot
    if isinstance(data, tuple) and plot_layout == "depthprofile":
        return jsonify(error='tuple data returned for %s' % plot_layout), 400
if isinstance(data, dict):
# get title
title = PlatformDeployment._get_display_name(instrument[0])
if len(title) > 50:
title = ''.join(title.split('-')[0:-1]) + '\n' + title.split('-')[-1]
data['title'] = title
data['height'] = height_in
data['width'] = width_in
else:
for idx, streamx in enumerate(stream):
title = PlatformDeployment._get_display_name(instrument[idx])
if len(title) > 50:
title = ''.join(title.split('-')[0:-1]) + '\n' + title.split('-')[-1]
data[idx]['title'] = title
data[idx]['height'] = height_in
data[idx]['width'] = width_in
plot_options = {'plot_format': plot_format,
'plot_layout': plot_layout,
'use_scatter': use_scatter,
'events': events,
'profileid': profileid,
'width_in': width_in,
'use_qaqc': qaqc,
'st_date': request.args['startdate'],
'ed_date': request.args['enddate']}
try:
buf = generate_plot(data, plot_options)
content_header_map = {
'svg' : 'image/svg+xml',
'png' : 'image/png'
}
return buf.read(), 200, {'Content-Type': content_header_map[plot_format]}
except Exception as err:
current_app.logger.exception(str(err.message))
return jsonify(error='Error generating {0} plot: {1}'.format(plot_options['plot_layout'], str(err.message))), 400
def get_process_profile_data(stream, instrument, xvar, yvar):
    '''
    NOTE: (xvar, yvar) are swapped here relative to the caller so that the
    profile plot comes out with the correct orientation.
    '''
try:
join_name ='_'.join([str(instrument), str(stream)])
mooring, platform, instrument, stream_type, stream = split_stream_name(join_name)
parameter_ids, y_units, x_units = find_parameter_ids(mooring, platform, instrument, [yvar], [xvar])
data = get_profile_data(mooring, platform, instrument, stream_type, stream, parameter_ids)
        if not data:
            raise Exception('profiles not present in data')
except Exception as e:
raise Exception('%s' % str(e.message))
'''
# check the data is in the first row
if yvar not in data[0] or xvar not in data[0]:
data = {'error':'requested fields not in data'}
return data
if 'profile_id' not in data[0]:
data = {'error':'profiles not present in data'}
return data
'''
y_data = []
x_data = []
time = []
profile_id_list = []
profile_count = -1
for i, row in enumerate(data):
if (row['profile_id']) >= 0:
profile_id = int(row['profile_id'])
if profile_id not in profile_id_list:
y_data.append([])
x_data.append([])
time.append(float(row['pk']['time']))
profile_id_list.append(profile_id)
profile_count += 1
        try:
            y_data[profile_count].append(row[yvar])
            x_data[profile_count].append(row[xvar])
        except Exception:
            raise Exception('requested fields not present in data row')
return {'x': x_data, 'y': y_data, 'x_field': xvar, "y_field": yvar, 'time': time}
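# Illustrative return shape (field names and values are hypothetical):
#   {'x': [[2.1, 2.3, ...], ...], 'y': [[10, 12, ...], ...],
#    'x_field': 'sci_water_temp', 'y_field': 'pressure', 'time': [3.6e9, ...]}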
def get_profile_data(mooring, platform, instrument, stream_type, stream, parameter_ids):
'''
process uframe data into profiles
'''
try:
data = []
if 'startdate' in request.args and 'enddate' in request.args:
st_date = request.args['startdate']
ed_date = request.args['enddate']
if 'dpa_flag' in request.args:
dpa_flag = request.args['dpa_flag']
else:
dpa_flag = "0"
ed_date = validate_date_time(st_date, ed_date)
data, status_code = get_uframe_plot_contents_chunked(mooring, platform, instrument, stream_type, stream, st_date, ed_date, dpa_flag, parameter_ids)
else:
message = 'Failed to make plot - start end dates not applied'
current_app.logger.exception(message)
raise Exception(message)
if status_code != 200:
raise IOError("uFrame unable to get data for this request.")
current_app.logger.debug('\n --- retrieved data from uframe for profile processing...')
        # Note: assumes the data has a depth field and that time is ordinal.
        # TODO: add assertions and try/except blocks to validate the data.
time = []
depth = []
request_xvar = None
        if request.args.get('xvar'):
            junk = request.args.get('xvar')
test_request_xvar = junk.encode('ascii','ignore')
            if isinstance(test_request_xvar, str):
if ',' in test_request_xvar:
chunk_request_var = test_request_xvar.split(',',1)
if len(chunk_request_var) > 0:
request_xvar = chunk_request_var[0]
else:
request_xvar = test_request_xvar
else:
message = 'Failed to make plot - no xvar provided in request'
current_app.logger.exception(message)
raise Exception(message)
if not request_xvar:
message = 'Failed to make plot - unable to process xvar provided in request'
current_app.logger.exception(message)
raise Exception(message)
for row in data:
depth.append(int(row[request_xvar]))
time.append(float(row['pk']['time']))
matrix = np.column_stack((time, depth))
tz = matrix
origTz = tz
INT = 10
# tz length must equal profile_list length
# maxi = np.amax(tz[:, 0])
# mini = np.amin(tz[:, 0])
        # build a regular time base from min to max at interval INT
        # (INT is in the same units as the incoming time axis)
        ts = (np.arange(np.amin(tz[:, 0]), np.amax(tz[:, 0]), INT)).T
        # interpolate depth onto the regular time base, treating depth as a
        # function of time
        itz = np.interp(ts, tz[:, 0], tz[:, 1])
newtz = np.column_stack((ts, itz))
# 5 unit moving average
WINDOW = 5
weights = np.repeat(1.0, WINDOW) / WINDOW
ma = np.convolve(newtz[:, 1], weights)[WINDOW-1:-(WINDOW-1)]
        # take the diff and map negatives to -1 and positives to +1
dZ = np.sign(np.diff(ma))
# repeat for second derivative
dZ = np.convolve(dZ, weights)[WINDOW-1:-(WINDOW-1)]
dZ = np.sign(dZ)
dZero = np.diff(dZ)
        # find where the slope changes: nonzero entries of dZero
        start = [i for (i, val) in enumerate(dZero) if val != 0]
        stop = []
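        # Worked example of the slope-change detection (values illustrative):
        #   dZ = [1, 1, -1, -1, 1]  ->  np.diff(dZ) = [0, -2, 0, 2]
        # the nonzero entries (indices 1 and 3) mark where a dive flips to a
        # climb (or vice versa) and become candidate profile boundaries.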
if len(start) == 0:
raise Exception('Unable to determine where slope changes.')
for i in range(len(start)-1):
stop.append(start[i+1])
stop.append(start[0])
start_stop = np.column_stack((start, stop))
start_times = np.take(newtz[:, 0], start)
stop_times = np.take(newtz[:, 0], stop)
start_times = start_times - INT*2
stop_times = stop_times + INT*2
depth_profiles = []
for i in range(len(start_times)):
profile_id = i
proInds = origTz[(origTz[:, 0] >= start_times[i]) & (origTz[:, 0] <= stop_times[i])]
value = proInds.shape[0]
z = np.full((value, 1), profile_id)
pro = np.append(proInds, z, axis=1)
depth_profiles.append(pro)
depth_profiles = np.concatenate(depth_profiles)
        # TODO: check for duplicate times (not yet done).
        # Start/stop times may overlap on the original data set (see above);
        # this may be an issue and requires further enquiry.
profile_list = []
for row in data:
try:
# Need to add epsilon. Floating point error may occur
where = np.argwhere(depth_profiles == float(row['pk']['time']))
index = where[0]
rowloc = index[0]
if len(where) and int(row[request_xvar]) == depth_profiles[rowloc][1]:
row['profile_id'] = depth_profiles[rowloc][2]
profile_list.append(row)
except IndexError:
row['profile_id'] = None
profile_list.append(row)
except Exception as err:
raise Exception('%s' % str(err.message))
# profile length should equal tz length
return profile_list
except Exception as err:
current_app.logger.exception('\n* (pass) exception: ' + str(err.message))
# @auth.login_required
@api.route('/get_profiles/<string:stream>/<string:instrument>', methods=['GET'])
def get_profiles(stream, instrument):
filename = '-'.join([stream, instrument, "profiles"])
content_headers = {'Content-Type': 'application/json', 'Content-Disposition': "attachment; filename=%s.json" % filename}
    try:
        # split the route arguments into the pieces get_profile_data expects
        # (mooring, platform, instrument, stream_type, stream); no specific
        # parameter ids are requested here
        mooring, platform, instrument = instrument.split('-', 2)
        stream_type, stream = stream.split('_', 1)
        profiles = get_profile_data(mooring, platform, instrument, stream_type, stream, [])
except Exception as e:
return jsonify(error=e.message), 400, content_headers
if profiles is None:
return jsonify(), 204, content_headers
return jsonify(profiles=profiles), 200, content_headers
def make_cache_key():
return urlencode(request.args)
def to_bool(value):
"""
Converts 'something' to boolean. Raises exception for invalid formats
Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
"""
if str(value).lower() in ("yes", "y", "true", "t", "1"):
return True
if str(value).lower() in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"):
return False
raise Exception('Invalid value for boolean conversion: ' + str(value))
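# Illustrative conversions (hypothetical calls, not made by the service):
#   to_bool('YES')   -> True
#   to_bool(0.0)     -> False
#   to_bool('maybe') -> raises Exception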
def to_bool_str(value):
"""
Converts 'something' to boolean. Raises exception for invalid formats
Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
"""
if str(value).lower() in ("yes", "y", "true", "t", "1"):
return "1"
if str(value).lower() in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"):
return "0"
raise Exception('Invalid value for boolean conversion: ' + str(value))
| birdage/ooi-ui-services | ooiservices/app/uframe/controller.py | Python | apache-2.0 | 44,986 | [
"NetCDF"
] | 87c7e98db2a2e2e576f6fdbb5de19c142159b9de6296f31c9fb73e3681cc1736 |
# -*- coding: utf-8 -*-
#########################################################################
## rdesigneur0_5.py ---
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU General Public License version 2 or later.
## See the file COPYING.LIB for the full notice.
#########################################################################
##########################################################################
## This class builds models of
## Reaction-Diffusion and Electrical SIGnaling in NEURons.
## It loads in neuronal and chemical signaling models and embeds the
## latter in the former, including mapping entities like calcium and
## channel conductances, between them.
##########################################################################
from __future__ import print_function
from __future__ import absolute_import
import imp
import os
import moose
import numpy as np
import pylab
import math
import rdesigneur.rmoogli
from rdesigneur.rdesigneurProtos import *
from moose.neuroml.NeuroML import NeuroML
from moose.neuroml.ChannelML import ChannelML
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
print("Failed to import ElementTree from any known place")
import csv
#EREST_ACT = -70e-3
class BuildError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#######################################################################
class rdesigneur:
"""The rdesigneur class is used to build models incorporating
reaction-diffusion and electrical signaling in neurons.
Params:
useGssa: True/False for GSSA in spine and PSD
combineSegments: True/False for NeuroML models
diffusionLength: default 2e-6
adaptCa: [( Ca_wildcard_string, chem_wildcard_string, offset, scale ),...]
adaptChem: [( Chem_wildcard_string, elec_wildcard_string, offset, scale ),...]
    TODO: put the extra channels into the NeuroML definition.
"""
################################################################
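    # A minimal usage sketch (hedged illustration; argument values are
    # examples only):
    #   import rdesigneur as rd
    #   rdes = rd.rdesigneur( turnOffElec = True, diffusionLength = 1e-6 )
    #   rdes.buildModel( '/model' )
    #   moose.reinit()
    #   moose.start( 1.0 )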
def __init__(self,
modelPath = '/model',
turnOffElec = False,
useGssa = True,
combineSegments = True,
stealCellFromLibrary = False,
diffusionLength= 2e-6,
meshLambda = -1.0, #This is a backward compatibility hack
temperature = 32,
chemDt= 0.001, # Much finer than MOOSE, for multiscale
diffDt= 0.001, # 10x finer than MOOSE, for multiscale
elecDt= 50e-6, # Same default as from MOOSE
chemPlotDt = 1.0, # Same default as from MOOSE
elecPlotDt = 0.1e-3, # Same default as from MOOSE
cellProto = [],
spineProto = [],
chanProto = [],
chemProto = [],
passiveDistrib= [],
spineDistrib= [],
chanDistrib = [],
chemDistrib = [],
adaptorList= [],
stimList = [],
plotList = [],
moogList = [],
params = None
):
""" Constructor of the rdesigner. This just sets up internal fields
for the model building, it doesn't actually create any objects.
"""
self.modelPath = modelPath
self.turnOffElec = turnOffElec
self.useGssa = useGssa
self.combineSegments = combineSegments
self.stealCellFromLibrary = stealCellFromLibrary
self.diffusionLength= diffusionLength
if meshLambda > 0.0:
print("Warning: meshLambda argument is deprecated. Please use 'diffusionLength' instead.\nFor now rdesigneur will accept this argument.")
self.diffusionLength = meshLambda
self.temperature = temperature
self.chemDt= chemDt
self.diffDt= diffDt
self.elecDt= elecDt
self.elecPlotDt= elecPlotDt
self.chemPlotDt= chemPlotDt
self.cellProtoList = cellProto
self.spineProtoList = spineProto
self.chanProtoList = chanProto
self.chemProtoList = chemProto
self.passiveDistrib = passiveDistrib
self.spineDistrib = spineDistrib
self.chanDistrib = chanDistrib
self.chemDistrib = chemDistrib
self.params = params
self.adaptorList = adaptorList
self.stimList = stimList
self.plotList = plotList
self.saveList = plotList #ADDED BY Sarthak
self.saveAs = []
self.moogList = moogList
self.plotNames = []
self.saveNames = []
self.moogNames = []
self.cellPortionElist = []
self.spineComptElist = []
self.tabForXML = []
if not moose.exists( '/library' ):
library = moose.Neutral( '/library' )
try:
self.buildCellProto()
self.buildChanProto()
self.buildSpineProto()
self.buildChemProto()
except BuildError as msg:
print("Error: rdesigneur: Prototype build failed:", msg)
quit()
################################################################
def _printModelStats( self ):
print("Rdesigneur: Elec model has",
self.elecid.numCompartments, "compartments and",
self.elecid.numSpines, "spines on",
len( self.cellPortionElist ), "compartments.")
if hasattr( self , 'chemid' ):
dmstoich = moose.element( self.dendCompt.path + '/stoich' )
print("Chem part of model has ",
self.dendCompt.mesh.num, "dendrite voxels X",
dmstoich.numAllPools, "pools,\n ")
if hasattr( self , 'spineCompt' ):
smstoich = moose.element( self.spineCompt.path + '/stoich')
pmstoich = moose.element( self.psdCompt.path + '/stoich' )
print(self.spineCompt.mesh.num, "spine voxels X",
smstoich.numAllPools, "pools,",
self.psdCompt.mesh.num, "psd voxels X",
pmstoich.numAllPools, "pools.")
def buildModel( self, modelPath = '/model' ):
if moose.exists( modelPath ):
print("rdesigneur::buildModel: Build failed. Model '",
modelPath, "' already exists.")
return
self.model = moose.Neutral( modelPath )
self.modelPath = modelPath
try:
# Protos made in the init phase. Now install the elec and
# chem protos on model.
self.installCellFromProtos()
# Now assign all the distributions
self.buildPassiveDistrib()
self.buildChanDistrib()
self.buildSpineDistrib()
self.buildChemDistrib()
self._configureSolvers()
self.buildAdaptors()
self._buildPlots()
self._buildMoogli()
self._buildStims()
self._configureClocks()
self._printModelStats()
self._savePlots()
except BuildError as msg:
print("Error: rdesigneur: model build failed:", msg)
moose.delete( self.model )
def installCellFromProtos( self ):
if self.stealCellFromLibrary:
moose.move( self.elecid, self.model )
if self.elecid.name != 'elec':
self.elecid.name = 'elec'
else:
moose.copy( self.elecid, self.model, 'elec' )
self.elecid = moose.element( self.model.path + '/elec' )
self.elecid.buildSegmentTree() # rebuild: copy has happened.
if hasattr( self, 'chemid' ):
self.validateChem()
if self.stealCellFromLibrary:
moose.move( self.chemid, self.model )
if self.chemid.name != 'chem':
self.chemid.name = 'chem'
else:
moose.copy( self.chemid, self.model, 'chem' )
self.chemid = moose.element( self.model.path + '/chem' )
ep = self.elecid.path
somaList = moose.wildcardFind( ep + '/#oma#[ISA=CompartmentBase]' )
if len( somaList ) == 0:
somaList = moose.wildcardFind( ep + '/#[ISA=CompartmentBase]' )
if len( somaList ) == 0:
raise BuildError( "installCellFromProto: No soma found" )
        maxdia = 0.0
        for i in somaList:
            if i.diameter > maxdia:
                maxdia = i.diameter
                self.soma = i
################################################################
# Some utility functions for building prototypes.
################################################################
# Return true if it is a function.
def buildProtoFromFunction( self, func, protoName ):
bracePos = func.find( '()' )
if bracePos == -1:
return False
modPos = func.find( "." )
if ( modPos != -1 ): # Function is in a file, load and check
pathTokens = func[0:modPos].split('/')
pathTokens = ['/'] + pathTokens
modulePath = os.path.join(*pathTokens[:-1])
moduleName = pathTokens[-1]
funcName = func[modPos+1:bracePos]
            moduleFile, pathName, description = imp.find_module(moduleName, [modulePath])
            try:
                module = imp.load_module(moduleName, moduleFile, pathName, description)
                funcObj = getattr(module, funcName)
                funcObj(protoName)
                return True
            finally:
                moduleFile.close()
if not func[0:bracePos] in globals():
raise BuildError( \
protoName + " Proto: global function '" +func+"' not known.")
globals().get( func[0:bracePos] )( protoName )
return True
# Class or file options. True if extension is found in
def isKnownClassOrFile( self, name, suffices ):
for i in suffices:
if name.rfind( '.'+i ) >= 0 :
return True
return False
# Checks all protos, builds them and return true. If it was a file
# then it has to return false and invite the calling function to build
# If it fails then the exception takes over.
def checkAndBuildProto( self, protoType, protoVec, knownClasses, knownFileTypes ):
if len(protoVec) != 2:
raise BuildError( \
protoType + "Proto: nargs should be 2, is " + \
str( len(protoVec) ))
if moose.exists( '/library/' + protoVec[1] ):
# Assume the job is already done, just skip it.
return True
'''
raise BuildError( \
protoType + "Proto: object /library/" + \
protoVec[1] + " already exists." )
'''
# Check and build the proto from a class name
if protoVec[0][:5] == 'moose':
protoName = protoVec[0][6:]
if self.isKnownClassOrFile( protoName, knownClasses ):
try:
                    getattr( moose, protoName )( '/library/' + protoVec[1] )
except AttributeError:
raise BuildError( protoType + "Proto: Moose class '" \
+ protoVec[0] + "' not found." )
return True
if self.buildProtoFromFunction( protoVec[0], protoVec[1] ):
return True
# Maybe the proto is already in memory
# Avoid relative file paths going toward root
if protoVec[0][:3] != "../":
if moose.exists( protoVec[0] ):
moose.copy( protoVec[0], '/library/' + protoVec[1] )
return True
if moose.exists( '/library/' + protoVec[0] ):
#moose.copy('/library/' + protoVec[0], '/library/', protoVec[1])
print('renaming /library/' + protoVec[0] + ' to ' + protoVec[1])
moose.element( '/library/' + protoVec[0]).name = protoVec[1]
#moose.le( '/library' )
return True
# Check if there is a matching suffix for file type.
if self.isKnownClassOrFile( protoVec[0], knownFileTypes ):
return False
        else:
            raise BuildError( \
                protoType + "Proto: File type '" + protoVec[0] + \
                "' not known." )
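    # Illustrative proto specs this method resolves (hedged examples):
    #   ['make_HH_Na()', 'Na'] -> call a global builder function
    #   ['CaL.xml', 'CaL']     -> known file type: returns False so the
    #                             caller loads the file itself
    #   ['Na', 'Na2']          -> rename an existing /library/Na object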
################################################################
# Here are the functions to build the type-specific prototypes.
################################################################
def buildCellProto( self ):
if len( self.cellProtoList ) == 0:
''' Make HH squid model sized compartment:
len and dia 500 microns. CM = 0.01 F/m^2, RA =
'''
self.elecid = makePassiveHHsoma( name = 'cell' )
assert( moose.exists( '/library/cell/soma' ) )
self.soma = moose.element( '/library/cell/soma' )
'''
self.elecid = moose.Neuron( '/library/cell' )
dia = 500e-6
self.soma = buildCompt( self.elecid, 'soma', dia, dia, 0.0,
0.33333333, 3000, 0.01 )
self.soma.initVm = -65e-3 # Resting of -65, from HH
self.soma.Em = -54.4e-3 # 10.6 mV above resting of -65, from HH
'''
for i in self.cellProtoList:
if self.checkAndBuildProto( "cell", i, \
["Compartment", "SymCompartment"], ["swc", "p", "nml", "xml"] ):
self.elecid = moose.element( '/library/' + i[1] )
else:
self._loadElec( i[0], i[1] )
self.elecid.buildSegmentTree()
def buildSpineProto( self ):
for i in self.spineProtoList:
if not self.checkAndBuildProto( "spine", i, \
["Compartment", "SymCompartment"], ["swc", "p", "nml", "xml"] ):
self._loadElec( i[0], i[1] )
def parseChanName( self, name ):
if name[-4:] == ".xml":
period = name.rfind( '.' )
slash = name.rfind( '/' )
if ( slash >= period ):
                raise BuildError( "chanProto: bad filename:" + name )
if ( slash < 0 ):
return name[:period]
else:
return name[slash+1:period]
def buildChanProto( self ):
for i in self.chanProtoList:
if len(i) == 1:
chanName = self.parseChanName( i[0] )
else:
chanName = i[1]
j = [i[0], chanName]
if not self.checkAndBuildProto( "chan", j, [], ["xml"] ):
cm = ChannelML( {'temperature': self.temperature} )
cm.readChannelMLFromFile( i[0] )
if ( len( i ) == 2 ):
chan = moose.element( '/library/' + chanName )
chan.name = i[1]
def buildChemProto( self ):
for i in self.chemProtoList:
if not self.checkAndBuildProto( "chem", i, \
["Pool"], ["g", "sbml", "xml" ] ):
self._loadChem( i[0], i[1] )
self.chemid = moose.element( '/library/' + i[1] )
################################################################
# Here we set up the distributions
################################################################
def buildPassiveDistrib( self ):
temp = []
for i in self.passiveDistrib:
temp.extend( i )
temp.extend( [""] )
self.elecid.passiveDistribution = temp
def buildChanDistrib( self ):
temp = []
for i in self.chanDistrib:
temp.extend( i )
temp.extend( [""] )
self.elecid.channelDistribution = temp
def buildSpineDistrib( self ):
# For uniformity and conciseness, we don't use a dictionary.
# ordering of spine distrib is
# name, path, spacing, spacingDistrib, size, sizeDistrib, angle, angleDistrib
# [i for i in L1 if i in L2]
# The first two args are compulsory, and don't need arg keys.
usageStr = 'Usage: name, path, [spacing, spacingDistrib, size, sizeDistrib, angle, angleDistrib]'
temp = []
defaults = ['spine', '#dend#,#apical#', '10e-6', '1e-6', '1', '0.5', '0', '6.2831853' ]
argKeys = ['spacing', 'spacingDistrib', 'size', 'sizeDistrib', 'angle', 'angleDistrib' ]
for i in self.spineDistrib:
if len(i) >= 2 :
arg = i[:2]
# Backward compat hack here
bcKeys = [ j for j in i[2:] if j in argKeys ]
if len( bcKeys ) > 0: # Looks like we have an old arg str
print('Rdesigneur::buildSpineDistrib: Warning: Deprecated argument format.\nWill accept for now.')
print(usageStr)
temp.extend( i + [''] )
elif len( i ) > len( defaults ):
print('Rdesigneur::buildSpineDistrib: Warning: too many arguments in spine definition')
print(usageStr)
else:
optArg = i[2:] + defaults[ len(i):]
assert( len( optArg ) == len( argKeys ) )
for j in zip( argKeys, optArg ):
arg.extend( [j[0], j[1]] )
temp.extend( arg + [''] )
self.elecid.spineDistribution = temp
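    # Example expansion (hedged): a spineDistrib entry
    #   ['spine', '#dend#', '20e-6']
    # becomes the flattened argument string
    #   ['spine', '#dend#', 'spacing', '20e-6', 'spacingDistrib', '1e-6',
    #    'size', '1', 'sizeDistrib', '0.5', 'angle', '0',
    #    'angleDistrib', '6.2831853', '']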
def buildChemDistrib( self ):
for i in self.chemDistrib:
pair = i[1] + " " + i[3]
# Assign any other params. Possibly the first param should
# be a global scaling factor.
self.cellPortionElist = self.elecid.compartmentsFromExpression[ pair ]
if len( self.cellPortionElist ) == 0:
raise BuildError( \
"buildChemDistrib: No elec compartments found in path: '" \
+ pair + "'" )
self.spineComptElist = self.elecid.spinesFromExpression[ pair ]
'''
if len( self.spineComptElist ) == 0:
raise BuildError( \
"buildChemDistrib: No spine compartments found in path: '" \
+ pair + "'" )
'''
# Build the neuroMesh
# Check if it is good. Need to catch the ValueError here.
self._buildNeuroMesh()
# Assign the solvers
################################################################
# Here we set up the adaptors
################################################################
def findMeshOnName( self, name ):
pos = name.find( '/' )
if ( pos != -1 ):
temp = name[:pos]
if temp == 'psd' or temp == 'spine' or temp == 'dend':
return ( temp, name[pos+1:] )
return ("","")
def buildAdaptors( self ):
for i in self.adaptorList:
mesh, name = self.findMeshOnName( i[0] )
if mesh == "":
mesh, name = self.findMeshOnName( i[2] )
if mesh == "":
raise BuildError( "buildAdaptors: Failed for " + i[2] )
self._buildAdaptor( mesh, i[0], i[1], name, i[3], True, i[4], i[5] )
else:
self._buildAdaptor( mesh, i[2], i[3], name, i[1], False, i[4], i[5] )
################################################################
# Here we set up the plots. Dummy for cases that don't match conditions
################################################################
def _collapseElistToPathAndClass( self, comptList, path, className ):
dummy = moose.element( '/' )
ret = [ dummy ] * len( comptList )
j = 0
for i in comptList:
if moose.exists( i.path + '/' + path ):
obj = moose.element( i.path + '/' + path )
if obj.isA[ className ]:
ret[j] = obj
j += 1
return ret
# Returns vector of source objects, and the field to use.
# plotSpec is of the form
# [ region_wildcard, region_expr, path, field, title]
def _parseComptField( self, comptList, plotSpec, knownFields ):
# Put in stuff to go through fields if the target is a chem object
field = plotSpec[3]
        if field not in knownFields:
print("Warning: Rdesigneur::_parseComptField: Unknown field '", field, "'")
return (), ""
kf = knownFields[field] # Find the field to decide type.
if ( kf[0] == 'CaConcBase' or kf[0] == 'ChanBase' or kf[0] == 'NMDAChan' ):
objList = self._collapseElistToPathAndClass( comptList, plotSpec[2], kf[0] )
# print ("objList: ", len(objList), kf[1])
return objList, kf[1]
elif (field == 'n' or field == 'conc' ):
path = plotSpec[2]
pos = path.find( '/' )
if pos == -1: # Assume it is in the dend compartment.
path = 'dend/' + path
pos = path.find( '/' )
chemCompt = path[:pos]
cc = moose.element( self.modelPath + '/chem/' + chemCompt)
voxelVec = []
if ( chemCompt == 'dend' ):
for i in comptList:
voxelVec.extend( cc.dendVoxelsOnCompartment[i] )
else:
em = cc.elecComptMap
elecComptMap = { moose.element(em[i]):i for i in range(len(em)) }
for i in comptList:
if i in elecComptMap:
voxelVec.extend( [ elecComptMap[i] ] )
# Here we collapse the voxelVec into objects to plot.
allObj = moose.vec( self.modelPath + '/chem/' + plotSpec[2] )
#print "####### allObj=", self.modelPath + '/chem/' + plotSpec[2]
if len( allObj ) >= len( voxelVec ):
objList = [ allObj[int(j)] for j in voxelVec]
else:
objList = []
print( "Warning: Rdesigneur::_parseComptField: unknown Object: '", plotSpec[2], "'" )
#print "############", chemCompt, len(objList), kf[1]
return objList, kf[1]
else:
return comptList, kf[1]
def _buildPlots( self ):
knownFields = {
'Vm':('CompartmentBase', 'getVm', 1000, 'Memb. Potential (mV)' ),
'Im':('CompartmentBase', 'getIm', 1e9, 'Memb. current (nA)' ),
'inject':('CompartmentBase', 'getInject', 1e9, 'inject current (nA)' ),
'Gbar':('ChanBase', 'getGbar', 1e9, 'chan max conductance (nS)' ),
'Gk':('ChanBase', 'getGk', 1e9, 'chan conductance (nS)' ),
'Ik':('ChanBase', 'getIk', 1e9, 'chan current (nA)' ),
'ICa':('NMDAChan', 'getICa', 1e9, 'Ca current (nA)' ),
'Ca':('CaConcBase', 'getCa', 1e3, 'Ca conc (uM)' ),
'n':('PoolBase', 'getN', 1, '# of molecules'),
'conc':('PoolBase', 'getConc', 1000, 'Concentration (uM)' )
}
graphs = moose.Neutral( self.modelPath + '/graphs' )
dummy = moose.element( '/' )
k = 0
for i in self.plotList:
pair = i[0] + " " + i[1]
dendCompts = self.elecid.compartmentsFromExpression[ pair ]
spineCompts = self.elecid.spinesFromExpression[ pair ]
plotObj, plotField = self._parseComptField( dendCompts, i, knownFields )
plotObj2, plotField2 = self._parseComptField( spineCompts, i, knownFields )
assert( plotField == plotField2 )
plotObj3 = plotObj + plotObj2
numPlots = sum( i != dummy for i in plotObj3 )
if numPlots > 0:
tabname = graphs.path + '/plot' + str(k)
scale = knownFields[i[3]][2]
units = knownFields[i[3]][3]
self.plotNames.append( ( tabname, i[4], k, scale, units ) )
k += 1
if i[3] == 'n' or i[3] == 'conc':
tabs = moose.Table2( tabname, numPlots )
else:
tabs = moose.Table( tabname, numPlots )
vtabs = moose.vec( tabs )
q = 0
for p in [ x for x in plotObj3 if x != dummy ]:
moose.connect( vtabs[q], 'requestOut', p, plotField )
q += 1
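    # Example plotList entry (hedged; follows the five-field layout parsed
    # by _parseComptField above):
    #   ['soma', '1', '.', 'Vm', 'Soma membrane potential']
    # i.e. [region wildcard, region expression, relative path, field, title]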
def _buildMoogli( self ):
knownFields = {
'Vm':('CompartmentBase', 'getVm', 1000, 'Memb. Potential (mV)', -80.0, 40.0 ),
'Im':('CompartmentBase', 'getIm', 1e9, 'Memb. current (nA)', -10.0, 10.0 ),
'inject':('CompartmentBase', 'getInject', 1e9, 'inject current (nA)', -10.0, 10.0 ),
'Gbar':('ChanBase', 'getGbar', 1e9, 'chan max conductance (nS)', 0.0, 1.0 ),
'Gk':('ChanBase', 'getGk', 1e9, 'chan conductance (nS)', 0.0, 1.0 ),
'Ik':('ChanBase', 'getIk', 1e9, 'chan current (nA)', -10.0, 10.0 ),
'ICa':('NMDAChan', 'getICa', 1e9, 'Ca current (nA)', -10.0, 10.0 ),
'Ca':('CaConcBase', 'getCa', 1e3, 'Ca conc (uM)', 0.0, 10.0 ),
'n':('PoolBase', 'getN', 1, '# of molecules', 0.0, 200.0 ),
'conc':('PoolBase', 'getConc', 1000, 'Concentration (uM)', 0.0, 2.0 )
}
moogliBase = moose.Neutral( self.modelPath + '/moogli' )
k = 0
for i in self.moogList:
kf = knownFields[i[3]]
pair = i[0] + " " + i[1]
dendCompts = self.elecid.compartmentsFromExpression[ pair ]
spineCompts = self.elecid.spinesFromExpression[ pair ]
dendObj, mooField = self._parseComptField( dendCompts, i, knownFields )
spineObj, mooField2 = self._parseComptField( spineCompts, i, knownFields )
assert( mooField == mooField2 )
mooObj3 = dendObj + spineObj
numMoogli = len( mooObj3 )
#dendComptMap = self.dendCompt.elecComptMap
#self.moogliViewer = rmoogli.makeMoogli( self, mooObj3, mooField )
if len( i ) == 5:
i.extend( kf[4:6] )
elif len( i ) == 6:
i.extend( [kf[5]] )
#self.moogliViewer = rmoogli.makeMoogli( self, mooObj3, i, kf )
self.moogNames.append( rmoogli.makeMoogli( self, mooObj3, i, kf ) )
################################################################
# Here we display the plots and moogli
################################################################
def displayMoogli( self, moogliDt, runtime, rotation = math.pi/500.0):
rmoogli.displayMoogli( self, moogliDt, runtime, rotation )
def display( self ):
for i in self.plotNames:
pylab.figure( i[2] )
pylab.title( i[1] )
pylab.xlabel( "Time (s)" )
pylab.ylabel( i[4] )
vtab = moose.vec( i[0] )
t = np.arange( 0, vtab[0].vector.size, 1 ) * vtab[0].dt
for j in vtab:
pylab.plot( t, j.vector * i[3] )
if len( self.moogList ) > 0:
pylab.ion()
pylab.show(block=True)
self._save() #This calls the _save function which saves only if the filenames have been specified
################################################################
# Here we get the time-series data and write to various formats
################################################################
#[TO DO] Add NSDF output function
'''
The author of the functions -- [_savePlots(), _getTimeSeriesTable(), _writeXML(), _writeCSV(), _saveFormats(), _save()] is
Sarthak Sharma.
Email address: sarthaks442@gmail.com
'''
def _savePlots( self ):
knownFields = {
'Vm':('CompartmentBase', 'getVm', 1000, 'Memb. Potential (mV)' ),
'Im':('CompartmentBase', 'getIm', 1e9, 'Memb. current (nA)' ),
'inject':('CompartmentBase', 'getInject', 1e9, 'inject current (nA)' ),
'Gbar':('ChanBase', 'getGbar', 1e9, 'chan max conductance (nS)' ),
'Gk':('ChanBase', 'getGk', 1e9, 'chan conductance (nS)' ),
'Ik':('ChanBase', 'getIk', 1e9, 'chan current (nA)' ),
'ICa':('NMDAChan', 'getICa', 1e9, 'Ca current (nA)' ),
'Ca':('CaConcBase', 'getCa', 1e3, 'Ca conc (uM)' ),
'n':('PoolBase', 'getN', 1, '# of molecules'),
'conc':('PoolBase', 'getConc', 1000, 'Concentration (uM)' )
}
save_graphs = moose.Neutral( self.modelPath + '/save_graphs' )
dummy = moose.element( '/' )
k = 0
for i in self.saveList:
pair = i[0] + " " + i[1]
dendCompts = self.elecid.compartmentsFromExpression[ pair ]
spineCompts = self.elecid.spinesFromExpression[ pair ]
plotObj, plotField = self._parseComptField( dendCompts, i, knownFields )
plotObj2, plotField2 = self._parseComptField( spineCompts, i, knownFields )
assert( plotField == plotField2 )
plotObj3 = plotObj + plotObj2
numPlots = sum( i != dummy for i in plotObj3 )
if numPlots > 0:
save_tabname = save_graphs.path + '/save_plot' + str(k)
scale = knownFields[i[3]][2]
units = knownFields[i[3]][3]
self.saveNames.append( ( save_tabname, i[4], k, scale, units ) )
k += 1
if i[3] == 'n' or i[3] == 'conc':
save_tabs = moose.Table2( save_tabname, numPlots )
else:
save_tabs = moose.Table( save_tabname, numPlots )
save_vtabs = moose.vec( save_tabs )
q = 0
for p in [ x for x in plotObj3 if x != dummy ]:
moose.connect( save_vtabs[q], 'requestOut', p, plotField )
q += 1
def _getTimeSeriesTable( self ):
'''
This function gets the list with all the details of the simulation
required for plotting.
This function adds flexibility in terms of the details
we wish to store.
'''
knownFields = {
'Vm':('CompartmentBase', 'getVm', 1000, 'Memb. Potential (mV)' ),
'Im':('CompartmentBase', 'getIm', 1e9, 'Memb. current (nA)' ),
'inject':('CompartmentBase', 'getInject', 1e9, 'inject current (nA)' ),
'Gbar':('ChanBase', 'getGbar', 1e9, 'chan max conductance (nS)' ),
'Gk':('ChanBase', 'getGk', 1e9, 'chan conductance (nS)' ),
'Ik':('ChanBase', 'getIk', 1e9, 'chan current (nA)' ),
'ICa':('NMDAChan', 'getICa', 1e9, 'Ca current (nA)' ),
'Ca':('CaConcBase', 'getCa', 1e3, 'Ca conc (uM)' ),
'n':('PoolBase', 'getN', 1, '# of molecules'),
'conc':('PoolBase', 'getConc', 1000, 'Concentration (uM)' )
}
'''
This takes data from plotList
saveList is exactly like plotList but with a few additional arguments:
->It will have a resolution option, i.e., the number of decimal figures to which the value should be rounded
->There is a list of "saveAs" formats
    With saveList, the user is able to choose which details are saved.
'''
for i,ind in enumerate(self.saveNames):
pair = self.saveList[i][0] + " " + self.saveList[i][1]
dendCompts = self.elecid.compartmentsFromExpression[ pair ]
spineCompts = self.elecid.spinesFromExpression[ pair ]
# Here we get the object details from plotList
savePlotObj, plotField = self._parseComptField( dendCompts, self.saveList[i], knownFields )
savePlotObj2, plotField2 = self._parseComptField( spineCompts, self.saveList[i], knownFields )
savePlotObj3 = savePlotObj + savePlotObj2
rowList = list(ind)
save_vtab = moose.vec( ind[0] )
t = np.arange( 0, save_vtab[0].vector.size, 1 ) * save_vtab[0].dt
rowList.append(save_vtab[0].dt)
rowList.append(t)
rowList.append([jvec.vector * ind[3] for jvec in save_vtab]) #get values
rowList.append(self.saveList[i][3])
rowList.append(filter(lambda obj: obj.path != '/', savePlotObj3)) #this filters out dummy elements
            if isinstance(self.saveList[i][-1], int):
rowList.append(self.saveList[i][-1])
else:
rowList.append(12)
self.tabForXML.append(rowList)
rowList = []
timeSeriesTable = self.tabForXML # the list with all the details of plot
return timeSeriesTable
def _writeXML( self, filename, timeSeriesData ): #to write to XML file
plotData = timeSeriesData
print("[CAUTION] The '%s' file might be very large if all the compartments are to be saved." % filename)
root = etree.Element("TimeSeriesPlot")
parameters = etree.SubElement( root, "parameters" )
        if self.params is None:
parameters.text = "None"
else:
assert(isinstance(self.params, dict)), "'params' should be a dictionary."
for pkey, pvalue in self.params.items():
parameter = etree.SubElement( parameters, str(pkey) )
parameter.text = str(pvalue)
#plotData contains all the details of a single plot
title = etree.SubElement( root, "timeSeries" )
title.set( 'title', str(plotData[1]))
title.set( 'field', str(plotData[8]))
title.set( 'scale', str(plotData[3]))
title.set( 'units', str(plotData[4]))
title.set( 'dt', str(plotData[5]))
p = []
assert(len(plotData[7]) == len(plotData[9]))
res = plotData[10]
for ind, jvec in enumerate(plotData[7]):
p.append( etree.SubElement( title, "data"))
p[-1].set( 'path', str(plotData[9][ind].path))
p[-1].text = ''.join( str(round(value,res)) + ' ' for value in jvec )
tree = etree.ElementTree(root)
tree.write(filename)
def _writeCSV(self, filename, timeSeriesData):
plotData = timeSeriesData
dataList = []
header = []
time = plotData[6]
res = plotData[10]
for ind, jvec in enumerate(plotData[7]):
header.append(plotData[9][ind].path)
dataList.append([round(value,res) for value in jvec.tolist()])
dl = [tuple(lst) for lst in dataList]
rows = zip(tuple(time), *dl)
header.insert(0, "time")
with open(filename, 'wb') as f:
writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
for row in rows:
writer.writerow(row)
##########****SAVING*****###############
def _saveFormats(self, timeSeriesData, k, *filenames):
"This takes in the filenames and writes to corresponding format."
if filenames:
for filename in filenames:
for name in filename:
print (name)
if name[-4:] == '.xml':
self._writeXML(name, timeSeriesData)
print(name, " written")
elif name[-4:] == '.csv':
self._writeCSV(name, timeSeriesData)
print(name, " written")
else:
print("not possible")
pass
else:
pass
def _save( self ):
timeSeriesTable = self._getTimeSeriesTable()
for i,sList in enumerate(self.saveList):
if (len(sList) >= 6) and (type(sList[5]) != int):
self.saveAs.extend(filter(lambda fmt: type(fmt)!=int, sList[5:]))
                try:
                    timeSeriesData = timeSeriesTable[i]
                except IndexError:
                    # no table entry (all dummy elements); skip this save
                    print("The object to be plotted has all dummy elements.")
                    self.saveAs = []
                    continue
self._saveFormats(timeSeriesData, i, self.saveAs)
self.saveAs=[]
else:
pass
else:
pass
################################################################
# Here we set up the stims
################################################################
def _buildStims( self ):
knownFields = {
'inject':('CompartmentBase', 'setInject'),
'Ca':('CaConcBase', 'getCa'),
'n':('PoolBase', 'setN'),
'conc':('PoolBase''setConc')
}
stims = moose.Neutral( self.modelPath + '/stims' )
k = 0
for i in self.stimList:
pair = i[0] + " " + i[1]
dendCompts = self.elecid.compartmentsFromExpression[ pair ]
spineCompts = self.elecid.spinesFromExpression[ pair ]
stimObj, stimField = self._parseComptField( dendCompts, i, knownFields )
stimObj2, stimField2 = self._parseComptField( spineCompts, i, knownFields )
assert( stimField == stimField2 )
stimObj3 = stimObj + stimObj2
numStim = len( stimObj3 )
if numStim > 0:
funcname = stims.path + '/stim' + str(k)
k += 1
func = moose.Function( funcname )
func.expr = i[4]
for q in stimObj3:
moose.connect( func, 'valueOut', q, stimField )
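    # Example stimList entry (hedged): a 25 pA current pulse into the soma
    # between t = 0.1 s and t = 0.2 s, driven by a moose.Function expression:
    #   ['soma', '1', '.', 'inject', '(t>0.1 && t<0.2) * 25e-12']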
################################################################
# Utility function for setting up clocks.
def _configureClocks( self ):
if self.turnOffElec:
elecDt = 1e6
elecPlotDt = 1e6
diffDt = 0.1 # Slow it down again because no multiscaling
chemDt = 0.1 # Slow it down again because no multiscaling
        else:
            elecDt = self.elecDt
            elecPlotDt = self.elecPlotDt
            diffDt = self.diffDt
            chemDt = self.chemDt
for i in range( 0, 9 ):
moose.setClock( i, elecDt )
moose.setClock( 10, diffDt )
for i in range( 11, 18 ):
moose.setClock( i, chemDt )
        moose.setClock( 8, elecPlotDt )
moose.setClock( 18, self.chemPlotDt )
hsolve = moose.HSolve( self.elecid.path + '/hsolve' )
hsolve.dt = elecDt
hsolve.target = self.soma.path
################################################################
################################################################
################################################################
def validateFromMemory( self, epath, cpath ):
ret = self.validateChem()
return ret
#################################################################
# assumes ePath is the parent element of the electrical model,
# and cPath the parent element of the compts in the chem model
def buildFromMemory( self, ePath, cPath, doCopy = False ):
if not self.validateFromMemory( ePath, cPath ):
return
if doCopy:
x = moose.copy( cPath, self.model )
self.chemid = moose.element( x )
self.chemid.name = 'chem'
x = moose.copy( ePath, self.model )
self.elecid = moose.element( x )
self.elecid.name = 'elec'
else:
self.elecid = moose.element( ePath )
self.chemid = moose.element( cPath )
if self.elecid.path != self.model.path + '/elec':
if ( self.elecid.parent != self.model ):
moose.move( self.elecid, self.model )
self.elecid.name = 'elec'
if self.chemid.path != self.model.path + '/chem':
if ( self.chemid.parent != self.model ):
moose.move( self.chemid, self.model )
self.chemid.name = 'chem'
ep = self.elecid.path
somaList = moose.wildcardFind( ep + '/#oma#[ISA=CompartmentBase]' )
if len( somaList ) == 0:
somaList = moose.wildcardFind( ep + '/#[ISA=CompartmentBase]' )
assert( len( somaList ) > 0 )
maxdia = 0.0
        for i in somaList:
            if ( i.diameter > maxdia ):
                maxdia = i.diameter
                self.soma = i
#self.soma = self.comptList[0]
self._decorateWithSpines()
self.spineList = moose.wildcardFind( ep + '/#spine#[ISA=CompartmentBase],' + ep + '/#head#[ISA=CompartmentBase]' )
if len( self.spineList ) == 0:
self.spineList = moose.wildcardFind( ep + '/#head#[ISA=CompartmentBase]' )
nmdarList = moose.wildcardFind( ep + '/##[ISA=NMDAChan]' )
self.comptList = moose.wildcardFind( ep + '/#[ISA=CompartmentBase]')
print("Rdesigneur: Elec model has ", len( self.comptList ),
" compartments and ", len( self.spineList ),
" spines with ", len( nmdarList ), " NMDARs")
self._buildNeuroMesh()
self._configureSolvers()
for i in self.adaptorList:
print(i)
self._buildAdaptor( i[0],i[1],i[2],i[3],i[4],i[5],i[6] )
################################################################
def buildFromFile( self, efile, cfile ):
self.efile = efile
self.cfile = cfile
self._loadElec( efile, 'tempelec' )
if len( self.chanDistrib ) > 0:
self.elecid.channelDistribution = self.chanDistrib
self.elecid.parseChanDistrib()
self._loadChem( cfile, 'tempchem' )
self.buildFromMemory( self.model.path + '/tempelec', self.model.path + '/tempchem' )
################################################################
# Utility function to add a single spine to the given parent.
# parent is parent compartment for this spine.
# spineProto is just that.
# pos is position (in metres ) along parent compartment
# angle is angle (in radians) to rotate spine wrt x in plane xy.
# Size is size scaling factor, 1 leaves as is.
# x, y, z are unit vectors. Z is along the parent compt.
# We first shift the spine over so that it is offset by the parent compt
# diameter.
# We then need to reorient the spine which lies along (i,0,0) to
# lie along x. X is a unit vector so this is done simply by
# multiplying each coord of the spine by x.
# Finally we rotate the spine around the z axis by the specified angle
# k is index of this spine.
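    # Worked example (hypothetical numbers): with x = (0, 1, 0), the rotation
    # matrix built below maps a shaft point (L, 0, 0) onto L * x = (0, L, 0),
    # so the spine points along the chosen normal to the dendrite;
    # _reorientSpine then applies the rotation by 'angle' about the parent
    # axis.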
def _addSpine( self, parent, spineProto, pos, angle, x, y, z, size, k ):
spine = moose.copy( spineProto, parent.parent, 'spine' + str(k) )
kids = spine[0].children
coords = []
ppos = np.array( [parent.x0, parent.y0, parent.z0] )
for i in kids:
#print i.name, k
j = i[0]
j.name += str(k)
#print 'j = ', j
coords.append( [j.x0, j.y0, j.z0] )
coords.append( [j.x, j.y, j.z] )
self._scaleSpineCompt( j, size )
moose.move( i, self.elecid )
origin = coords[0]
#print 'coords = ', coords
# Offset it so shaft starts from surface of parent cylinder
origin[0] -= parent.diameter / 2.0
coords = np.array( coords )
coords -= origin # place spine shaft base at origin.
rot = np.array( [x, [0,0,0], [0,0,0]] )
coords = np.dot( coords, rot )
moose.delete( spine )
moose.connect( parent, "raxial", kids[0], "axial" )
self._reorientSpine( kids, coords, ppos, pos, size, angle, x, y, z )
################################################################
## The spineid is the parent object of the prototype spine. The
## spine prototype can include any number of compartments, and each
## can have any number of voltage and ligand-gated channels, as well
## as CaConc and other mechanisms.
## The parentList is a list of Object Ids for parent compartments for
## the new spines
## The spacingDistrib is the width of a normal distribution around
## the spacing. Both are in metre units.
## The reference angle of 0 radians is facing away from the soma.
## In all cases we assume that the spine will be rotated so that its
## axis is perpendicular to the axis of the dendrite.
## The simplest way to put the spine in any random position is to have
## an angleDistrib of 2 pi. The algorithm selects any angle in the
## linear range of the angle distrib to add to the specified angle.
## With each position along the dendrite the algorithm computes a new
## spine direction, using rotation to increment the angle.
################################################################
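    # Example addSpineList entry (values hypothetical; the loop below joins
    # nine fields into one space-separated specification string):
    #     ['spine', '#dend#', 2e-6, 1e-6, 1.0, 0.5, 0.0, 6.28, 0]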
def _decorateWithSpines( self ):
args = []
for i in self.addSpineList:
if not moose.exists( '/library/' + i[0] ):
print('Warning: _decorateWithSpines: spine proto ', i[0], ' not found.')
continue
s = ""
for j in range( 9 ):
s = s + str(i[j]) + ' '
args.append( s )
self.elecid.spineSpecification = args
self.elecid.parseSpines()
################################################################
def _loadElec( self, efile, elecname ):
        if efile.endswith( ".p" ):
self.elecid = moose.loadModel( efile, '/library/' + elecname)[0]
print(self.elecid)
        elif efile.endswith( ".swc" ):
self.elecid = moose.loadModel( efile, '/library/' + elecname)[0]
else:
nm = NeuroML()
print("in _loadElec, combineSegments = ", self.combineSegments)
nm.readNeuroMLFromFile( efile, \
params = {'combineSegments': self.combineSegments, \
'createPotentialSynapses': True } )
if moose.exists( '/cells' ):
kids = moose.wildcardFind( '/cells/#' )
else:
kids = moose.wildcardFind( '/library/#[ISA=Neuron],/library/#[TYPE=Neutral]' )
if ( kids[0].name == 'spine' ):
kids = kids[1:]
assert( len( kids ) > 0 )
self.elecid = kids[0]
temp = moose.wildcardFind( self.elecid.path + '/#[ISA=CompartmentBase]' )
transformNMDAR( self.elecid.path )
kids = moose.wildcardFind( '/library/##[0]' )
for i in kids:
i.tick = -1
#################################################################
# This assumes that the chemid is located in self.parent.path+/chem
# It moves the existing chem compartments into a NeuroMesh
# For now this requires that we have a dend, a spine and a PSD,
# with those names and volumes in decreasing order.
def validateChem( self ):
cpath = self.chemid.path
comptlist = moose.wildcardFind( cpath + '/#[ISA=ChemCompt]' )
if len( comptlist ) == 0:
raise BuildError( "validateChem: no compartment on: " + cpath )
if len( comptlist ) == 1:
            return
# Sort comptlist in decreasing order of volume
sortedComptlist = sorted( comptlist, key=lambda x: -x.volume )
if ( len( sortedComptlist ) != 3 ):
print(cpath, sortedComptlist)
raise BuildError( "validateChem: Require 3 chem compartments, have: " + str( len( sortedComptlist ) ) )
'''
if not( sortedComptlist[0].name.lower() == 'dend' and \
sortedComptlist[1].name.lower() == 'spine' and \
sortedComptlist[2].name.lower() == 'psd' ):
raise BuildError( "validateChem: Invalid compt names: require dend, spine and PSD.\nActual names = " \
                + sortedComptlist[0].name + ", " \
                + sortedComptlist[1].name + ", " \
                + sortedComptlist[2].name )
'''
#################################################################
def _buildNeuroMesh( self ):
comptlist = moose.wildcardFind( self.chemid.path + '/#[ISA=ChemCompt]' )
sortedComptList = sorted( comptlist, key=lambda x: -x.volume )
# A little juggling here to put the chem pathways onto new meshes.
self.chemid.name = 'temp_chem'
newChemid = moose.Neutral( self.model.path + '/chem' )
self.dendCompt = moose.NeuroMesh( newChemid.path + '/dend' )
self.dendCompt.geometryPolicy = 'cylinder'
self.dendCompt.separateSpines = 0
if len( sortedComptList ) == 3:
self.dendCompt.separateSpines = 1
self.spineCompt = moose.SpineMesh( newChemid.path + '/spine' )
moose.connect( self.dendCompt, 'spineListOut', self.spineCompt, 'spineList' )
self.psdCompt = moose.PsdMesh( newChemid.path + '/psd' )
moose.connect( self.dendCompt, 'psdListOut', self.psdCompt, 'psdList','OneToOne')
#Move the old reac systems onto the new compartments.
self._moveCompt( sortedComptList[0], self.dendCompt )
if len( sortedComptList ) == 3:
self._moveCompt( sortedComptList[1], self.spineCompt )
self._moveCompt( sortedComptList[2], self.psdCompt )
self.dendCompt.diffLength = self.diffusionLength
self.dendCompt.subTree = self.cellPortionElist
moose.delete( self.chemid )
self.chemid = newChemid
#################################################################
def _configureSolvers( self ) :
if not hasattr( self, 'chemid' ):
return
if not hasattr( self, 'dendCompt' ):
raise BuildError( "configureSolvers: no chem meshes defined." )
dmksolve = moose.Ksolve( self.dendCompt.path + '/ksolve' )
dmdsolve = moose.Dsolve( self.dendCompt.path + '/dsolve' )
dmstoich = moose.Stoich( self.dendCompt.path + '/stoich' )
dmstoich.compartment = self.dendCompt
dmstoich.ksolve = dmksolve
dmstoich.dsolve = dmdsolve
dmstoich.path = self.dendCompt.path + "/##"
# Below we have code that only applies if there are spines
# Put in spine solvers. Note that these get info from the dendCompt
if hasattr( self, 'spineCompt' ):
if self.useGssa:
smksolve = moose.Gsolve( self.spineCompt.path + '/ksolve' )
else:
smksolve = moose.Ksolve( self.spineCompt.path + '/ksolve' )
smdsolve = moose.Dsolve( self.spineCompt.path + '/dsolve' )
smstoich = moose.Stoich( self.spineCompt.path + '/stoich' )
smstoich.compartment = self.spineCompt
smstoich.ksolve = smksolve
smstoich.dsolve = smdsolve
smstoich.path = self.spineCompt.path + "/##"
# Put in PSD solvers. Note that these get info from the dendCompt
if self.useGssa:
pmksolve = moose.Gsolve( self.psdCompt.path + '/ksolve' )
else:
pmksolve = moose.Ksolve( self.psdCompt.path + '/ksolve' )
pmdsolve = moose.Dsolve( self.psdCompt.path + '/dsolve' )
pmstoich = moose.Stoich( self.psdCompt.path + '/stoich' )
pmstoich.compartment = self.psdCompt
pmstoich.ksolve = pmksolve
pmstoich.dsolve = pmdsolve
pmstoich.path = self.psdCompt.path + "/##"
# Put in cross-compartment diffusion between ksolvers
dmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
# Put in cross-compartment reactions between ksolvers
smstoich.buildXreacs( pmstoich )
#pmstoich.buildXreacs( smstoich )
smstoich.buildXreacs( dmstoich )
dmstoich.filterXreacs()
smstoich.filterXreacs()
pmstoich.filterXreacs()
# set up the connections so that the spine volume scaling can happen
self.elecid.setSpineAndPsdMesh( self.spineCompt, self.psdCompt)
self.elecid.setSpineAndPsdDsolve( smdsolve, pmdsolve )
################################################################
def _loadChem( self, fname, chemName ):
chem = moose.Neutral( '/library/' + chemName )
modelId = moose.loadModel( fname, chem.path, 'ee' )
comptlist = moose.wildcardFind( chem.path + '/#[ISA=ChemCompt]' )
if len( comptlist ) == 0:
print("loadChem: No compartment found in file: ", fname)
return
# Sort comptlist in decreasing order of volume
sortedComptlist = sorted( comptlist, key=lambda x: -x.volume )
if ( len( sortedComptlist ) != 3 ):
print("loadChem: Require 3 chem compartments, have: ",\
len( sortedComptlist ))
return False
sortedComptlist[0].name = 'dend'
sortedComptlist[1].name = 'spine'
sortedComptlist[2].name = 'psd'
################################################################
def _moveCompt( self, a, b ):
b.setVolumeNotRates( a.volume )
for i in moose.wildcardFind( a.path + '/#' ):
if ( i.name != 'mesh' ):
moose.move( i, b )
moose.delete( a )
################################################################
def _buildAdaptor( self, meshName, elecRelPath, elecField, \
chemRelPath, chemField, isElecToChem, offset, scale ):
#print "offset = ", offset, ", scale = ", scale
mesh = moose.element( '/model/chem/' + meshName )
#elecComptList = mesh.elecComptList
if elecRelPath == 'spine':
elecComptList = moose.vec( mesh.elecComptList[0].path + '/../spine' )
else:
elecComptList = mesh.elecComptList
'''
for i in elecComptList:
print i.diameter
print len( elecComptList[0] )
print elecComptList[0][0].parent.path
print "--------------------------------------"
spine = moose.vec( elecComptList[0].path + '/../spine' )
for i in spine:
print i.headDiameter
moose.le( elecComptList[0][0].parent )
'''
if len( elecComptList ) == 0:
raise BuildError( \
"buildAdaptor: no elec compts in elecComptList on: " + \
mesh.path )
startVoxelInCompt = mesh.startVoxelInCompt
endVoxelInCompt = mesh.endVoxelInCompt
capField = elecField[0].capitalize() + elecField[1:]
capChemField = chemField[0].capitalize() + chemField[1:]
chemPath = mesh.path + '/' + chemRelPath
if not( moose.exists( chemPath ) ):
raise BuildError( \
"Error: buildAdaptor: no chem obj in " + chemPath )
chemObj = moose.element( chemPath )
assert( chemObj.numData >= len( elecComptList ) )
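        # Build the adaptor name: '/adapt' plus the character immediately
        # after the last '/' in elecRelPath, which keeps adaptors for
        # different relative paths distinct.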
adName = '/adapt'
for i in range( 1, len( elecRelPath ) ):
if ( elecRelPath[-i] == '/' ):
adName += elecRelPath[1-i]
break
ad = moose.Adaptor( chemObj.path + adName, len( elecComptList ) )
#print 'building ', len( elecComptList ), 'adaptors ', adName, ' for: ', mesh.name, elecRelPath, elecField, chemRelPath
av = ad.vec
chemVec = moose.element( mesh.path + '/' + chemRelPath ).vec
for i in zip( elecComptList, startVoxelInCompt, endVoxelInCompt, av ):
i[3].inputOffset = 0.0
i[3].outputOffset = offset
i[3].scale = scale
if elecRelPath == 'spine':
elObj = i[0]
else:
ePath = i[0].path + '/' + elecRelPath
if not( moose.exists( ePath ) ):
raise BuildError( \
"Error: buildAdaptor: no elec obj in " + ePath )
elObj = moose.element( i[0].path + '/' + elecRelPath )
if ( isElecToChem ):
elecFieldSrc = 'get' + capField
chemFieldDest = 'set' + capChemField
#print ePath, elecFieldSrc, scale
moose.connect( i[3], 'requestOut', elObj, elecFieldSrc )
for j in range( i[1], i[2] ):
moose.connect( i[3], 'output', chemVec[j],chemFieldDest)
else:
chemFieldSrc = 'get' + capChemField
elecFieldDest = 'set' + capField
for j in range( i[1], i[2] ):
moose.connect( i[3], 'requestOut', chemVec[j], chemFieldSrc)
msg = moose.connect( i[3], 'output', elObj, elecFieldDest )
| dharmasam9/moose-core | python/rdesigneur/rdesigneur.py | Python | gpl-3.0 | 57,169 | [
"MOOSE",
"NEURON"
] | 469d6ceccb96205e84fac36c09d6aafe480546f094f03c2bcff8cb1c017415cf |
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2021 - Sequana Development Team
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Utilities to manipulate FastA files"""
import os
from pysam import FastxFile
from easydev import Progress
import textwrap
from sequana.stats import N50, L50
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["FastA"]
def is_fasta(filename):
with open(filename, "r") as fin:
try:
line = fin.readline()
assert line.startswith(">")
line = fin.readline()
return True
except: # pragma: no cover
return False
# cannot inherit from FastxFile (no object in the API ?)
class FastA(object):
"""Class to handle FastA files
::
from sequana import FastA
f = FastA("test.fa")
read = next(f)
names = f.names
"""
def __init__(self, filename, verbose=False):
self._fasta = FastxFile(filename)
self.filename = filename
self._N = None
def __iter__(self):
return self
def __next__(self): # python 3
return self.next()
def next(self): # python 2
# reads 4 lines
try:
d = next(self._fasta)
return d
        except KeyboardInterrupt:  # pragma: no cover
            # This allows developers to interrupt a loop over the reads that
            # would otherwise take too long or run forever.
self._fasta.close()
self._fasta = FastxFile(self._fasta.filename)
except:
self._fasta.close()
self._fasta = FastxFile(self._fasta.filename)
raise StopIteration
def __len__(self):
if self._N is None:
logger.info("Reading input fasta file...please wait")
self._N = sum(1 for x in FastxFile(self.filename))
return self._N
def _get_names(self):
return [this.name for this in self]
names = property(_get_names)
def _get_sequences(self):
return [this.sequence for this in self]
sequences = property(_get_sequences)
def _get_comment(self):
return [this.comment for this in self]
comments = property(_get_comment)
def _get_lengths(self):
return [len(this.sequence) for this in self]
lengths = property(_get_lengths)
def get_lengths_as_dict(self):
"""Return dictionary with sequence names and lengths as keys/values"""
return dict(zip(self.names, self.lengths))
    def explode(self, outdir="."):
        """Extract each sequence from the original file and save it into an individual file"""
        with open(self.filename, "r") as fin:
            fout = None
            for line in fin.readlines():
                if line.startswith(">"):
                    # ignore the comment and '>' character and use the name
                    # as the filename
                    name = line.split()[0][1:]
                    # if a file was already open, close it first
                    if fout:
                        fout.close()
                    fout = open(f"{outdir}/{name}.fasta", "w")
                    fout.write(line)
                else:
                    fout.write(line)
        # need to close the last file
        if fout:
            fout.close()
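    # Usage sketch (file names hypothetical):
    #     FastA("multi.fa").explode(outdir="per_sequence")  # one <name>.fasta per record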
def format_contigs_denovo(self, output_file, len_min=500):
"""Remove contigs with sequence length below specific threshold.
:param str output_file: output file name.
:param int len_min: minimal length of contigs.
Example::
from sequana import FastA
contigs = FastA("denovo_assembly.fasta")
contigs.format_contigs_denovo("output.fasta", len_min=500)
"""
# catch basename of file without extension
project = os.path.basename(output_file).split(".")[0]
# check if directory exist
output_dir = os.path.dirname(output_file)
try:
if not os.path.exists(output_dir): # pragma: no cover
os.makedirs(output_dir)
except FileNotFoundError: # pragma: no cover
pass
n = 1
with open(output_file, "w") as fp:
for contigs in self:
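                # NB: contigs are assumed to be sorted by decreasing length
                # (typical assembler output), so the scan stops at the first
                # contig shorter than len_min.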
if len(contigs.sequence) < len_min:
break
name = ">{}_{} {}\n".format(project, n, contigs.name)
sequence = (
"\n".join(
[
contigs.sequence[i : min(i + 80, len(contigs.sequence))]
for i in range(0, len(contigs.sequence), 80)
]
)
+ "\n"
)
fp.write(name + sequence)
n += 1
def filter(self, output_filename, names_to_keep=None, names_to_exclude=None):
"""save FastA excluding or including specific sequences"""
if names_to_exclude is None and names_to_keep is None: # pragma: no cover
logger.warning("No ids provided")
return
if names_to_exclude:
with open(self.filename) as fin:
with open(output_filename, "w") as fout:
skip = False
# do no use readlines. may be slower but may cause memory
# issue
for line in fin:
if line.startswith(">"):
if line[1:].split()[0] in names_to_exclude:
skip = True
else:
skip = False
if skip is False:
fout.write(line)
elif names_to_keep:
with open(self.filename) as fin:
with open(output_filename, "w") as fout:
# do no use readlines. may be slower but may cause memory
# issue
skip = True
for line in fin:
if line.startswith(">"):
if line[1:].split()[0] in names_to_keep:
skip = False
else:
skip = True
if skip is False:
fout.write(line)
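    # Usage sketch (names hypothetical):
    #     FastA("in.fa").filter("kept.fa", names_to_keep=["chr1", "chr2"])
    #     FastA("in.fa").filter("clean.fa", names_to_exclude=["chrM"])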
def select_random_reads(self, N=None, output_filename="random.fasta"):
"""Select random reads and save in a file
        :param N: number of random unique reads to select (an int); a list or
            set of read indices may also be used.
        :param str output_filename: output file name.
"""
import numpy as np
thisN = len(self)
if isinstance(N, int):
if N > thisN:
N = thisN
# create random set of reads to pick up
cherries = list(range(thisN))
np.random.shuffle(cherries)
# cast to set for efficient iteration
cherries = set(cherries[0:N])
elif isinstance(N, set):
cherries = N
elif isinstance(N, list):
cherries = set(N)
fasta = FastxFile(self.filename)
pb = Progress(thisN) # since we scan the entire file
with open(output_filename, "w") as fh:
for i, read in enumerate(fasta):
if i in cherries:
                    fh.write(str(read) + "\n")
else:
pass
pb.animate(i + 1)
return cherries
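    # Usage sketch (file names hypothetical):
    #     picked = FastA("reads.fa").select_random_reads(1000, "subsample.fa")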
def get_stats(self):
"""Return a dictionary with basic statistics"""
from pylab import mean
stats = {}
stats["N"] = len(self.sequences)
stats["mean_length"] = mean(self.lengths)
stats["total_length"] = sum(self.lengths)
stats["N50"] = N50(self.lengths)
stats["L50"] = L50(self.lengths)
stats["min_length"] = min(self.lengths)
stats["max_length"] = max(self.lengths)
return stats
def summary(self, max_contigs=-1):
"""returns summary and print information on the stdout
This method is used when calling sequana standalone ::
sequana summary test.fasta
"""
from pylab import mean, argmax
# used by sequana summary fasta
summary = {"number_of_contigs": len(self.sequences)}
summary["total_contigs_length"] = sum(self.lengths)
summary["mean_contig_length"] = mean(self.lengths)
summary["max_contig_length"] = max(self.lengths)
summary["min_contig_length"] = min(self.lengths)
N = 0
lengths = self.lengths[:]
positions = list(range(len(lengths)))
stats = self.get_stats()
print("#sample_name: {}".format(self.filename))
print("#total length: {}".format(stats["total_length"]))
print("#N50: {}".format(stats["N50"]))
print("#Ncontig: {}".format(stats["N"]))
print("#L50: {}".format(stats["L50"]))
print("#max_contig_length: {}".format(stats["max_length"]))
print("#min_contig_length: {}".format(stats["min_length"]))
print("#mean_contig_length: {}".format(stats["mean_length"]))
print("contig name,length,count A,C,G,T,N")
if max_contigs == -1:
max_contigs = len(lengths) + 1
while lengths and N < max_contigs:
N += 1
index = argmax(lengths)
length = lengths.pop(index)
position = positions.pop(index)
sequence = self.sequences[position]
name = self.names[position]
print(
"{},{},{},{},{},{},{}".format(
name,
length,
sequence.count("A"),
sequence.count("C"),
sequence.count("G"),
sequence.count("T"),
sequence.count("N"),
)
            )
        return summary
def GC_content_sequence(self, sequence):
"""Return GC content in percentage of a sequence"""
GC = sequence.count("G") + sequence.count("g")
GC += sequence.count("C") + sequence.count("c")
return GC / len(sequence) * 100
def GC_content(self):
"""Return GC content in percentage of all sequences found in the FastA file"""
lengths = sum(self.lengths)
GC = 0
for seq in self.sequences:
GC += seq.count("G") + seq.count("g")
GC += seq.count("C") + seq.count("c")
return GC / lengths * 100
def reverse_and_save(self, filename):
"""Reverse sequences and save in a file"""
with open(filename, "w") as fout:
            for read in self:
                if read.comment is None:
                    fout.write(">{}\n{}\n".format(read.name, read.sequence[::-1]))
                else:
                    fout.write(">{}\t{}\n{}\n".format(read.name, read.comment, read.sequence[::-1]))
def save_ctg_to_fasta(self, ctgname, outname, max_length=-1):
"""Select a contig and save in a file"""
index = self.names.index(ctgname)
with open("{}.fa".format(outname), "w") as fout:
if max_length == -1:
fout.write(">{}\n{}".format(outname, self.sequences[index]))
else:
fout.write(">{}\n{}".format(outname, self.sequences[index][0:max_length]))
def to_fasta(self, outfile, width=80):
"""Save the input FastA file into a new file
The interest of this method is to wrap the sequence into 80 characters.
This is useful if the input file is not formatted correctly.
"""
with open(outfile, "w") as fout:
for name, comment, seq in zip(self.names, self.comments, self.sequences):
seq = "\n".join(textwrap.wrap(seq, width))
if comment is None:
fout.write(f">{name}\n{seq}\n")
else:
fout.write(f">{name}\t{comment}\n{seq}\n")
def to_igv_chrom_size(self, output):
"""Create a IGV file storing chromosomes and their sizes"""
data = self.get_lengths_as_dict()
with open(output, "w") as fout:
for k, v in data.items():
fout.write("{}\t{}\n".format(k, v))
def save_collapsed_fasta(self, outfile, ctgname, width=80, comment=None):
"""Concatenate all contigs and save results"""
with open(outfile, "w") as fout:
data = "".join(self.sequences)
seq = "\n".join(textwrap.wrap(data, width))
if comment is None:
fout.write(f">{ctgname}\n{seq}\n")
else:
fout.write(f">{ctgname}\t{comment}\n{seq}\n")
| sequana/sequana | sequana/fasta.py | Python | bsd-3-clause | 12,901 | [
"pysam"
] | 230e5f37299629c18626a33d2d167563dadae79080843fab810827cad2371786 |
# Copyright (C) 2012-2018
# Max Planck Institute for Polymer Research
# Copyright (C) 2008-2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.interaction.Interaction
**********************************
This is an abstract class, meant only to be inherited from.
.. function:: espressopp.interaction.Interaction.bondType()
:rtype: int
.. function:: espressopp.interaction.Interaction.computeEnergy()
:rtype: real
.. function:: espressopp.interaction.Interaction.computeEnergyAA(atomtype)
    :param atomtype: Type of particles with respect to which the atomistic energy is calculated.
    :type atomtype: int
:rtype: real
.. function:: espressopp.interaction.Interaction.computeEnergyDeriv()
:rtype: real
.. function:: espressopp.interaction.Interaction.computeEnergyCG(atomtype)
    :param atomtype: Type of particles with respect to which the coarse-grained energy is calculated.
    :type atomtype: int
:rtype: real
.. function:: espressopp.interaction.Interaction.computeVirial()
:rtype: real
"""
from espressopp import pmi
from _espressopp import interaction_Interaction
unused, Nonbonded, Single, Pair, Angular, Dihedral, NonbondedSlow = range(7)
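# Usage sketch (illustrative only; assumes 'inter' is a concrete interaction
# already added to a system):
#     if inter.bondType() == Nonbonded:
#         print("non-bonded interaction, energy =", inter.computeEnergy())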
class InteractionLocal(object):
def computeEnergy(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.computeEnergy(self)
def computeEnergyAA(self, atomtype = None):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if atomtype is None:
return self.cxxclass.computeEnergyAA(self)
else:
return self.cxxclass.computeEnergyAA(self, atomtype)
def computeEnergyCG(self, atomtype = None):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if atomtype is None:
return self.cxxclass.computeEnergyCG(self)
else:
return self.cxxclass.computeEnergyCG(self, atomtype)
def computeEnergyDeriv(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.computeEnergyDeriv(self)
def computeVirial(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.computeVirial(self)
def bondType(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return int(self.cxxclass.bondType(self))
if pmi.isController:
class Interaction(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmicall = [ "computeEnergy", "computeEnergyDeriv", "computeEnergyAA", "computeEnergyCG", "computeVirial", "bondType" ]
)
| govarguz/espressopp | src/interaction/Interaction.py | Python | gpl-3.0 | 3,721 | [
"ESPResSo"
] | c50a8c7c597fb91c41f0689f81d7da6f82748d187b98b9ec7e9a54e4d0670016 |
# Copyright 2009-2011 by Eric Talevich. All rights reserved.
# Revisions copyright 2009-2013 by Peter Cock. All rights reserved.
# Revisions copyright 2013 Lenna X. Peterson. All rights reserved.
#
# Converted by Eric Talevich from an older unit test copyright 2002
# by Thomas Hamelryck.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for the Bio.PDB module."""
from __future__ import print_function
import os
import tempfile
import unittest
import warnings
from Bio._py3k import StringIO
try:
import numpy
from numpy import dot # Missing on old PyPy's micronumpy
del dot
from numpy.linalg import svd, det # Missing in PyPy 2.0 numpypy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.PDB.")
from Bio import BiopythonWarning
from Bio.Seq import Seq
from Bio.Alphabet import generic_protein
from Bio.PDB import PDBParser, PPBuilder, CaPPBuilder, PDBIO, Select
from Bio.PDB import HSExposureCA, HSExposureCB, ExposureCN
from Bio.PDB.PDBExceptions import PDBConstructionException, PDBConstructionWarning
from Bio.PDB import rotmat, Vector
from Bio.PDB import Residue, Atom
from Bio.PDB import make_dssp_dict
from Bio.PDB.NACCESS import process_asa_data, process_rsa_data
# NB: the 'A_' prefix ensures this test case is run first
class A_ExceptionTest(unittest.TestCase):
"""Errors and warnings while parsing of flawed PDB files.
These tests must be executed because of the way Python's warnings module
works -- a warning is only logged the first time it is encountered.
"""
def test_1_warnings(self):
"""Check warnings: Parse a flawed PDB file in permissive mode.
NB: The try/finally block is adapted from the warnings.catch_warnings
context manager in the Python 2.6 standard library.
TODO: Now we require Python 2.6, switch to using warnings.catch_warnings
"""
warnings.simplefilter('always', PDBConstructionWarning)
try:
# Equivalent to warnings.catch_warnings -- hackmagic
orig_showwarning = warnings.showwarning
all_warns = []
def showwarning(*args, **kwargs):
all_warns.append(args[0])
warnings.showwarning = showwarning
# Trigger warnings
p = PDBParser(PERMISSIVE=True)
p.get_structure("example", "PDB/a_structure.pdb")
self.assertEqual(len(all_warns), 14)
for wrn, msg in zip(all_warns, [
# Expected warning messages:
"Used element 'N' for Atom (name=N) with given element ''",
"Used element 'C' for Atom (name=CA) with given element ''",
"Atom names ' CA ' and 'CA ' differ only in spaces at line 17.",
"Used element 'CA' for Atom (name=CA ) with given element ''",
'Atom N defined twice in residue <Residue ARG het= resseq=2 icode= > at line 21.',
'disordered atom found with blank altloc before line 33.',
"Residue (' ', 4, ' ') redefined at line 43.",
"Blank altlocs in duplicate residue SER (' ', 4, ' ') at line 43.",
"Residue (' ', 10, ' ') redefined at line 75.",
"Residue (' ', 14, ' ') redefined at line 106.",
"Residue (' ', 16, ' ') redefined at line 135.",
"Residue (' ', 80, ' ') redefined at line 633.",
"Residue (' ', 81, ' ') redefined at line 646.",
'Atom O defined twice in residue <Residue HOH het=W resseq=67 icode= > at line 822.'
]):
self.assertTrue(msg in str(wrn), str(wrn))
finally:
warnings.showwarning = orig_showwarning
def test_2_strict(self):
"""Check error: Parse a flawed PDB file in strict mode."""
parser = PDBParser(PERMISSIVE=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PDBConstructionWarning)
self.assertRaises(PDBConstructionException,
parser.get_structure, "example", "PDB/a_structure.pdb")
self.assertEqual(len(w), 4, w)
def test_3_bad_xyz(self):
"""Check error: Parse an entry with bad x,y,z value."""
data = "ATOM 9 N ASP A 152 21.554 34.953 27.691 1.00 19.26 N\n"
parser = PDBParser(PERMISSIVE=False)
s = parser.get_structure("example", StringIO(data))
data = "ATOM 9 N ASP A 152 21.ish 34.953 27.691 1.00 19.26 N\n"
self.assertRaises(PDBConstructionException,
parser.get_structure, "example", StringIO(data))
def test_4_occupancy(self):
"""Parse file with missing occupancy"""
permissive = PDBParser(PERMISSIVE=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PDBConstructionWarning)
structure = permissive.get_structure("test", "PDB/occupancy.pdb")
self.assertEqual(len(w), 3, w)
atoms = structure[0]['A'][(' ', 152, ' ')]
# Blank occupancy behavior set in Bio/PDB/PDBParser
self.assertEqual(atoms['N'].get_occupancy(), None)
self.assertEqual(atoms['CA'].get_occupancy(), 1.0)
self.assertEqual(atoms['C'].get_occupancy(), 0.0)
strict = PDBParser(PERMISSIVE=False)
self.assertRaises(PDBConstructionException,
strict.get_structure, "test", "PDB/occupancy.pdb")
class HeaderTests(unittest.TestCase):
"""Tests for parse_pdb_header."""
def test_capsid(self):
"""Parse the header of a known PDB file (1A8O)."""
parser = PDBParser()
struct = parser.get_structure('1A8O', 'PDB/1A8O.pdb')
self.assertAlmostEqual(struct.header['resolution'], 1.7)
# Case-insensitive string comparisons
known_strings = {
'author': 'T.R.Gamble,S.Yoo,F.F.Vajdos,U.K.Von Schwedler,D.K.Worthylake,H.Wang,J.P.Mccutcheon,W.I.Sundquist,C.P.Hill',
'deposition_date': '1998-03-27',
'head': 'viral protein',
'journal': 'AUTH T.R.GAMBLE,S.YOO,F.F.VAJDOS,U.K.VON SCHWEDLER,AUTH 2 D.K.WORTHYLAKE,H.WANG,J.P.MCCUTCHEON,W.I.SUNDQUIST,AUTH 3 C.P.HILLTITL STRUCTURE OF THE CARBOXYL-TERMINAL DIMERIZATIONTITL 2 DOMAIN OF THE HIV-1 CAPSID PROTEIN.REF SCIENCE V. 278 849 1997REFN ISSN 0036-8075PMID 9346481DOI 10.1126/SCIENCE.278.5339.849',
'journal_reference': 't.r.gamble,s.yoo,f.f.vajdos,u.k.von schwedler, d.k.worthylake,h.wang,j.p.mccutcheon,w.i.sundquist, c.p.hill structure of the carboxyl-terminal dimerization domain of the hiv-1 capsid protein. science v. 278 849 1997 issn 0036-8075 9346481 10.1126/science.278.5339.849 ',
'keywords': 'capsid, core protein, hiv, c-terminal domain, viral protein',
'name': ' hiv capsid c-terminal domain',
'release_date': '1998-10-14',
'structure_method': 'x-ray diffraction',
}
for key, expect in known_strings.items():
self.assertEqual(struct.header[key].lower(), expect.lower())
def test_fibril(self):
"""Parse the header of another PDB file (2BEG)."""
parser = PDBParser()
struct = parser.get_structure('2BEG', 'PDB/2BEG.pdb')
known_strings = {
'author': 'T.Luhrs,C.Ritter,M.Adrian,D.Riek-Loher,B.Bohrmann,H.Dobeli,D.Schubert,R.Riek',
'deposition_date': '2005-10-24',
'head': 'protein fibril',
'journal': "AUTH T.LUHRS,C.RITTER,M.ADRIAN,D.RIEK-LOHER,B.BOHRMANN,AUTH 2 H.DOBELI,D.SCHUBERT,R.RIEKTITL 3D STRUCTURE OF ALZHEIMER'S AMYLOID-{BETA}(1-42)TITL 2 FIBRILS.REF PROC.NATL.ACAD.SCI.USA V. 102 17342 2005REFN ISSN 0027-8424PMID 16293696DOI 10.1073/PNAS.0506723102",
'journal_reference': "t.luhrs,c.ritter,m.adrian,d.riek-loher,b.bohrmann, h.dobeli,d.schubert,r.riek 3d structure of alzheimer's amyloid-{beta}(1-42) fibrils. proc.natl.acad.sci.usa v. 102 17342 2005 issn 0027-8424 16293696 10.1073/pnas.0506723102 ",
'keywords': "alzheimer's, fibril, protofilament, beta-sandwich, quenched hydrogen/deuterium exchange, pairwise mutagenesis, protein fibril",
'name': " 3d structure of alzheimer's abeta(1-42) fibrils",
'release_date': '2005-11-22',
'structure_method': 'solution nmr',
}
for key, expect in known_strings.items():
self.assertEqual(struct.header[key].lower(), expect.lower())
class ParseTest(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
p = PDBParser(PERMISSIVE=1)
self.structure = p.get_structure("example", "PDB/a_structure.pdb")
def test_c_n(self):
"""Extract polypeptides using C-N."""
ppbuild = PPBuilder()
polypeptides = ppbuild.build_peptides(self.structure[1])
self.assertEqual(len(polypeptides), 1)
pp = polypeptides[0]
# Check the start and end positions
self.assertEqual(pp[0].get_id()[1], 2)
self.assertEqual(pp[-1].get_id()[1], 86)
# Check the sequence
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("RCGSQGGGSTCPGLRCCSIWGWCGDSEPYCGRTCENKCWSGER"
"SDHRCGAAVGNPPCGQDRCCSVHGWCGGGNDYCSGGNCQYRC",
str(s))
def test_ca_ca(self):
"""Extract polypeptides using CA-CA."""
ppbuild = CaPPBuilder()
polypeptides = ppbuild.build_peptides(self.structure[1])
self.assertEqual(len(polypeptides), 1)
pp = polypeptides[0]
# Check the start and end positions
self.assertEqual(pp[0].get_id()[1], 2)
self.assertEqual(pp[-1].get_id()[1], 86)
# Check the sequence
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("RCGSQGGGSTCPGLRCCSIWGWCGDSEPYCGRTCENKCWSGER"
"SDHRCGAAVGNPPCGQDRCCSVHGWCGGGNDYCSGGNCQYRC",
str(s))
def test_structure(self):
"""Verify the structure of the parsed example PDB file."""
# Structure contains 2 models
self.assertEqual(len(self.structure), 2)
# --- Checking model 0 ---
m0 = self.structure[0]
# Model 0 contains 1 chain
self.assertEqual(len(m0), 1)
# Chain 'A' contains 1 residue
self.assertEqual(len(m0['A']), 1)
        # Residue ('H_PCA', 1, ' ') contains 9 atoms.
residue = m0['A'].get_list()[0]
self.assertEqual(residue.get_id(), ('H_PCA', 1, ' '))
self.assertEqual(len(residue), 9)
# --- Checking model 1 ---
m1 = self.structure[1]
# Model 1 contains 3 chains
self.assertEqual(len(m1), 3)
# Deconstruct this data structure to check each chain
chain_data = [ # chain_id, chain_len, [(residue_id, residue_len), ...]
('A', 86, [ ((' ', 0, ' '), 1 ),
((' ', 2, ' '), 11),
((' ', 3, ' '), 6, 1), # disordered
((' ', 4, ' '), 4 ),
((' ', 5, ' '), 6 ),
((' ', 6, ' '), 9 ),
((' ', 7, ' '), 4 ),
((' ', 8, ' '), 4 ),
((' ', 9, ' '), 4 ),
((' ', 10, ' '), 6, ['GLY', 'SER']), # point mut
((' ', 11, ' '), 7 ),
((' ', 12, ' '), 6 ),
((' ', 13, ' '), 7 ),
((' ', 14, ' '), 4, ['ALA', 'GLY']), # point mut
((' ', 15, ' '), 8, 3), # disordered
((' ', 16, ' '), 11, ['ARG', 'TRP']), # point mut
((' ', 17, ' '), 6 ),
((' ', 18, ' '), 6 ),
((' ', 19, ' '), 6 ),
((' ', 20, ' '), 8 ),
((' ', 21, ' '), 14),
((' ', 22, ' '), 4 ),
((' ', 23, ' '), 14),
((' ', 24, ' '), 6 ),
((' ', 25, ' '), 4 ),
((' ', 26, ' '), 8 ),
((' ', 27, ' '), 6 ),
((' ', 28, ' '), 9, 5), # disordered
((' ', 29, ' '), 7 ),
((' ', 30, ' '), 12),
((' ', 31, ' '), 6 ),
((' ', 32, ' '), 4 ),
((' ', 33, ' '), 11),
((' ', 34, ' '), 7 ),
((' ', 35, ' '), 6 ),
((' ', 36, ' '), 9 ),
((' ', 37, ' '), 8 ),
((' ', 38, ' '), 9 ),
((' ', 39, ' '), 6 ),
((' ', 40, ' '), 14),
((' ', 41, ' '), 6 ),
((' ', 42, ' '), 4 ),
((' ', 43, ' '), 9 ),
((' ', 44, ' '), 11),
((' ', 45, ' '), 6, 1), # disordered
((' ', 46, ' '), 8 ),
((' ', 47, ' '), 10),
((' ', 48, ' '), 11),
((' ', 49, ' '), 6 ),
((' ', 50, ' '), 4 ),
((' ', 51, ' '), 5 ),
((' ', 52, ' '), 5 ),
((' ', 53, ' '), 7 ),
((' ', 54, ' '), 4 ),
((' ', 55, ' '), 8 ),
((' ', 56, ' '), 7 ),
((' ', 57, ' '), 7 ),
((' ', 58, ' '), 6 ),
((' ', 59, ' '), 4 ),
((' ', 60, ' '), 9 ),
((' ', 61, ' '), 8 ),
((' ', 62, ' '), 11),
((' ', 63, ' '), 6 ),
((' ', 64, ' '), 6 ),
((' ', 65, ' '), 6 ),
((' ', 66, ' '), 7 ),
((' ', 67, ' '), 10),
((' ', 68, ' '), 4 ),
((' ', 69, ' '), 14),
((' ', 70, ' '), 6 ),
((' ', 71, ' '), 4 ),
((' ', 72, ' '), 4 ),
((' ', 73, ' '), 4 ),
((' ', 74, ' '), 8, 3), # disordered
((' ', 75, ' '), 8 ),
((' ', 76, ' '), 12),
((' ', 77, ' '), 6 ),
((' ', 78, ' '), 6 ),
((' ', 79, ' '), 4, 4), # disordered
((' ', 80, ' '), 4, ['GLY', 'SER']), # point mut
((' ', 81, ' '), 8, ['ASN', 'LYS']), # point mut
((' ', 82, ' '), 6 ),
((' ', 83, ' '), 9 ),
((' ', 84, ' '), 12),
((' ', 85, ' '), 11),
((' ', 86, ' '), 6 ),
]),
('B', 4, [ (('H_NAG', 1, ' '), 14),
(('H_NAG', 2, ' '), 14),
(('H_NAG', 3, ' '), 14),
(('H_NAG', 4, ' '), 14),
]),
(' ', 76, [ (('W', 1, ' '), 1),
(('W', 2, ' '), 1),
(('W', 3, ' '), 1),
(('W', 4, ' '), 1),
(('W', 5, ' '), 1),
(('W', 6, ' '), 1),
(('W', 7, ' '), 1),
(('W', 8, ' '), 1),
(('W', 9, ' '), 1),
(('W', 10, ' '), 1),
(('W', 11, ' '), 1),
(('W', 12, ' '), 1),
(('W', 13, ' '), 1),
(('W', 14, ' '), 1),
(('W', 15, ' '), 1),
(('W', 16, ' '), 1),
(('W', 17, ' '), 1),
(('W', 18, ' '), 1),
(('W', 19, ' '), 1),
(('W', 20, ' '), 1),
(('W', 21, ' '), 1),
(('W', 22, ' '), 1),
(('W', 23, ' '), 1),
(('W', 24, ' '), 1),
(('W', 25, ' '), 1),
(('W', 26, ' '), 1),
(('W', 27, ' '), 1),
(('W', 28, ' '), 1),
(('W', 29, ' '), 1),
(('W', 30, ' '), 1),
(('W', 31, ' '), 1),
(('W', 32, ' '), 1),
(('W', 33, ' '), 1),
(('W', 34, ' '), 1),
(('W', 35, ' '), 1),
(('W', 36, ' '), 1),
(('W', 37, ' '), 1),
(('W', 38, ' '), 1),
(('W', 39, ' '), 1),
(('W', 40, ' '), 1),
(('W', 41, ' '), 1),
(('W', 42, ' '), 1),
(('W', 43, ' '), 1),
(('W', 44, ' '), 1),
(('W', 45, ' '), 1),
(('W', 46, ' '), 1),
(('W', 47, ' '), 1),
(('W', 48, ' '), 1),
(('W', 49, ' '), 1),
(('W', 50, ' '), 1),
(('W', 51, ' '), 1),
(('W', 52, ' '), 1),
(('W', 53, ' '), 1),
(('W', 54, ' '), 1),
(('W', 55, ' '), 1),
(('W', 56, ' '), 1),
(('W', 57, ' '), 1),
(('W', 58, ' '), 1),
(('W', 59, ' '), 1),
(('W', 60, ' '), 1),
(('W', 61, ' '), 1),
(('W', 62, ' '), 1),
(('W', 63, ' '), 1),
(('W', 64, ' '), 1),
(('W', 65, ' '), 1),
(('W', 66, ' '), 1),
(('W', 67, ' '), 1),
(('W', 68, ' '), 1),
(('W', 69, ' '), 1),
(('W', 70, ' '), 1),
(('W', 71, ' '), 1),
(('W', 72, ' '), 1),
(('W', 73, ' '), 1),
(('W', 74, ' '), 1),
(('W', 75, ' '), 1),
(('W', 77, ' '), 1),
])
]
for c_idx, chn in enumerate(chain_data):
# Check chain ID and length
chain = m1.get_list()[c_idx]
self.assertEqual(chain.get_id(), chn[0])
self.assertEqual(len(chain), chn[1])
for r_idx, res in enumerate(chn[2]):
residue = chain.get_list()[r_idx]
# Check residue ID and atom count
self.assertEqual(residue.get_id(), res[0])
self.assertEqual(len(residue), res[1])
disorder_lvl = residue.is_disordered()
if disorder_lvl == 1:
# Check the number of disordered atoms
disordered_count = sum(1 for atom in residue
if atom.is_disordered())
if disordered_count:
self.assertEqual(disordered_count, res[2])
elif disorder_lvl == 2:
# Point mutation -- check residue names
self.assertEqual(residue.disordered_get_id_list(), res[2])
def test_details(self):
"""Verify details of the parsed example PDB file."""
structure = self.structure
self.assertEqual(len(structure), 2)
# First model
model = structure[0]
self.assertEqual(model.id, 0)
self.assertEqual(model.level, "M")
self.assertEqual(len(model), 1)
chain = model["A"]
self.assertEqual(chain.id, "A")
self.assertEqual(chain.level, "C")
self.assertEqual(len(chain), 1)
self.assertEqual(" ".join(residue.resname for residue in chain), "PCA")
self.assertEqual(" ".join(atom.name for atom in chain.get_atoms()),
"N CA CB CG CD OE C O CA ")
self.assertEqual(" ".join(atom.element for atom in chain.get_atoms()),
"N C C C C O C O CA")
# Second model
model = structure[1]
self.assertEqual(model.id, 1)
self.assertEqual(model.level, "M")
self.assertEqual(len(model), 3)
chain = model["A"]
self.assertEqual(chain.id, "A")
self.assertEqual(chain.level, "C")
self.assertEqual(len(chain), 86)
self.assertEqual(" ".join(residue.resname for residue in chain),
"CYS ARG CYS GLY SER GLN GLY GLY GLY SER THR CYS "
"PRO GLY LEU ARG CYS CYS SER ILE TRP GLY TRP CYS "
"GLY ASP SER GLU PRO TYR CYS GLY ARG THR CYS GLU "
"ASN LYS CYS TRP SER GLY GLU ARG SER ASP HIS ARG "
"CYS GLY ALA ALA VAL GLY ASN PRO PRO CYS GLY GLN "
"ASP ARG CYS CYS SER VAL HIS GLY TRP CYS GLY GLY "
"GLY ASN ASP TYR CYS SER GLY GLY ASN CYS GLN TYR "
"ARG CYS")
self.assertEqual(" ".join(atom.name for atom in chain.get_atoms()),
"C N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB SG "
"N CA C O N CA C O CB OG N CA C O CB CG CD OE1 NE2 "
"N CA C O N CA C O N CA C O N CA C O CB OG N CA C "
"O CB OG1 CG2 N CA C O CB SG N CA C O CB CG CD N "
"CA C O N CA C O CB CG CD1 CD2 N CA C O CB CG CD NE "
"CZ NH1 NH2 N CA C O CB SG N CA C O CB SG N CA C O "
"CB OG N CA C O CB CG1 CG2 CD1 N CA C O CB CG CD1 "
"CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 N CA C O N CA C O CB "
"CG CD1 CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 N CA C O CB SG "
"N CA C O N CA C O CB CG OD1 OD2 N CA C O CB OG N "
"CA C O CB CG CD OE1 OE2 N CA C O CB CG CD N CA C O "
"CB CG CD1 CD2 CE1 CE2 CZ OH N CA C O CB SG N CA C "
"O N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB OG1 "
"CG2 N CA C O CB SG N CA C O CB CG CD OE1 OE2 N CA "
"C O CB CG OD1 ND2 N CA C O CB CG CD CE NZ N CA C O "
"CB SG N CA C O CB CG CD1 CD2 NE1 CE2 CE3 CZ2 CZ3 "
"CH2 N CA C O CB OG N CA C O N CA C O CB CG CD OE1 "
"OE2 N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB OG "
"N CA C O CB CG OD1 OD2 N CA C O CB CG ND1 CD2 CE1 "
"NE2 N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB SG "
"N CA C O N CA C O CB N CA C O CB N CA C O CB CG1 "
"CG2 N CA C O N CA C O CB CG OD1 ND2 N CA C O CB CG "
"CD N CA C O CB CG CD N CA C O CB SG N CA C O N CA "
"C O CB CG CD OE1 NE2 N CA C O CB CG OD1 OD2 N CA C "
"O CB CG CD NE CZ NH1 NH2 N CA C O CB SG N CA C O "
"CB SG N CA C O CB OG N CA C O CB CG1 CG2 N CA C O "
"CB CG ND1 CD2 CE1 NE2 N CA C O N CA C O CB CG CD1 "
"CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 N CA C O CB SG N CA C "
"O N CA C O N CA C O N CA C O CB CG OD1 ND2 N CA C O "
"CB CG OD1 OD2 N CA C O CB CG CD1 CD2 CE1 CE2 CZ OH "
"N CA C O CB SG N CA C O CB OG N CA C O N CA C O N "
"CA C O CB CG OD1 ND2 N CA C O CB SG N CA C O CB CG "
"CD OE1 NE2 N CA C O CB CG CD1 CD2 CE1 CE2 CZ OH N "
"CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB SG")
self.assertEqual(" ".join(atom.element for atom in chain.get_atoms()),
"C N C C O C C C N C N N N C C O C S N C C O N C C O "
"C O N C C O C C C O N N C C O N C C O N C C O N C C "
"O C O N C C O C O C N C C O C S N C C O C C C N C C "
"O N C C O C C C C N C C O C C C N C N N N C C O C S "
"N C C O C S N C C O C O N C C O C C C C N C C O C C "
"C C N C C C C C N C C O N C C O C C C C N C C C C C "
"N C C O C S N C C O N C C O C C O O N C C O C O N C "
"C O C C C O O N C C O C C C N C C O C C C C C C C O "
"N C C O C S N C C O N C C O C C C N C N N N C C O C "
"O C N C C O C S N C C O C C C O O N C C O C C O N N "
"C C O C C C C N N C C O C S N C C O C C C C N C C C "
"C C N C C O C O N C C O N C C O C C C O O N C C O C "
"C C N C N N N C C O C O N C C O C C O O N C C O C C "
"N C C N N C C O C C C N C N N N C C O C S N C C O N "
"C C O C N C C O C N C C O C C C N C C O N C C O C C "
"O N N C C O C C C N C C O C C C N C C O C S N C C O "
"N C C O C C C O N N C C O C C O O N C C O C C C N C "
"N N N C C O C S N C C O C S N C C O C O N C C O C C "
"C N C C O C C N C C N N C C O N C C O C C C C N C C "
"C C C N C C O C S N C C O N C C O N C C O N C C O C "
"C O N N C C O C C O O N C C O C C C C C C C O N C C "
"O C S N C C O C O N C C O N C C O N C C O C C O N N "
"C C O C S N C C O C C C O N N C C O C C C C C C C O "
"N C C O C C C N C N N N C C O C S")
class ParseReal(unittest.TestCase):
"""Testing with real PDB files."""
def test_empty(self):
"""Parse an empty file."""
parser = PDBParser()
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
struct = parser.get_structure('MT', filename)
# Structure has no children (models)
self.assertFalse(len(struct))
finally:
os.remove(filename)
def test_c_n(self):
"""Extract polypeptides from 1A80."""
parser = PDBParser(PERMISSIVE=False)
structure = parser.get_structure("example", "PDB/1A8O.pdb")
self.assertEqual(len(structure), 1)
for ppbuild in [PPBuilder(), CaPPBuilder()]:
# ==========================================================
# First try allowing non-standard amino acids,
polypeptides = ppbuild.build_peptides(structure[0], False)
self.assertEqual(len(polypeptides), 1)
pp = polypeptides[0]
# Check the start and end positions
self.assertEqual(pp[0].get_id()[1], 151)
self.assertEqual(pp[-1].get_id()[1], 220)
# Check the sequence
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
# Here non-standard MSE are shown as M
self.assertEqual("MDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQ"
"NANPDCKTILKALGPGATLEEMMTACQG", str(s))
# ==========================================================
# Now try strict version with only standard amino acids
# Should ignore MSE 151 at start, and then break the chain
# at MSE 185, and MSE 214,215
polypeptides = ppbuild.build_peptides(structure[0], True)
self.assertEqual(len(polypeptides), 3)
# First fragment
pp = polypeptides[0]
self.assertEqual(pp[0].get_id()[1], 152)
self.assertEqual(pp[-1].get_id()[1], 184)
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("DIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNW", str(s))
# Second fragment
pp = polypeptides[1]
self.assertEqual(pp[0].get_id()[1], 186)
self.assertEqual(pp[-1].get_id()[1], 213)
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("TETLLVQNANPDCKTILKALGPGATLEE", str(s))
# Third fragment
pp = polypeptides[2]
self.assertEqual(pp[0].get_id()[1], 216)
self.assertEqual(pp[-1].get_id()[1], 220)
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("TACQG", str(s))
def test_strict(self):
"""Parse 1A8O.pdb file in strict mode."""
parser = PDBParser(PERMISSIVE=False)
structure = parser.get_structure("example", "PDB/1A8O.pdb")
self.assertEqual(len(structure), 1)
model = structure[0]
self.assertEqual(model.id, 0)
self.assertEqual(model.level, "M")
self.assertEqual(len(model), 1)
chain = model["A"]
self.assertEqual(chain.id, "A")
self.assertEqual(chain.level, "C")
self.assertEqual(len(chain), 158)
self.assertEqual(" ".join(residue.resname for residue in chain),
"MSE ASP ILE ARG GLN GLY PRO LYS GLU PRO PHE ARG "
"ASP TYR VAL ASP ARG PHE TYR LYS THR LEU ARG ALA "
"GLU GLN ALA SER GLN GLU VAL LYS ASN TRP MSE THR "
"GLU THR LEU LEU VAL GLN ASN ALA ASN PRO ASP CYS "
"LYS THR ILE LEU LYS ALA LEU GLY PRO GLY ALA THR "
"LEU GLU GLU MSE MSE THR ALA CYS GLN GLY HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH")
self.assertEqual(" ".join(atom.name for atom in chain.get_atoms()),
"N CA C O CB CG SE CE N CA C O CB CG OD1 OD2 N CA "
"C O CB CG1 CG2 CD1 N CA C O CB CG CD NE CZ NH1 "
"NH2 N CA C O CB CG CD OE1 NE2 N CA C O N CA C O "
"CB CG CD N CA C O CB CG CD CE NZ N CA C O CB CG "
"CD OE1 OE2 N CA C O CB CG CD N CA C O CB CG CD1 "
"CD2 CE1 CE2 CZ N CA C O CB CG CD NE CZ NH1 NH2 N "
"CA C O CB CG OD1 OD2 N CA C O CB CG CD1 CD2 CE1 "
"CE2 CZ OH N CA C O CB CG1 CG2 N CA C O CB CG OD1 "
"OD2 N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB "
"CG CD1 CD2 CE1 CE2 CZ N CA C O CB CG CD1 CD2 CE1 "
"CE2 CZ OH N CA C O CB CG CD CE NZ N CA C O CB "
"OG1 CG2 N CA C O CB CG CD1 CD2 N CA C O CB CG CD "
"NE CZ NH1 NH2 N CA C O CB N CA C O CB CG CD OE1 "
"OE2 N CA C O CB CG CD OE1 NE2 N CA C O CB N CA C "
"O CB OG N CA C O CB CG CD OE1 NE2 N CA C O CB CG "
"CD OE1 OE2 N CA C O CB CG1 CG2 N CA C O CB CG CD "
"CE NZ N CA C O CB CG OD1 ND2 N CA C O CB CG CD1 "
"CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 N CA C O CB CG SE CE "
"N CA C O CB OG1 CG2 N CA C O CB CG CD OE1 OE2 N "
"CA C O CB OG1 CG2 N CA C O CB CG CD1 CD2 N CA C "
"O CB CG CD1 CD2 N CA C O CB CG1 CG2 N CA C O CB "
"CG CD OE1 NE2 N CA C O CB CG OD1 ND2 N CA C O CB "
"N CA C O CB CG OD1 ND2 N CA C O CB CG CD N CA C "
"O CB CG OD1 OD2 N CA C O CB SG N CA C O CB CG CD "
"CE NZ N CA C O CB OG1 CG2 N CA C O CB CG1 CG2 "
"CD1 N CA C O CB CG CD1 CD2 N CA C O CB CG CD CE "
"NZ N CA C O CB N CA C O CB CG CD1 CD2 N CA C O N "
"CA C O CB CG CD N CA C O N CA C O CB N CA C O CB "
"OG1 CG2 N CA C O CB CG CD1 CD2 N CA C O CB CG CD "
"OE1 OE2 N CA C O CB CG CD OE1 OE2 N CA C O CB CG "
"SE CE N CA C O CB CG SE CE N CA C O CB OG1 CG2 N "
"CA C O CB N CA C O CB SG N CA C O CB CG CD OE1 "
"NE2 N CA C O OXT O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O")
self.assertEqual(" ".join(atom.element for atom in chain.get_atoms()),
"N C C O C C SE C N C C O C C O O N C C O C C C C "
"N C C O C C C N C N N N C C O C C C O N N C C O "
"N C C O C C C N C C O C C C C N N C C O C C C O "
"O N C C O C C C N C C O C C C C C C C N C C O C "
"C C N C N N N C C O C C O O N C C O C C C C C C "
"C O N C C O C C C N C C O C C O O N C C O C C C "
"N C N N N C C O C C C C C C C N C C O C C C C C "
"C C O N C C O C C C C N N C C O C O C N C C O C "
"C C C N C C O C C C N C N N N C C O C N C C O C "
"C C O O N C C O C C C O N N C C O C N C C O C O "
"N C C O C C C O N N C C O C C C O O N C C O C C "
"C N C C O C C C C N N C C O C C O N N C C O C C "
"C C N C C C C C N C C O C C SE C N C C O C O C N "
"C C O C C C O O N C C O C O C N C C O C C C C N "
"C C O C C C C N C C O C C C N C C O C C C O N N "
"C C O C C O N N C C O C N C C O C C O N N C C O "
"C C C N C C O C C O O N C C O C S N C C O C C C "
"C N N C C O C O C N C C O C C C C N C C O C C C "
"C N C C O C C C C N N C C O C N C C O C C C C N "
"C C O N C C O C C C N C C O N C C O C N C C O C "
"O C N C C O C C C C N C C O C C C O O N C C O C "
"C C O O N C C O C C SE C N C C O C C SE C N C C "
"O C O C N C C O C N C C O C S N C C O C C C O N "
"N C C O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O")
def test_model_numbering(self):
"""Preserve model serial numbers during I/O."""
def confirm_numbering(struct):
self.assertEqual(len(struct), 20)
for idx, model in enumerate(struct):
                self.assertEqual(model.serial_num, idx + 1)
                self.assertEqual(model.serial_num, model.id + 1)
parser = PDBParser()
struct1 = parser.get_structure("1mot", "PDB/1MOT.pdb")
confirm_numbering(struct1)
# Round trip: serialize and parse again
io = PDBIO()
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = parser.get_structure("1mot", filename)
confirm_numbering(struct2)
finally:
os.remove(filename)
class WriteTest(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
self.parser = PDBParser(PERMISSIVE=1)
self.structure = self.parser.get_structure("example", "PDB/1A8O.pdb")
def test_pdbio_write_structure(self):
"""Write a full structure using PDBIO"""
io = PDBIO()
struct1 = self.structure
# Write full model to temp file
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(len(struct2), 1)
self.assertEqual(nresidues, 158)
finally:
os.remove(filename)
def test_pdbio_write_residue(self):
"""Write a single residue using PDBIO"""
io = PDBIO()
struct1 = self.structure
residue1 = list(struct1.get_residues())[0]
# Write full model to temp file
io.set_structure(residue1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(nresidues, 1)
finally:
os.remove(filename)
def test_pdbio_write_custom_residue(self):
"""Write a chainless residue using PDBIO"""
io = PDBIO()
res = Residue.Residue((' ', 1, ' '), 'DUM', '')
atm = Atom.Atom('CA', [0.1, 0.1, 0.1], 1.0, 1.0, ' ', 'CA', 1, 'C')
res.add(atm)
# Write full model to temp file
io.set_structure(res)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.parser.get_structure("res", filename)
latoms = list(struct2.get_atoms())
self.assertEqual(len(latoms), 1)
self.assertEqual(latoms[0].name, 'CA')
self.assertEqual(latoms[0].parent.resname, 'DUM')
self.assertEqual(latoms[0].parent.parent.id, 'A')
finally:
os.remove(filename)
def test_pdbio_select(self):
"""Write a selection of the structure using a Select subclass"""
# Selection class to filter all alpha carbons
class CAonly(Select):
"""
Accepts only CA residues
"""
def accept_atom(self, atom):
if atom.name == "CA" and atom.element == "C":
return 1
io = PDBIO()
struct1 = self.structure
# Write to temp file
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename, CAonly())
struct2 = self.parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(nresidues, 70)
finally:
os.remove(filename)
def test_pdbio_missing_occupancy(self):
"""Write PDB file with missing occupancy"""
io = PDBIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
structure = self.parser.get_structure("test", "PDB/occupancy.pdb")
io.set_structure(structure)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", BiopythonWarning)
io.save(filename)
self.assertEqual(len(w), 1, w)
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
struct2 = self.parser.get_structure("test", filename)
atoms = struct2[0]['A'][(' ', 152, ' ')]
self.assertEqual(atoms['N'].get_occupancy(), None)
finally:
os.remove(filename)
class Exposure(unittest.TestCase):
"Testing Bio.PDB.HSExposure."
def setUp(self):
pdb_filename = "PDB/a_structure.pdb"
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
            structure = PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
        self.model = structure[1]
        # Look at first chain only
        a_residues = list(self.model["A"].child_list)
self.assertEqual(86, len(a_residues))
self.assertEqual(a_residues[0].get_resname(), "CYS")
self.assertEqual(a_residues[1].get_resname(), "ARG")
self.assertEqual(a_residues[2].get_resname(), "CYS")
self.assertEqual(a_residues[3].get_resname(), "GLY")
# ...
self.assertEqual(a_residues[-3].get_resname(), "TYR")
self.assertEqual(a_residues[-2].get_resname(), "ARG")
self.assertEqual(a_residues[-1].get_resname(), "CYS")
self.a_residues = a_residues
self.radius = 13.0
def test_HSExposureCA(self):
"""HSExposureCA."""
hse = HSExposureCA(self.model, self.radius)
residues = self.a_residues
self.assertEqual(0, len(residues[0].xtra))
self.assertEqual(0, len(residues[1].xtra))
self.assertEqual(3, len(residues[2].xtra))
self.assertAlmostEqual(0.81250973133184456, residues[2].xtra["EXP_CB_PCB_ANGLE"])
self.assertEqual(14, residues[2].xtra["EXP_HSE_A_D"])
self.assertEqual(14, residues[2].xtra["EXP_HSE_A_U"])
self.assertEqual(3, len(residues[3].xtra))
self.assertAlmostEqual(1.3383737, residues[3].xtra["EXP_CB_PCB_ANGLE"])
self.assertEqual(13, residues[3].xtra["EXP_HSE_A_D"])
self.assertEqual(16, residues[3].xtra["EXP_HSE_A_U"])
# ...
self.assertEqual(3, len(residues[-2].xtra))
self.assertAlmostEqual(0.77124014456278489, residues[-2].xtra["EXP_CB_PCB_ANGLE"])
self.assertEqual(24, residues[-2].xtra["EXP_HSE_A_D"])
self.assertEqual(24, residues[-2].xtra["EXP_HSE_A_U"])
self.assertEqual(0, len(residues[-1].xtra))
def test_HSExposureCB(self):
"""HSExposureCB."""
hse = HSExposureCB(self.model, self.radius)
residues = self.a_residues
self.assertEqual(0, len(residues[0].xtra))
self.assertEqual(2, len(residues[1].xtra))
self.assertEqual(20, residues[1].xtra["EXP_HSE_B_D"])
self.assertEqual(5, residues[1].xtra["EXP_HSE_B_U"])
self.assertEqual(2, len(residues[2].xtra))
self.assertEqual(10, residues[2].xtra["EXP_HSE_B_D"])
self.assertEqual(18, residues[2].xtra["EXP_HSE_B_U"])
self.assertEqual(2, len(residues[3].xtra))
self.assertEqual(7, residues[3].xtra["EXP_HSE_B_D"])
self.assertEqual(22, residues[3].xtra["EXP_HSE_B_U"])
# ...
self.assertEqual(2, len(residues[-2].xtra))
self.assertEqual(14, residues[-2].xtra["EXP_HSE_B_D"])
self.assertEqual(34, residues[-2].xtra["EXP_HSE_B_U"])
self.assertEqual(2, len(residues[-1].xtra))
self.assertEqual(23, residues[-1].xtra["EXP_HSE_B_D"])
self.assertEqual(15, residues[-1].xtra["EXP_HSE_B_U"])
def test_ExposureCN(self):
"""HSExposureCN."""
hse = ExposureCN(self.model, self.radius)
residues = self.a_residues
self.assertEqual(0, len(residues[0].xtra))
self.assertEqual(1, len(residues[1].xtra))
self.assertEqual(25, residues[1].xtra["EXP_CN"])
self.assertEqual(1, len(residues[2].xtra))
self.assertEqual(28, residues[2].xtra["EXP_CN"])
self.assertEqual(1, len(residues[3].xtra))
self.assertEqual(29, residues[3].xtra["EXP_CN"])
# ...
self.assertEqual(1, len(residues[-2].xtra))
self.assertEqual(48, residues[-2].xtra["EXP_CN"])
self.assertEqual(1, len(residues[-1].xtra))
self.assertEqual(38, residues[-1].xtra["EXP_CN"])
class Atom_Element(unittest.TestCase):
"""induces Atom Element from Atom Name"""
def setUp(self):
pdb_filename = "PDB/a_structure.pdb"
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
            structure = PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
self.residue = structure[0]['A'][('H_PCA', 1, ' ')]
def test_AtomElement(self):
""" Atom Element """
atoms = self.residue.child_list
self.assertEqual('N', atoms[0].element) # N
self.assertEqual('C', atoms[1].element) # Alpha Carbon
self.assertEqual('CA', atoms[8].element) # Calcium
def test_ions(self):
"""Element for magnesium is assigned correctly."""
pdb_filename = "PDB/ions.pdb"
        structure = PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
# check magnesium atom
atoms = structure[0]['A'][('H_ MG', 1, ' ')].child_list
self.assertEqual('MG', atoms[0].element)
def test_hydrogens(self):
def quick_assign(fullname):
return Atom.Atom(fullname.strip(), None, None, None, None,
fullname, None).element
pdb_elements = dict(
H=(
' H ', ' HA ', ' HB ', ' HD1', ' HD2', ' HE ', ' HE1', ' HE2',
' HE3', ' HG ', ' HG1', ' HH ', ' HH2', ' HZ ', ' HZ2', ' HZ3',
'1H ', '1HA ', '1HB ', '1HD ', '1HD1', '1HD2', '1HE ', '1HE2',
'1HG ', '1HG1', '1HG2', '1HH1', '1HH2', '1HZ ', '2H ', '2HA ',
'2HB ', '2HD ', '2HD1', '2HD2', '2HE ', '2HE2', '2HG ', '2HG1',
'2HG2', '2HH1', '2HH2', '2HZ ', '3H ', '3HB ', '3HD1', '3HD2',
'3HE ', '3HG1', '3HG2', '3HZ ', 'HE21',
),
O=(' OH ',),
C=(' CH2',),
N=(' NH1', ' NH2'),
)
for element, atom_names in pdb_elements.items():
for fullname in atom_names:
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
e = quick_assign(fullname)
# warnings.warn("%s %s" % (fullname, e))
self.assertEqual(e, element)
class IterationTests(unittest.TestCase):
def setUp(self):
self.struc = PDBParser(PERMISSIVE=True).get_structure('X', "PDB/a_structure.pdb")
def test_get_chains(self):
"""Yields chains from different models separately."""
chains = [chain.id for chain in self.struc.get_chains()]
self.assertEqual(chains, ['A', 'A', 'B', ' '])
def test_get_residues(self):
"""Yields all residues from all models."""
residues = [resi.id for resi in self.struc.get_residues()]
self.assertEqual(len(residues), 167)
def test_get_atoms(self):
"""Yields all atoms from the structure, excluding duplicates and ALTLOCs which are not parsed."""
atoms = ["%12s"%str((atom.id, atom.altloc)) for atom in self.struc.get_atoms()]
self.assertEqual(len(atoms), 756)
# class RenumberTests(unittest.TestCase):
# """Tests renumbering of structures."""
#
# def setUp(self):
# pdb_filename = "PDB/1A8O.pdb"
# self.structure=PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
#
# def test_renumber_residues(self):
# """Residues in a structure are renumbered."""
# self.structure.renumber_residues()
# nums = [resi.id[1] for resi in self.structure[0]['A'].child_list]
# print(nums)
#
# -------------------------------------------------------------
class TransformTests(unittest.TestCase):
def setUp(self):
self.s = PDBParser(PERMISSIVE=True).get_structure(
'X', "PDB/a_structure.pdb")
self.m = self.s.get_list()[0]
self.c = self.m.get_list()[0]
self.r = self.c.get_list()[0]
self.a = self.r.get_list()[0]
def get_total_pos(self, o):
"""
Returns the sum of the positions of atoms in an entity along
with the number of atoms.
"""
if hasattr(o, "get_coord"):
return o.get_coord(), 1
total_pos = numpy.array((0.0, 0.0, 0.0))
total_count = 0
for p in o.get_list():
pos, count = self.get_total_pos(p)
total_pos += pos
total_count += count
return total_pos, total_count
def get_pos(self, o):
"""
Returns the average atom position in an entity.
"""
pos, count = self.get_total_pos(o)
        return 1.0 * pos / count
def test_transform(self):
"""Transform entities (rotation and translation)."""
for o in (self.s, self.m, self.c, self.r, self.a):
rotation = rotmat(Vector(1, 3, 5), Vector(1, 0, 0))
            translation = numpy.array((2.4, 0, 1), 'f')
oldpos = self.get_pos(o)
o.transform(rotation, translation)
newpos = self.get_pos(o)
newpos_check = numpy.dot(oldpos, rotation) + translation
for i in range(0, 3):
self.assertAlmostEqual(newpos[i], newpos_check[i])
class CopyTests(unittest.TestCase):
def setUp(self):
self.s = PDBParser(PERMISSIVE=True).get_structure(
'X', "PDB/a_structure.pdb")
self.m = self.s.get_list()[0]
self.c = self.m.get_list()[0]
self.r = self.c.get_list()[0]
self.a = self.r.get_list()[0]
def test_atom_copy(self):
aa = self.a.copy()
self.assertFalse(self.a is aa)
self.assertFalse(self.a.get_coord() is aa.get_coord())
    def test_entity_copy(self):
        """Copy structure, model, chain and residue entities."""
for e in (self.s, self.m, self.c, self.r):
ee = e.copy()
self.assertFalse(e is ee)
self.assertFalse(e.get_list()[0] is ee.get_list()[0])
class DsspTests(unittest.TestCase):
"""Tests for DSSP parsing etc which don't need the binary tool.
See also test_DSSP_tool.py for run time testing with the tool.
"""
def test_DSSP_file(self):
"""Test parsing of pregenerated DSSP"""
dssp, keys = make_dssp_dict("PDB/2BEG.dssp")
self.assertEqual(len(dssp), 130)
def test_DSSP_noheader_file(self):
"""Test parsing of pregenerated DSSP missing header information"""
# New DSSP prints a line containing only whitespace and "."
dssp, keys = make_dssp_dict("PDB/2BEG_noheader.dssp")
self.assertEqual(len(dssp), 130)
class NACCESSTests(unittest.TestCase):
"""Tests for NACCESS parsing etc which don't need the binary tool.
See also test_NACCESS_tool.py for run time testing with the tool.
"""
def test_NACCESS_rsa_file(self):
"""Test parsing of pregenerated rsa NACCESS file"""
with open("PDB/1A8O.rsa") as rsa:
naccess = process_rsa_data(rsa)
self.assertEqual(len(naccess), 66)
def test_NACCESS_asa_file(self):
"""Test parsing of pregenerated asa NACCESS file"""
with open("PDB/1A8O.asa") as asa:
naccess = process_asa_data(asa)
self.assertEqual(len(naccess), 524)
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_PDB.py | Python | gpl-2.0 | 52,905 | [
"Biopython"
] | 563eecee1794e1413b7f906250171a1b8c814f7e18c2580a5d5f67379e950125 |
#!/usr/bin/env python
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""PyEMMA: Emma's Markov Model Algorithms
PyEMMA is an open source collection of algorithms implemented mostly in
`NumPy <http://www.numpy.org/>`_ and `SciPy <http://www.scipy.org>`_
to analyze trajectories generated from any kind of simulation
(e.g. molecular trajectories) via Markov state models (MSM).
"""
import sys
import os
import versioneer
import warnings
from io import open
if sys.version_info[0] < 3:
    print('PyEMMA requires Python 3')
sys.exit(2)
DOCLINES = __doc__.split("\n")
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Environment :: MacOS X
Intended Audience :: Science/Research
License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)
Natural Language :: English
Operating System :: MacOS :: MacOS X
Operating System :: POSIX
Operating System :: Microsoft :: Windows
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Mathematics
Topic :: Scientific/Engineering :: Physics
"""
from setup_util import parse_setuppy_commands
try:
from setuptools import setup, Extension, find_packages
except ImportError as ie:
print("PyEMMA requires setuptools. Please install it with conda or pip.")
sys.exit(1)
###############################################################################
# Extensions
###############################################################################
def extensions():
from Cython.Build import cythonize
from numpy import get_include as np_get_include
pybind_inc = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ext', 'pybind11', 'include')
assert os.path.exists(pybind_inc), 'pybind11 headers not found at %s' % pybind_inc
from mdtraj import capi as mdtraj_capi
mdtraj_inc = mdtraj_capi()['include_dir']
mdtraj_lib = mdtraj_capi()['lib_dir']
from deeptime import capi_includes
deeptime_inc = capi_includes(inc_clustering=True)
lib_prefix = 'lib' if sys.platform.startswith('win') else ''
common_cflags = ['-O3', ]
clustering_module = \
Extension('pyemma.coordinates.clustering._ext',
sources=['pyemma/coordinates/clustering/src/clustering_module.cpp'],
include_dirs=[
mdtraj_inc,
pybind_inc,
] + deeptime_inc,
language='c++',
libraries=[lib_prefix+'theobald'],
library_dirs=[mdtraj_lib],
                  extra_compile_args=common_cflags)
covar_module = \
Extension('pyemma._ext.variational.estimators.covar_c._covartools',
sources=['pyemma/_ext/variational/estimators/covar_c/covartools.cpp'],
include_dirs=['pyemma/_ext/variational/estimators/covar_c/',
pybind_inc,
],
language='c++',
extra_compile_args=common_cflags)
eig_qr_module = \
Extension('pyemma._ext.variational.solvers.eig_qr.eig_qr',
sources=['pyemma/_ext/variational/solvers/eig_qr/eig_qr.pyx'],
include_dirs=['pyemma/_ext/variational/solvers/eig_qr/'],
extra_compile_args=['-std=c99'] + common_cflags)
orderedset = \
Extension('pyemma._ext.orderedset._orderedset',
sources=['pyemma/_ext/orderedset/_orderedset.pyx'],
extra_compile_args=['-std=c99'] + common_cflags)
extra_compile_args = ["-O3", "-std=c99"]
ext_bar = Extension(
"pyemma.thermo.extensions.bar",
sources=["pyemma/thermo/extensions/bar/bar.pyx",
"pyemma/thermo/extensions/bar/_bar.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_wham = Extension(
"pyemma.thermo.extensions.wham",
sources=["pyemma/thermo/extensions/wham/wham.pyx",
"pyemma/thermo/extensions/wham/_wham.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_mbar = Extension(
"pyemma.thermo.extensions.mbar",
sources=["pyemma/thermo/extensions/mbar/mbar.pyx",
"pyemma/thermo/extensions/mbar/_mbar.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_tram = Extension(
"pyemma.thermo.extensions.tram",
sources=["pyemma/thermo/extensions/tram/tram.pyx",
"pyemma/thermo/extensions/tram/_tram.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_dtram = Extension(
"pyemma.thermo.extensions.dtram",
sources=["pyemma/thermo/extensions/dtram/dtram.pyx",
"pyemma/thermo/extensions/dtram/_dtram.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_trammbar = Extension(
"pyemma.thermo.extensions.trammbar",
sources=["pyemma/thermo/extensions/trammbar/trammbar.pyx",
"pyemma/thermo/extensions/tram/_tram.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args + ["-DTRAMMBAR"])
ext_mbar_direct = Extension(
"pyemma.thermo.extensions.mbar_direct",
sources=["pyemma/thermo/extensions/mbar_direct/mbar_direct.pyx",
"pyemma/thermo/extensions/mbar_direct/_mbar_direct.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_tram_direct = Extension(
"pyemma.thermo.extensions.tram_direct",
sources=["pyemma/thermo/extensions/tram_direct/tram_direct.pyx",
"pyemma/thermo/extensions/tram_direct/_tram_direct.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_trammbar_direct = Extension(
"pyemma.thermo.extensions.trammbar_direct",
sources=["pyemma/thermo/extensions/trammbar_direct/trammbar_direct.pyx",
"pyemma/thermo/extensions/tram_direct/_tram_direct.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args + ["-DTRAMMBAR"])
ext_util = Extension(
"pyemma.thermo.extensions.util",
sources=["pyemma/thermo/extensions/util/util.pyx",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
exts_thermo = [
ext_bar,
ext_wham,
ext_mbar,
ext_tram,
ext_dtram,
ext_trammbar,
ext_mbar_direct,
ext_tram_direct,
ext_trammbar_direct,
ext_util]
exts = [clustering_module,
covar_module,
eig_qr_module,
orderedset
]
exts += exts_thermo
    # Note that we append the NumPy include directory to every extension after declaration.
np_inc = np_get_include()
for e in exts:
e.include_dirs.append(np_inc)
exts = cythonize(exts, language_level=sys.version_info[0])
return exts
def get_cmdclass():
versioneer_cmds = versioneer.get_cmdclass()
from setuptools.command.build_ext import build_ext
# taken from https://github.com/pybind/python_example/blob/master/setup.py
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc'],
'unix': [],
}
def build_extensions(self):
from setup_util import cpp_flag, has_flag, detect_openmp
# enable these options only for clang, OSX
if sys.platform == 'darwin':
import sysconfig
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if 'clang' in str(compiler):
self.c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
elif ct == 'msvc':
opts.append('/std:c++17')
opts.append('/bigobj')
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
# setup OpenMP support
openmp_enabled, additional_libs = detect_openmp(self.compiler)
if openmp_enabled:
warnings.warn('enabled openmp')
if sys.platform == 'darwin':
omp_compiler_args = ['-fopenmp=libiomp5']
else:
omp_compiler_args = ['-fopenmp']
omp_libraries = ['-l%s' % l for l in additional_libs]
omp_defines = [('USE_OPENMP', None)]
# debug
if self.debug:
dbg_flag = ['-g']
else:
dbg_flag = ['-g0', '-DNDEBUG']
for ext in self.extensions:
if ext.language == 'c++':
ext.extra_compile_args = opts + dbg_flag
elif ext.language is None: # C
ext.extra_compile_args += dbg_flag
if openmp_enabled:
ext.extra_compile_args += omp_compiler_args
ext.extra_link_args += omp_libraries
ext.define_macros += omp_defines
build_ext.build_extensions(self)
versioneer_cmds['build_ext'] = BuildExt
return versioneer_cmds
metadata = dict(
name='pyEMMA',
maintainer='Martin K. Scherer',
maintainer_email='m.scherer@fu-berlin.de',
author='The Emma team',
author_email='info@emma-project.org',
url='http://github.com/markovmodel/PyEMMA',
license='LGPLv3+',
description=DOCLINES[0],
long_description=open('README.rst', encoding='utf8').read(),
version=versioneer.get_version(),
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
classifiers=[c for c in CLASSIFIERS.split('\n') if c],
keywords='Markov State Model Algorithms',
cmdclass=get_cmdclass(),
tests_require=['pytest'],
# runtime dependencies
install_requires=[
'bhmm>=0.6,<0.7',
'decorator>=4.0.0',
'h5py>=2.7.1',
'matplotlib',
'mdtraj>=1.9.2',
'msmtools>=1.2',
'numpy>=1.8.0',
'pathos',
'psutil>=3.1.1',
'pyyaml',
'scipy>=0.11',
'tqdm>=4.23',
'deeptime'
# 'deeptime @ git+https://git@github.com/deeptime-ml/deeptime.git@main#egg=deeptime' # for re-integration purposes
],
zip_safe=False,
entry_points={
'console_scripts': ['pyemma_list_models=pyemma._base.serialization.cli:main']
},
package_data={
'pyemma': ['pyemma.cfg', 'logging.yml'],
'pyemma.coordinates.tests': ['data/*'],
'pyemma.msm.tests': ['data/*'],
'pyemma.datasets': ['*.npz'],
'pyemma.util.tests': ['data/*'],
},
# packages are found if their folder contains an __init__.py,
packages=find_packages(),
)
if __name__ == '__main__':
if parse_setuppy_commands():
# only require numpy and extensions in case of building/installing
# first initialize submodules
if os.path.exists('.git'):
import subprocess
modules = [os.path.join('ext', 'pybind11')]
cmd = "git submodule update --init {mod}"
for m in modules:
subprocess.check_call(cmd.format(mod=m).split(' '))
# now build extension list.
metadata['ext_modules'] = extensions()
setup(**metadata)
| markovmodel/PyEMMA | setup.py | Python | lgpl-3.0 | 12,936 | [
"MDTraj"
] | cdf2e3db831ca89786616fa02647ce087355b7e0d4a7552f1921b57267d6f678 |
from __future__ import division, print_function
import numpy as np
import netCDF4 as nc
import sys
import os
import fnmatch
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv, ut_reconstr
from shortest_element_path import shortest_element_path
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
import seaborn
#import scipy.io as sio
class FVCOM:
'''
A class for FVCOM data.
As of right now, only takes a filename as input. It will then load in the
data (except for timeseries, since loading in the whole time series can be
too large)
ax can be defined as a region, i.e. a bounding box.
An example:
ax = [min(lon_coord), max(lon_coord), min(lat_coord), max(lat_coord)]
'''
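    # A hypothetical usage sketch (the path and bounds mirror the
    # __main__ block at the end of this file):
    #     fv = FVCOM('/home/wesley/ncfiles/smallcape_force_0001.nc',
    #                ax=[-66.3419, -66.3324, 44.2755, 44.2815])
    #     fv.harmonics(0, cnstit='auto', notrend=True, nodiagn=True)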
def __init__(self, filename, elements=slice(None), ax=[], onlyGrid=False, debug=False):
self.QC = ['raw data']
if ax:
self.ax = ax
else:
#self.ax = [min(self.lon), max(self.lon), min(self.lat), max(self.lat)]
self.ax = elements
self.debug = debug
if onlyGrid:
self.loadGrid(filename)
else:
self.isMulti(filename, self.ax)
def isMulti(self, filename, ax):
split = filename.split('/')
if split[-1]:
self.multi = False
self.load(filename, ax)
else:
self.multi = True
self.loadMulti(filename, ax)
def loadMulti(self, filename, ax):
self.matches = self.findFiles(filename)
#self.loadGrid(filename)
self.x = np.array([])
self.y = np.array([])
self.xc = np.array([])
self.yc = np.array([])
self.lonc = np.array([])
self.latc = np.array([])
self.lon = np.array([])
self.lat = np.array([])
self.siglay = np.array([])
self.siglev = np.array([])
self.h = np.array([])
self.time = np.array([])
self.u = np.array([])
self.v = np.array([])
self.ww = np.array([])
self.ua = np.array([])
self.va = np.array([])
self.elev = np.array([])
for i, v in enumerate(self.matches):
data = nc.Dataset(v, 'r')
x = data.variables['x'][:]
y = data.variables['y'][:]
xc = data.variables['xc'][:]
yc = data.variables['yc'][:]
lon = data.variables['lon'][:]
lat = data.variables['lat'][:]
lonc = data.variables['lonc'][:]
latc = data.variables['latc'][:]
siglay = data.variables['siglay'][:]
siglev = data.variables['siglev'][:]
h = data.variables['h'][:]
time = data.variables['time'][:]
            # WES_COMMENT: el_region()/node_region() read the class
            # variables self.lonc, self.latc, etc., so point them at the
            # current file's coordinates while computing the regions, then
            # restore the accumulated arrays so the hstack below does not
            # append this file's coordinates twice.
            acc = self.lon, self.lat, self.lonc, self.latc
            self.lon, self.lat, self.lonc, self.latc = lon, lat, lonc, latc
            self.el_region()
            self.node_region()
            self.lon, self.lat, self.lonc, self.latc = acc
try:
u = data.variables['u'][:, :, self.region_e]
v = data.variables['v'][:, :, self.region_e]
ww = data.variables['ww'][:, :, self.region_e]
self.D3 = True
except KeyError:
self.D3 = False
            print(self.region_e)
            print(data.variables['ua'].shape)
ua = data.variables['ua'][:, self.region_e]
va = data.variables['va'][:, self.region_e]
elev = data.variables['zeta'][:, self.region_n]
self.x = np.hstack((self.x, x))
self.y = np.hstack((self.y, y))
self.xc = np.hstack((self.xc, xc))
self.yc = np.hstack((self.yc, yc))
self.lon = np.hstack((self.lon, lon))
self.lat = np.hstack((self.lat, lat))
self.lonc = np.hstack((self.lonc, lonc))
self.latc = np.hstack((self.latc, latc))
self.h = np.hstack((self.h, h))
self.time = np.hstack((self.time, time))
if i == 0:
self.siglay = siglay
self.siglev = siglev
self.ua = ua
self.va = va
self.elev = elev
if self.D3:
self.u = u
self.v = v
self.ww = ww
else:
self.siglay = np.vstack((self.siglay, siglay))
self.siglev = np.vstack((self.siglev, siglev))
self.ua = np.vstack((self.ua, ua))
self.va = np.vstack((self.va, va))
self.elev = np.vstack((self.elev, elev))
if self.D3:
self.u = np.vstack((self.u, u))
self.v = np.vstack((self.v, v))
self.ww = np.vstack((self.ww, ww))
def findFiles(self, filename):
'''
Wesley comment: the name needs to be a linux expression to find files
you want. For multiple station files, this would work
name = '*station*.nc'
For just dngrid_0001 and no restart files:
name = 'dngrid_0*.nc'
will work
'''
# WES_COMMENT: This has been hardcoded, and once we have a regular
# naming convention a hard-coded name will work fine.
        # name = 'dngrid_0*.nc'
        name = 'small*.nc'
self.matches = []
        for root, dirnames, filenames in os.walk(filename):
            for fname in fnmatch.filter(filenames, name):
                self.matches.append(os.path.join(root, fname))
return sorted(self.matches)
def el_region(self):
self.region_e = np.argwhere((self.lonc >= self.ax[0]) &
(self.lonc <= self.ax[1]) &
(self.latc >= self.ax[2]) &
(self.latc <= self.ax[3]))
self.region_e = self.region_e.flatten()
self.QC.append('Made region for elements out of {}'.format(self.ax))
if self.debug:
            print(self.region_e)
def node_region(self):
self.region_n = np.argwhere((self.lon >= self.ax[0]) &
(self.lon <= self.ax[1]) &
(self.lat >= self.ax[2]) &
(self.lat <= self.ax[3]))
self.region_n = self.region_n.flatten()
self.QC.append('Made region for nodes out of {}'.format(self.ax))
if self.debug:
            print(self.region_n)
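    # A minimal sketch of the bounding-box masking used above
    # (hypothetical coordinates):
    #     lon = np.array([-66.35, -66.30, -66.20])
    #     np.argwhere((lon >= -66.34) & (lon <= -66.25)).flatten()
    #     # -> array([1])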
def loadGrid(self, filename):
self.data = nc.Dataset(filename, 'r')
self.x = self.data.variables['x'][:]
self.y = self.data.variables['y'][:]
self.xc = self.data.variables['xc'][:]
self.yc = self.data.variables['yc'][:]
self.lon = self.data.variables['lon'][:]
self.lat = self.data.variables['lat'][:]
self.lonc = self.data.variables['lonc'][:]
self.latc = self.data.variables['latc'][:]
self.nbe = self.data.variables['nbe'][:]
self.nv = self.data.variables['nv'][:]
# Make Trinode available for Python indexing
self.trinodes = self.nv.T - 1
# get time and adjust it to matlab datenum
self.julianTime = self.data.variables['time'][:]
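        # 678942 is the offset between the model's Modified Julian Day
        # time axis (epoch 1858-11-17) and MATLAB's datenum epoch, so
        # adding it yields MATLAB-style datenums.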
self.time = self.julianTime + 678942
self.QC.append('Changed time from Julian to matlab datenum')
def load(self, filename, ax):
self.loadGrid(filename)
self.h = self.data.variables['h'][:]
self.nbe = self.data.variables['nbe'][:]
self.a1u = self.data.variables['a1u'][:]
self.a2u = self.data.variables['a2u'][:]
self.aw0 = self.data.variables['aw0'][:]
self.awx = self.data.variables['awx'][:]
self.awy = self.data.variables['awy'][:]
self.siglay = self.data.variables['siglay'][:]
self.siglev = self.data.variables['siglev'][:]
self.nv = self.data.variables['nv'][:]
# Make Trinode available for Python indexing
self.trinodes = self.nv.T - 1
# get time and adjust it to matlab datenum
self.julianTime = self.data.variables['time'][:]
self.time = self.julianTime + 678942
self.QC.append('Changed time from Julian to matlab datenum')
        # Use len() to get the size of a netCDF4 Dimension object
        self.nele = len(self.data.dimensions['nele'])
        self.node = len(self.data.dimensions['node'])
# Get regions
if len(ax) == 4:
self.el_region()
self.node_region()
else:
            print(ax)
#print ax.shape
self.region_e = self.closest_point(ax[0], ax[1])
self.region_n = self.closest_point(ax[0], ax[1], center=False)
            print(self.region_e, self.region_n)
# elev timeseries
        print(self.data.variables['zeta'].shape)
self.elev = self.data.variables['zeta'][:, self.region_n]
try:
self.ww = self.data.variables['ww'][:, :, self.region_e]
self.u = self.data.variables['u'][:, :, self.region_e]
self.v = self.data.variables['v'][:, :, self.region_e]
self.ua = self.data.variables['ua'][:, self.region_e]
self.va = self.data.variables['va'][:, self.region_e]
self.D3 = True
except KeyError:
self.ua = self.data.variables['ua'][:, self.region_e]
self.va = self.data.variables['va'][:, self.region_e]
self.D3 = False
def centers(self, elements):
'''Currently doesn't work with whole grid'''
size = self.trinodes.T[elements].shape[0]
size1 = self.elev.shape[0]
elc = np.zeros((size1, size))
hc = np.zeros((size))
for ind, value in enumerate(self.trinodes.T[elements]):
elc[:, ind] = np.mean(self.elev[:, value-1], axis=1)
hc[ind] = np.mean(self.h[value-1])
return elc, hc
def closest_point(self, pt_lon, pt_lat, center=True):
# def closest_point(self, pt_lon, pt_lat, lon, lat):
'''
Finds the closest exact lon, lat to a lon, lat coordinate.
Example input:
closest_point([-65.37], [45.34], lon, lat)
where lon, lat are from data
'''
points = np.array([pt_lon, pt_lat]).T
# point_list = np.array([lon,lat]).T
if center:
point_list = np.array([self.lonc, self.latc]).T
else:
point_list = np.array([self.lon, self.lat]).T
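        # (point_list[:, 0] - points[:, 0, None]) broadcasts an (n_grid,)
        # row against an (n_points, 1) column into an (n_points, n_grid)
        # array of squared distances; argmin over axis 1 then gives the
        # nearest grid index for each query point.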
closest_dist = ((point_list[:, 0] - points[:, 0, None])**2 +
(point_list[:, 1] - points[:, 1, None])**2)
closest_point_indexes = np.argmin(closest_dist, axis=1)
return closest_point_indexes
def harmonics(self, ind, twodim=True, **kwarg):
if twodim:
self.coef = ut_solv(self.time, self.ua[:, ind], self.va[:, ind],
self.lat[ind], **kwarg)
self.QC.append('ut_solv done for velocity')
        else:
            # Elevation harmonics use the sea-surface elevation series,
            # matching the QC message below.
            self.coef = ut_solv(self.time, self.elev[:, ind], [],
                                self.lat[ind], **kwarg)
            self.QC.append('ut_solv done for elevation')
def reconstr(self, time):
if self.coef['aux']['opt']['twodim']:
self.U, self.V = ut_reconstr(time, self.coef)
self.QC.append('ut_reconstr done for velocity')
else:
self.ts_recon, _ = ut_reconstr(time, self.coef)
self.QC.append('ut_reconstr done for elevation')
def graphGrid(self):
nv = self.nv.T - 1
h = self.h
        tri = Tri.Triangulation(self.lon, self.lat, triangles=nv)
levels = np.arange(-38, -4, 1) # depth contours to plot
fig = plt.figure(figsize=(18, 10))
plt.rc('font', size='22')
ax = fig.add_subplot(111, aspect=(1.0/np.cos(np.mean(self.lat)*np.pi/180.0)))
plt.tricontourf(tri, -h, levels=levels, shading='faceted', cmap=plt.cm.gist_earth)
plt.triplot(tri)
plt.ylabel('Latitude')
plt.xlabel('Longitude')
plt.gca().patch.set_facecolor('0.5')
cbar = plt.colorbar()
cbar.set_label('Water Depth (m)', rotation=-90, labelpad=30)
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
ax.xaxis.set_major_formatter(ticks)
ax.yaxis.set_major_formatter(ticks)
plt.grid()
plt.show()
if __name__ == '__main__':
filename = '/home/wesley/ncfiles/smallcape_force_0001.nc'
#filename = '/home/wesley/ncfiles/'
ind = [-66.3419, -66.3324, 44.2755, 44.2815]
test = FVCOM(filename, ax=ind)
test.harmonics(0, cnstit='auto', notrend=True, nodiagn=True)
#test.reconstr(test.time)
#test.closest_point([-66.3385], [44.277])
#t = shortest_element_path(test.latc,test.lonc,test.lat,test.lon,test.nv,test.h)
#elements, _ = t.getTargets([[41420,39763],[48484,53441],
# [27241,24226],[21706,17458]])
# t.graphGrid()
| wesleybowman/karsten | project/fvcomClass.py | Python | mit | 13,827 | [
"NetCDF"
] | 79762dc2e1d14cbb3617949eefb7eabb5ff469b8a20ce7d4936e352a5320c42c |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
def new_pulse_fig(figsize):
'''
Open a new figure and configure it to plot pulse schemes.
'''
fig, ax = plt.subplots(1, 1, figsize=figsize, frameon=False)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return fig, ax
def new_pulse_subplot(fig, *args, **kwargs):
'''
Add a new subplot configured for plotting pulse schemes to a figure.
All *args and **kwargs are passed to fig.add_subplot.
'''
ax = fig.add_subplot(*args, **kwargs)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return ax
def mwPulse(ax, pos, width=1.5, amp=1, label=None, phase=0, labelHeight=1.3,
color='C0'):
'''
Draw a microwave pulse: Gaussian envelope with modulation.
'''
x = np.linspace(pos, pos + width, 100)
envPos = amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
envNeg = -amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
mod = envPos * np.sin(2 * np.pi * 3 / width * x + phase)
ax.plot(x, envPos, '--', color=color)
ax.plot(x, envNeg, '--', color=color)
ax.plot(x, mod, '-', color=color)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='center', color=color)
return pos + width
def fluxPulse(ax, pos, width=2.5, s=.1, amp=1.5, label=None, labelHeight=1.7,
color='C1'):
'''
Draw a smooth flux pulse, where the rising and falling edges are given by
Fermi-Dirac functions.
s: smoothness of edge
'''
x = np.linspace(pos, pos + width, 100)
y = amp / ((np.exp(-(x - (pos + 5.5 * s)) / s) + 1) *
(np.exp((x - (pos + width - 5.5 * s)) / s) + 1))
ax.fill_between(x, y, color=color, alpha=0.3)
ax.plot(x, y, color=color)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='center', color=color)
return pos + width
def ramZPulse(ax, pos, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
Draw a Ram-Z flux pulse, i.e. only part of the pulse is shaded, to indicate
cutting off the pulse at some time.
'''
xLeft = np.linspace(pos, pos + sep, 100)
xRight = np.linspace(pos + sep, pos + width, 100)
xFull = np.concatenate((xLeft, xRight))
y = amp / ((np.exp(-(xFull - (pos + 5.5 * s)) / s) + 1) *
(np.exp((xFull - (pos + width - 5.5 * s)) / s) + 1))
yLeft = y[:len(xLeft)]
ax.fill_between(xLeft, yLeft, alpha=0.3, color=color, linewidth=0.0)
ax.plot(xFull, y, color=color)
return pos + width
def interval(ax, start, stop, height=1.5, label=None, labelHeight=None,
vlines=True, color='k', arrowstyle='<|-|>'):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = height + 0.2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(start, height), posB=(stop, height), arrowstyle=arrowstyle,
color=color, mutation_scale=7)
ax.add_patch(arrow)
if vlines:
ax.plot([start, start], [0, labelHeight], '--', color=color)
ax.plot([stop, stop], [0, labelHeight], '--', color=color)
if label is not None:
ax.text((start + stop) / 2, labelHeight, label, color=color,
horizontalalignment='center')
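# A minimal usage sketch (not part of the original module): compose a
# simple scheme from a microwave pulse, a flux pulse and an interval
# marker. Figure size, positions and labels are illustrative values.
if __name__ == '__main__':
    fig, ax = new_pulse_fig(figsize=(8, 2))
    pos = mwPulse(ax, 0, label=r'$X_{\pi/2}$')
    pos = fluxPulse(ax, pos + 0.2, label='CZ')
    interval(ax, 0, pos, height=1.9, label=r'$t$')
    ax.set_xlim(-0.5, pos + 0.5)
    ax.set_ylim(-2.0, 2.2)
    plt.show()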
| QudevETH/PycQED_py3 | pycqed/utilities/pulse_scheme.py | Python | mit | 3,498 | [
"DIRAC",
"Gaussian"
] | cb42f7add22583573b5e8fa0499909914fad93fb4dc73f97303679ce57dae385 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
import espressomd.observables
def calc_com_x(system, x, id_list):
"""Mass-weighted average, skipping virtual sites"""
masses = system.part[id_list].mass
# Filter out virtual particles by using mass=0 for them
virtual = system.part[id_list].virtual
for i in range(len(masses)):
if virtual[i]:
masses[i] = 0.
com_x = np.average(
getattr(system.part[id_list], x), weights=masses, axis=0)
return com_x
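# E.g. masses [1.0, 3.0] at x-positions [0.0, 4.0] give a center of mass
# of (1.0 * 0.0 + 3.0 * 4.0) / 4.0 = 3.0; virtual particles get weight 0.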
class Observables(ut.TestCase):
N_PART = 200
# Handle for espresso system
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.part.add(
id=np.arange(3, 3 + 2 * N_PART, 2),
pos=np.random.random((N_PART, 3)) * system.box_l,
v=np.random.random((N_PART, 3)) * 3.2 - 1,
f=np.random.random((N_PART, 3)))
if espressomd.has_features(["MASS"]):
system.part[:].mass = np.random.random(N_PART)
if espressomd.has_features(["DIPOLES"]):
system.part[:].dip = np.random.random((N_PART, 3)) - .3
if espressomd.has_features(["ROTATION"]):
system.part[:].omega_body = np.random.random((N_PART, 3)) - .5
system.part[:].torque_lab = np.random.random((N_PART, 3)) - .5
system.part[:].quat = np.random.random((N_PART, 4))
if espressomd.has_features("DIPOLES"):
system.part[:].dipm = np.random.random(N_PART) + 2
if espressomd.has_features("ELECTROSTATICS"):
system.part[:].q = np.random.random(N_PART)
if espressomd.has_features("VIRTUAL_SITES"):
p = system.part[system.part[:].id[8]]
p.virtual = True
def generate_test_for_pid_observable(
_obs_name, _pprop_name, _agg_type=None):
"""Generates test cases for observables working on particle id lists.
"""
pprop_name = _pprop_name
obs_name = _obs_name
agg_type = _agg_type
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# Randomly pick a subset of the particles
id_list = sorted(
np.random.choice(
self.system.part[:].id,
size=int(
self.N_PART * .9),
replace=False))
            for pid in id_list:
                self.assertTrue(self.system.part.exists(pid))
# Get data from particles
if pprop_name == "f":
for p_id in id_list:
if self.system.part[p_id].virtual:
id_list.remove(p_id)
part_data = getattr(self.system.part[id_list], pprop_name)
# Reshape and aggregate to linear array
if len(part_data.shape) > 1:
if agg_type == "average":
part_data = np.average(part_data, 0)
if agg_type == "sum":
part_data = np.sum(part_data, 0)
if agg_type == 'com':
part_data = calc_com_x(self.system, pprop_name, id_list)
# Data from observable
observable = obs_name(ids=id_list)
obs_data = observable.calculate()
# Check
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_equal(id_list, observable.ids)
np.testing.assert_array_almost_equal(
obs_data,
part_data, err_msg="Data did not agree for observable " +
str(obs_name) +
" and particle property " +
pprop_name, decimal=11)
# Test setters and getters
self.assertEqual(observable.ids, id_list)
with self.assertRaises(RuntimeError):
observable.ids = [observable.ids[0]]
return func
test_pos = generate_test_for_pid_observable(
espressomd.observables.ParticlePositions, "pos")
test_v = generate_test_for_pid_observable(
espressomd.observables.ParticleVelocities, "v")
test_f = generate_test_for_pid_observable(
espressomd.observables.ParticleForces, "f")
test_com_position = generate_test_for_pid_observable(
espressomd.observables.ComPosition, 'pos', 'com')
test_com_velocity = generate_test_for_pid_observable(
espressomd.observables.ComVelocity, 'v', 'com')
if espressomd.has_features(["DIPOLES"]):
test_mag_dip = generate_test_for_pid_observable(
espressomd.observables.MagneticDipoleMoment, "dip", "sum")
if espressomd.has_features(["ROTATION"]):
test_body_angular_velocity = generate_test_for_pid_observable(
espressomd.observables.ParticleBodyAngularVelocities, "omega_body")
test_lab_angular_velocity = generate_test_for_pid_observable(
espressomd.observables.ParticleAngularVelocities, "omega_lab")
@utx.skipIfMissingFeatures(['ROTATION'])
def test_particle_body_velocities(self):
obs = espressomd.observables.ParticleBodyVelocities(
ids=self.system.part[:].id)
obs_data = obs.calculate()
part_data = np.array([p.convert_vector_space_to_body(p.v)
for p in self.system.part])
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_array_almost_equal(part_data, obs_data,
err_msg="Data did not agree for observable ParticleBodyVelocities and particle derived values.",
decimal=9)
def test_energy(self):
s = self.system.analysis.energy()["total"]
obs_data = espressomd.observables.Energy().calculate()
self.assertEqual(obs_data.shape, (1,))
np.testing.assert_array_almost_equal(
obs_data,
s,
err_msg="Energy from analysis and observable did not agree",
decimal=9)
def test_pressure(self):
s = self.system.analysis.pressure()["total"]
obs_data = espressomd.observables.Pressure().calculate()
self.assertEqual(obs_data.shape, (1,))
np.testing.assert_array_almost_equal(
obs_data,
s,
err_msg="Pressure from analysis and observable did not agree",
decimal=9)
def test_pressure_tensor(self):
s = self.system.analysis.pressure_tensor()["total"]
obs_data = espressomd.observables.PressureTensor().calculate()
self.assertEqual(obs_data.shape, s.shape)
np.testing.assert_array_almost_equal(
obs_data,
s,
err_msg="Pressure tensor from analysis and observable did not agree",
decimal=9)
@utx.skipIfMissingFeatures('ELECTROSTATICS')
def test_dipolemoment(self):
obs = espressomd.observables.DipoleMoment(ids=self.system.part[:].id)
obs_data = obs.calculate()
part_data = self.system.part[:].q.dot(self.system.part[:].pos)
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_array_almost_equal(
obs_data, part_data, err_msg="Data did not agree for observable 'DipoleMoment'", decimal=9)
def test_com_force(self):
id_list = sorted(
np.random.choice(
self.system.part[:].id,
size=int(
self.N_PART * .9),
replace=False))
particles = self.system.part.select(
lambda p: p.id in id_list and not p.virtual)
np.testing.assert_allclose(
np.sum(particles.f, axis=0),
espressomd.observables.TotalForce(ids=id_list).calculate())
if __name__ == "__main__":
ut.main()
| fweik/espresso | testsuite/python/observables.py | Python | gpl-3.0 | 8,599 | [
"ESPResSo"
] | cf6e476ed40509272e858cd6c1d1e7372012c6fd13e4d88e122b1166b449a3aa |
"""Plotting functions for visualizing distributions."""
from numbers import Number
from functools import partial
import math
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as tx
from matplotlib.colors import to_rgba
from matplotlib.collections import LineCollection
from ._core import (
VectorPlotter,
)
from ._statistics import (
KDE,
Histogram,
ECDF,
)
from .axisgrid import (
FacetGrid,
_facet_docs,
)
from .utils import (
remove_na,
_kde_support,
_normalize_kwargs,
_check_argument,
_assign_default_kwargs,
_default_color,
)
from .palettes import color_palette
from .external import husl
from .external.kde import gaussian_kde
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["displot", "histplot", "kdeplot", "ecdfplot", "rugplot", "distplot"]
# ==================================================================================== #
# Module documentation
# ==================================================================================== #
_dist_params = dict(
multiple="""
multiple : {{"layer", "stack", "fill"}}
Method for drawing multiple elements when semantic mapping creates subsets.
Only relevant with univariate data.
""",
log_scale="""
log_scale : bool or number, or pair of bools or numbers
Set axis scale(s) to log. A single value sets the data axis for univariate
distributions and both axes for bivariate distributions. A pair of values
sets each axis independently. Numeric values are interpreted as the desired
base (default 10). If `False`, defer to the existing Axes scale.
""",
legend="""
legend : bool
If False, suppress the legend for semantic variables.
""",
cbar="""
cbar : bool
If True, add a colorbar to annotate the color mapping in a bivariate plot.
Note: Does not currently support plots with a ``hue`` variable well.
""",
cbar_ax="""
cbar_ax : :class:`matplotlib.axes.Axes`
Pre-existing axes for the colorbar.
""",
cbar_kws="""
cbar_kws : dict
Additional parameters passed to :meth:`matplotlib.figure.Figure.colorbar`.
""",
)
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
facets=DocstringComponents(_facet_docs),
dist=DocstringComponents(_dist_params),
kde=DocstringComponents.from_function_params(KDE.__init__),
hist=DocstringComponents.from_function_params(Histogram.__init__),
ecdf=DocstringComponents.from_function_params(ECDF.__init__),
)
# ==================================================================================== #
# Internal API
# ==================================================================================== #
class _DistributionPlotter(VectorPlotter):
semantics = "x", "y", "hue", "weights"
wide_structure = {"x": "@values", "hue": "@columns"}
flat_structure = {"x": "@values"}
def __init__(
self,
data=None,
variables={},
):
super().__init__(data=data, variables=variables)
@property
def univariate(self):
"""Return True if only x or y are used."""
# TODO this could go down to core, but putting it here now.
# We'd want to be conceptually clear that univariate only applies
# to x/y and not to other semantics, which can exist.
# We haven't settled on a good conceptual name for x/y.
return bool({"x", "y"} - set(self.variables))
@property
def data_variable(self):
"""Return the variable with data for univariate plots."""
# TODO This could also be in core, but it should have a better name.
if not self.univariate:
raise AttributeError("This is not a univariate plot")
return {"x", "y"}.intersection(self.variables).pop()
@property
def has_xy_data(self):
"""Return True at least one of x or y is defined."""
# TODO see above points about where this should go
return bool({"x", "y"} & set(self.variables))
def _add_legend(
self,
ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,
):
"""Add artists that reflect semantic mappings and put then in a legend."""
# TODO note that this doesn't handle numeric mappings like the relational plots
handles = []
labels = []
for level in self._hue_map.levels:
color = self._hue_map(level)
kws = self._artist_kws(
artist_kws, fill, element, multiple, color, alpha
)
# color gets added to the kws to workaround an issue with barplot's color
# cycle integration but it causes problems in this context where we are
# setting artist properties directly, so pop it off here
if "facecolor" in kws:
kws.pop("color", None)
handles.append(artist(**kws))
labels.append(level)
if isinstance(ax_obj, mpl.axes.Axes):
ax_obj.legend(handles, labels, title=self.variables["hue"], **legend_kws)
else: # i.e. a FacetGrid. TODO make this better
legend_data = dict(zip(labels, handles))
ax_obj.add_legend(
legend_data,
title=self.variables["hue"],
label_order=self.var_levels["hue"],
**legend_kws
)
def _artist_kws(self, kws, fill, element, multiple, color, alpha):
"""Handle differences between artists in filled/unfilled plots."""
kws = kws.copy()
if fill:
kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)
kws.setdefault("facecolor", to_rgba(color, alpha))
if element == "bars":
# Make bar() interface with property cycle correctly
# https://github.com/matplotlib/matplotlib/issues/19385
kws["color"] = "none"
if multiple in ["stack", "fill"] or element == "bars":
kws.setdefault("edgecolor", mpl.rcParams["patch.edgecolor"])
else:
kws.setdefault("edgecolor", to_rgba(color, 1))
elif element == "bars":
kws["facecolor"] = "none"
kws["edgecolor"] = to_rgba(color, alpha)
else:
kws["color"] = to_rgba(color, alpha)
return kws
def _quantile_to_level(self, data, quantile):
"""Return data levels corresponding to quantile cuts of mass."""
isoprop = np.asarray(quantile)
values = np.ravel(data)
sorted_values = np.sort(values)[::-1]
normalized_values = np.cumsum(sorted_values) / values.sum()
idx = np.searchsorted(normalized_values, 1 - isoprop)
levels = np.take(sorted_values, idx, mode="clip")
return levels
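    # E.g. for normalized density values [.4, .3, .2, .1], quantile=0.5
    # returns 0.3: the highest level enclosing at least 50% of the mass.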
def _cmap_from_color(self, color):
"""Return a sequential colormap given a color seed."""
# Like so much else here, this is broadly useful, but keeping it
# in this class to signify that I haven't thought overly hard about it...
r, g, b, _ = to_rgba(color)
h, s, _ = husl.rgb_to_husl(r, g, b)
xx = np.linspace(-1, 1, int(1.15 * 256))[:256]
ramp = np.zeros((256, 3))
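        # Hold hue fixed, modulate saturation with a cosine, and ramp
        # lightness from 35 to 80; the reversal below maps high data
        # values to the darker end of the colormap.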
ramp[:, 0] = h
ramp[:, 1] = s * np.cos(xx)
ramp[:, 2] = np.linspace(35, 80, 256)
colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)
return mpl.colors.ListedColormap(colors[::-1])
def _default_discrete(self):
"""Find default values for discrete hist estimation based on variable type."""
if self.univariate:
discrete = self.var_types[self.data_variable] == "categorical"
else:
discrete_x = self.var_types["x"] == "categorical"
discrete_y = self.var_types["y"] == "categorical"
discrete = discrete_x, discrete_y
return discrete
def _resolve_multiple(self, curves, multiple):
"""Modify the density data structure to handle multiple densities."""
# Default baselines have all densities starting at 0
baselines = {k: np.zeros_like(v) for k, v in curves.items()}
        # TODO we should have some central clearinghouse for checking if any
        # "grouping" (terminology?) semantics have been assigned
if "hue" not in self.variables:
return curves, baselines
if multiple in ("stack", "fill"):
# Setting stack or fill means that the curves share a
# support grid / set of bin edges, so we can make a dataframe
# Reverse the column order to plot from top to bottom
curves = pd.DataFrame(curves).iloc[:, ::-1]
# Find column groups that are nested within col/row variables
column_groups = {}
for i, keyd in enumerate(map(dict, curves.columns.tolist())):
facet_key = keyd.get("col", None), keyd.get("row", None)
column_groups.setdefault(facet_key, [])
column_groups[facet_key].append(i)
baselines = curves.copy()
for cols in column_groups.values():
norm_constant = curves.iloc[:, cols].sum(axis="columns")
# Take the cumulative sum to stack
curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis="columns")
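                # E.g. per-bin heights [2, 3] across two hue columns become
                # cumulative [2, 5]; "fill" below divides by the row total
                # (5) to give [0.4, 1.0].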
# Normalize by row sum to fill
if multiple == "fill":
curves.iloc[:, cols] = (curves
.iloc[:, cols]
.div(norm_constant, axis="index"))
# Define where each segment starts
baselines.iloc[:, cols] = (curves
.iloc[:, cols]
.shift(1, axis=1)
.fillna(0))
if multiple == "dodge":
# Account for the unique semantic (non-faceting) levels
            # This will require rethinking if we add other semantics!
hue_levels = self.var_levels["hue"]
n = len(hue_levels)
for key in curves:
level = dict(key)["hue"]
hist = curves[key].reset_index(name="heights")
hist["widths"] /= n
hist["edges"] += hue_levels.index(level) * hist["widths"]
curves[key] = hist.set_index(["edges", "widths"])["heights"]
return curves, baselines
# -------------------------------------------------------------------------------- #
# Computation
# -------------------------------------------------------------------------------- #
def _compute_univariate_density(
self,
data_variable,
common_norm,
common_grid,
estimate_kws,
log_scale,
warn_singular=True,
):
# Initialize the estimator object
estimator = KDE(**estimate_kws)
all_data = self.plot_data.dropna()
if set(self.variables) - {"x", "y"}:
if common_grid:
all_observations = self.comp_data.dropna()
estimator.define_support(all_observations[data_variable])
else:
common_norm = False
densities = {}
for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
# Extract the data points from this sub set and remove nulls
observations = sub_data[data_variable]
observation_variance = observations.var()
if math.isclose(observation_variance, 0) or np.isnan(observation_variance):
msg = (
"Dataset has 0 variance; skipping density estimate. "
"Pass `warn_singular=False` to disable this warning."
)
if warn_singular:
warnings.warn(msg, UserWarning)
continue
# Extract the weights for this subset of observations
if "weights" in self.variables:
weights = sub_data["weights"]
else:
weights = None
# Estimate the density of observations at this level
density, support = estimator(observations, weights=weights)
if log_scale:
support = np.power(10, support)
# Apply a scaling factor so that the integral over all subsets is 1
if common_norm:
density *= len(sub_data) / len(all_data)
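                # E.g. a hue level holding 25% of the observations then
                # integrates to 0.25 instead of 1.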
# Store the density for this level
key = tuple(sub_vars.items())
densities[key] = pd.Series(density, index=support)
return densities
# -------------------------------------------------------------------------------- #
# Plotting
# -------------------------------------------------------------------------------- #
def plot_univariate_histogram(
self,
multiple,
element,
fill,
common_norm,
common_bins,
shrink,
kde,
kde_kws,
color,
legend,
line_kws,
estimate_kws,
**plot_kws,
):
# -- Default keyword dicts
kde_kws = {} if kde_kws is None else kde_kws.copy()
line_kws = {} if line_kws is None else line_kws.copy()
estimate_kws = {} if estimate_kws is None else estimate_kws.copy()
# -- Input checking
_check_argument("multiple", ["layer", "stack", "fill", "dodge"], multiple)
_check_argument("element", ["bars", "step", "poly"], element)
if estimate_kws["discrete"] and element != "bars":
raise ValueError("`element` must be 'bars' when `discrete` is True")
auto_bins_with_weights = (
"weights" in self.variables
and estimate_kws["bins"] == "auto"
and estimate_kws["binwidth"] is None
and not estimate_kws["discrete"]
)
if auto_bins_with_weights:
msg = (
"`bins` cannot be 'auto' when using weights. "
"Setting `bins=10`, but you will likely want to adjust."
)
warnings.warn(msg, UserWarning)
estimate_kws["bins"] = 10
# Simplify downstream code if we are not normalizing
if estimate_kws["stat"] == "count":
common_norm = False
# Now initialize the Histogram estimator
estimator = Histogram(**estimate_kws)
histograms = {}
# Do pre-compute housekeeping related to multiple groups
# TODO best way to account for facet/semantic?
if set(self.variables) - {"x", "y"}:
all_data = self.comp_data.dropna()
if common_bins:
all_observations = all_data[self.data_variable]
estimator.define_bin_params(
all_observations,
weights=all_data.get("weights", None),
)
else:
common_norm = False
# Estimate the smoothed kernel densities, for use later
if kde:
# TODO alternatively, clip at min/max bins?
kde_kws.setdefault("cut", 0)
kde_kws["cumulative"] = estimate_kws["cumulative"]
log_scale = self._log_scaled(self.data_variable)
densities = self._compute_univariate_density(
self.data_variable,
common_norm,
common_bins,
kde_kws,
log_scale,
warn_singular=False,
)
# First pass through the data to compute the histograms
for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
# Prepare the relevant data
key = tuple(sub_vars.items())
observations = sub_data[self.data_variable]
if "weights" in self.variables:
weights = sub_data["weights"]
else:
weights = None
# Do the histogram computation
heights, edges = estimator(observations, weights=weights)
# Rescale the smoothed curve to match the histogram
if kde and key in densities:
density = densities[key]
if estimator.cumulative:
hist_norm = heights.max()
else:
hist_norm = (heights * np.diff(edges)).sum()
densities[key] *= hist_norm
# Convert edges back to original units for plotting
if self._log_scaled(self.data_variable):
edges = np.power(10, edges)
# Pack the histogram data and metadata together
orig_widths = np.diff(edges)
widths = shrink * orig_widths
edges = edges[:-1] + (1 - shrink) / 2 * orig_widths
index = pd.MultiIndex.from_arrays([
pd.Index(edges, name="edges"),
pd.Index(widths, name="widths"),
])
hist = pd.Series(heights, index=index, name="heights")
# Apply scaling to normalize across groups
if common_norm:
hist *= len(sub_data) / len(all_data)
# Store the finalized histogram data for future plotting
histograms[key] = hist
# Modify the histogram and density data to resolve multiple groups
histograms, baselines = self._resolve_multiple(histograms, multiple)
if kde:
densities, _ = self._resolve_multiple(
densities, None if multiple == "dodge" else multiple
)
# Set autoscaling-related meta
sticky_stat = (0, 1) if multiple == "fill" else (0, np.inf)
if multiple == "fill":
# Filled plots should not have any margins
bin_vals = histograms.index.to_frame()
edges = bin_vals["edges"]
widths = bin_vals["widths"]
sticky_data = (
edges.min(),
edges.max() + widths.loc[edges.idxmax()]
)
else:
sticky_data = []
# --- Handle default visual attributes
# Note: default linewidth is determined after plotting
# Default alpha should depend on other parameters
if fill:
# Note: will need to account for other grouping semantics if added
if "hue" in self.variables and multiple == "layer":
default_alpha = .5 if element == "bars" else .25
elif kde:
default_alpha = .5
else:
default_alpha = .75
else:
default_alpha = 1
alpha = plot_kws.pop("alpha", default_alpha) # TODO make parameter?
hist_artists = []
# Go back through the dataset and draw the plots
for sub_vars, _ in self.iter_data("hue", reverse=True):
key = tuple(sub_vars.items())
hist = histograms[key].rename("heights").reset_index()
bottom = np.asarray(baselines[key])
ax = self._get_axes(sub_vars)
# Define the matplotlib attributes that depend on semantic mapping
if "hue" in self.variables:
sub_color = self._hue_map(sub_vars["hue"])
else:
sub_color = color
artist_kws = self._artist_kws(
plot_kws, fill, element, multiple, sub_color, alpha
)
if element == "bars":
# Use matplotlib bar plotting
plot_func = ax.bar if self.data_variable == "x" else ax.barh
artists = plot_func(
hist["edges"],
hist["heights"] - bottom,
hist["widths"],
bottom,
align="edge",
**artist_kws,
)
for bar in artists:
if self.data_variable == "x":
bar.sticky_edges.x[:] = sticky_data
bar.sticky_edges.y[:] = sticky_stat
else:
bar.sticky_edges.x[:] = sticky_stat
bar.sticky_edges.y[:] = sticky_data
hist_artists.extend(artists)
else:
# Use either fill_between or plot to draw hull of histogram
if element == "step":
final = hist.iloc[-1]
x = np.append(hist["edges"], final["edges"] + final["widths"])
y = np.append(hist["heights"], final["heights"])
b = np.append(bottom, bottom[-1])
if self.data_variable == "x":
step = "post"
drawstyle = "steps-post"
else:
step = "post" # fillbetweenx handles mapping internally
drawstyle = "steps-pre"
elif element == "poly":
x = hist["edges"] + hist["widths"] / 2
y = hist["heights"]
b = bottom
step = None
drawstyle = None
if self.data_variable == "x":
if fill:
artist = ax.fill_between(x, b, y, step=step, **artist_kws)
else:
artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)
artist.sticky_edges.x[:] = sticky_data
artist.sticky_edges.y[:] = sticky_stat
else:
if fill:
artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)
else:
artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)
artist.sticky_edges.x[:] = sticky_stat
artist.sticky_edges.y[:] = sticky_data
hist_artists.append(artist)
if kde:
# Add in the density curves
try:
density = densities[key]
except KeyError:
continue
support = density.index
if "x" in self.variables:
line_args = support, density
sticky_x, sticky_y = None, (0, np.inf)
else:
line_args = density, support
sticky_x, sticky_y = (0, np.inf), None
line_kws["color"] = to_rgba(sub_color, 1)
line, = ax.plot(
*line_args, **line_kws,
)
if sticky_x is not None:
line.sticky_edges.x[:] = sticky_x
if sticky_y is not None:
line.sticky_edges.y[:] = sticky_y
if element == "bars" and "linewidth" not in plot_kws:
# Now we handle linewidth, which depends on the scaling of the plot
# We will base everything on the minimum bin width
hist_metadata = pd.concat([
# Use .items for generality over dict or df
h.index.to_frame() for _, h in histograms.items()
]).reset_index(drop=True)
thin_bar_idx = hist_metadata["widths"].idxmin()
binwidth = hist_metadata.loc[thin_bar_idx, "widths"]
left_edge = hist_metadata.loc[thin_bar_idx, "edges"]
# Set initial value
default_linewidth = math.inf
# Loop through subsets based only on facet variables
for sub_vars, _ in self.iter_data():
ax = self._get_axes(sub_vars)
# Needed in some cases to get valid transforms.
# Innocuous in other cases?
ax.autoscale_view()
# Convert binwidth from data coordinates to points
pts_x, pts_y = 72 / ax.figure.dpi * abs(
ax.transData.transform([left_edge + binwidth] * 2)
- ax.transData.transform([left_edge] * 2)
)
if self.data_variable == "x":
binwidth_points = pts_x
else:
binwidth_points = pts_y
# The relative size of the lines depends on the appearance
# This is a provisional value and may need more tweaking
default_linewidth = min(.1 * binwidth_points, default_linewidth)
# Set the attributes
for bar in hist_artists:
# Don't let the lines get too thick
max_linewidth = bar.get_linewidth()
if not fill:
max_linewidth *= 1.5
linewidth = min(default_linewidth, max_linewidth)
# If not filling, don't let lines disappear
if not fill:
min_linewidth = .5
linewidth = max(linewidth, min_linewidth)
bar.set_linewidth(linewidth)
# --- Finalize the plot ----
# Axis labels
ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
default_x = default_y = ""
if self.data_variable == "x":
default_y = estimator.stat.capitalize()
if self.data_variable == "y":
default_x = estimator.stat.capitalize()
self._add_axis_labels(ax, default_x, default_y)
# Legend for semantic variables
if "hue" in self.variables and legend:
if fill or element == "bars":
artist = partial(mpl.patches.Patch)
else:
artist = partial(mpl.lines.Line2D, [], [])
ax_obj = self.ax if self.ax is not None else self.facets
self._add_legend(
ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},
)
def plot_bivariate_histogram(
self,
common_bins, common_norm,
thresh, pthresh, pmax,
color, legend,
cbar, cbar_ax, cbar_kws,
estimate_kws,
**plot_kws,
):
# Default keyword dicts
cbar_kws = {} if cbar_kws is None else cbar_kws.copy()
# Now initialize the Histogram estimator
estimator = Histogram(**estimate_kws)
# Do pre-compute housekeeping related to multiple groups
if set(self.variables) - {"x", "y"}:
all_data = self.comp_data.dropna()
if common_bins:
estimator.define_bin_params(
all_data["x"],
all_data["y"],
all_data.get("weights", None),
)
else:
common_norm = False
# -- Determine colormap threshold and norm based on the full data
full_heights = []
for _, sub_data in self.iter_data(from_comp_data=True):
sub_heights, _ = estimator(
sub_data["x"], sub_data["y"], sub_data.get("weights", None)
)
full_heights.append(sub_heights)
common_color_norm = not set(self.variables) - {"x", "y"} or common_norm
if pthresh is not None and common_color_norm:
thresh = self._quantile_to_level(full_heights, pthresh)
plot_kws.setdefault("vmin", 0)
if common_color_norm:
if pmax is not None:
vmax = self._quantile_to_level(full_heights, pmax)
else:
vmax = plot_kws.pop("vmax", max(map(np.max, full_heights)))
else:
vmax = None
# Get a default color
# (We won't follow the color cycle here, as multiple plots are unlikely)
if color is None:
color = "C0"
# --- Loop over data (subsets) and draw the histograms
for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
if sub_data.empty:
continue
# Do the histogram computation
heights, (x_edges, y_edges) = estimator(
sub_data["x"],
sub_data["y"],
weights=sub_data.get("weights", None),
)
# Check for log scaling on the data axis
if self._log_scaled("x"):
x_edges = np.power(10, x_edges)
if self._log_scaled("y"):
y_edges = np.power(10, y_edges)
# Apply scaling to normalize across groups
if estimator.stat != "count" and common_norm:
heights *= len(sub_data) / len(all_data)
# Define the specific kwargs for this artist
artist_kws = plot_kws.copy()
if "hue" in self.variables:
color = self._hue_map(sub_vars["hue"])
cmap = self._cmap_from_color(color)
artist_kws["cmap"] = cmap
else:
cmap = artist_kws.pop("cmap", None)
if isinstance(cmap, str):
cmap = color_palette(cmap, as_cmap=True)
elif cmap is None:
cmap = self._cmap_from_color(color)
artist_kws["cmap"] = cmap
# Set the upper norm on the colormap
if not common_color_norm and pmax is not None:
vmax = self._quantile_to_level(heights, pmax)
if vmax is not None:
artist_kws["vmax"] = vmax
# Make cells at or below the threshold transparent
if not common_color_norm and pthresh:
thresh = self._quantile_to_level(heights, pthresh)
if thresh is not None:
heights = np.ma.masked_less_equal(heights, thresh)
# Get the axes for this plot
ax = self._get_axes(sub_vars)
# pcolormesh is going to turn the grid off, but we want to keep it
# I'm not sure if there's a better way to get the grid state
x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])
y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])
mesh = ax.pcolormesh(
x_edges,
y_edges,
heights.T,
**artist_kws,
)
# pcolormesh sets sticky edges, but we only want them if not thresholding
if thresh is not None:
mesh.sticky_edges.x[:] = []
mesh.sticky_edges.y[:] = []
# Add an optional colorbar
# Note, we want to improve this. When hue is used, it will stack
# multiple colorbars with redundant ticks in an ugly way.
# But it's going to take some work to have multiple colorbars that
# share ticks nicely.
if cbar:
ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)
# Reset the grid state
if x_grid:
ax.grid(True, axis="x")
if y_grid:
ax.grid(True, axis="y")
# --- Finalize the plot
ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
self._add_axis_labels(ax)
if "hue" in self.variables and legend:
# TODO if possible, I would like to move the contour
# intensity information into the legend too and label the
# iso proportions rather than the raw density values
artist_kws = {}
artist = partial(mpl.patches.Patch)
ax_obj = self.ax if self.ax is not None else self.facets
self._add_legend(
ax_obj, artist, True, False, "layer", 1, artist_kws, {},
)
def plot_univariate_density(
self,
multiple,
common_norm,
common_grid,
warn_singular,
fill,
color,
legend,
estimate_kws,
**plot_kws,
):
# Handle conditional defaults
if fill is None:
fill = multiple in ("stack", "fill")
# Preprocess the matplotlib keyword dictionaries
if fill:
artist = mpl.collections.PolyCollection
else:
artist = mpl.lines.Line2D
plot_kws = _normalize_kwargs(plot_kws, artist)
# Input checking
_check_argument("multiple", ["layer", "stack", "fill"], multiple)
# Always share the evaluation grid when stacking
subsets = bool(set(self.variables) - {"x", "y"})
if subsets and multiple in ("stack", "fill"):
common_grid = True
# Check if the data axis is log scaled
log_scale = self._log_scaled(self.data_variable)
# Do the computation
densities = self._compute_univariate_density(
self.data_variable,
common_norm,
common_grid,
estimate_kws,
log_scale,
warn_singular,
)
# Adjust densities based on the `multiple` rule
densities, baselines = self._resolve_multiple(densities, multiple)
# Control the interaction with autoscaling by defining sticky_edges
# i.e. we don't want autoscale margins below the density curve
sticky_density = (0, 1) if multiple == "fill" else (0, np.inf)
if multiple == "fill":
# Filled plots should not have any margins
sticky_support = densities.index.min(), densities.index.max()
else:
sticky_support = []
if fill:
if multiple == "layer":
default_alpha = .25
else:
default_alpha = .75
else:
default_alpha = 1
alpha = plot_kws.pop("alpha", default_alpha) # TODO make parameter?
# Now iterate through the subsets and draw the densities
# We go backwards so stacked densities read from top-to-bottom
for sub_vars, _ in self.iter_data("hue", reverse=True):
# Extract the support grid and density curve for this level
key = tuple(sub_vars.items())
try:
density = densities[key]
except KeyError:
continue
support = density.index
fill_from = baselines[key]
ax = self._get_axes(sub_vars)
if "hue" in self.variables:
sub_color = self._hue_map(sub_vars["hue"])
else:
sub_color = color
artist_kws = self._artist_kws(
plot_kws, fill, False, multiple, sub_color, alpha
)
# Either plot a curve with observation values on the x axis
if "x" in self.variables:
if fill:
artist = ax.fill_between(support, fill_from, density, **artist_kws)
else:
artist, = ax.plot(support, density, **artist_kws)
artist.sticky_edges.x[:] = sticky_support
artist.sticky_edges.y[:] = sticky_density
# Or plot a curve with observation values on the y axis
else:
if fill:
artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)
else:
artist, = ax.plot(density, support, **artist_kws)
artist.sticky_edges.x[:] = sticky_density
artist.sticky_edges.y[:] = sticky_support
# --- Finalize the plot ----
ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
default_x = default_y = ""
if self.data_variable == "x":
default_y = "Density"
if self.data_variable == "y":
default_x = "Density"
self._add_axis_labels(ax, default_x, default_y)
if "hue" in self.variables and legend:
if fill:
artist = partial(mpl.patches.Patch)
else:
artist = partial(mpl.lines.Line2D, [], [])
ax_obj = self.ax if self.ax is not None else self.facets
self._add_legend(
ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},
)
def plot_bivariate_density(
self,
common_norm,
fill,
levels,
thresh,
color,
legend,
cbar,
warn_singular,
cbar_ax,
cbar_kws,
estimate_kws,
**contour_kws,
):
contour_kws = contour_kws.copy()
estimator = KDE(**estimate_kws)
if not set(self.variables) - {"x", "y"}:
common_norm = False
all_data = self.plot_data.dropna()
# Loop through the subsets and estimate the KDEs
densities, supports = {}, {}
for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
# Extract the data points from this sub set and remove nulls
observations = sub_data[["x", "y"]]
# Extract the weights for this subset of observations
if "weights" in self.variables:
weights = sub_data["weights"]
else:
weights = None
# Check that KDE will not error out
variance = observations[["x", "y"]].var()
if any(math.isclose(x, 0) for x in variance) or variance.isna().any():
msg = (
"Dataset has 0 variance; skipping density estimate. "
"Pass `warn_singular=False` to disable this warning."
)
if warn_singular:
warnings.warn(msg, UserWarning)
continue
# Estimate the density of observations at this level
observations = observations["x"], observations["y"]
density, support = estimator(*observations, weights=weights)
# Transform the support grid back to the original scale
xx, yy = support
if self._log_scaled("x"):
xx = np.power(10, xx)
if self._log_scaled("y"):
yy = np.power(10, yy)
support = xx, yy
# Apply a scaling factor so that the integral over all subsets is 1
if common_norm:
density *= len(sub_data) / len(all_data)
key = tuple(sub_vars.items())
densities[key] = density
supports[key] = support
# Define a grid of iso-proportion levels
if thresh is None:
thresh = 0
if isinstance(levels, Number):
levels = np.linspace(thresh, 1, levels)
else:
if min(levels) < 0 or max(levels) > 1:
raise ValueError("levels must be in [0, 1]")
# Transform from iso-proportions to iso-densities
if common_norm:
common_levels = self._quantile_to_level(
list(densities.values()), levels,
)
draw_levels = {k: common_levels for k in densities}
else:
draw_levels = {
k: self._quantile_to_level(d, levels)
for k, d in densities.items()
}
# Get a default single color from the attribute cycle
if self.ax is None:
default_color = "C0" if color is None else color
else:
scout, = self.ax.plot([], color=color)
default_color = scout.get_color()
scout.remove()
# Define the coloring of the contours
if "hue" in self.variables:
for param in ["cmap", "colors"]:
if param in contour_kws:
msg = f"{param} parameter ignored when using hue mapping."
warnings.warn(msg, UserWarning)
contour_kws.pop(param)
else:
# Work out a default coloring of the contours
coloring_given = set(contour_kws) & {"cmap", "colors"}
if fill and not coloring_given:
cmap = self._cmap_from_color(default_color)
contour_kws["cmap"] = cmap
if not fill and not coloring_given:
contour_kws["colors"] = [default_color]
# Use our internal colormap lookup
cmap = contour_kws.pop("cmap", None)
if isinstance(cmap, str):
cmap = color_palette(cmap, as_cmap=True)
if cmap is not None:
contour_kws["cmap"] = cmap
# Loop through the subsets again and plot the data
for sub_vars, _ in self.iter_data("hue"):
if "hue" in sub_vars:
color = self._hue_map(sub_vars["hue"])
if fill:
contour_kws["cmap"] = self._cmap_from_color(color)
else:
contour_kws["colors"] = [color]
ax = self._get_axes(sub_vars)
# Choose the function to plot with
# TODO could add a pcolormesh based option as well
# Which would look something like element="raster"
if fill:
contour_func = ax.contourf
else:
contour_func = ax.contour
key = tuple(sub_vars.items())
if key not in densities:
continue
density = densities[key]
xx, yy = supports[key]
label = contour_kws.pop("label", None)
cset = contour_func(
xx, yy, density,
levels=draw_levels[key],
**contour_kws,
)
if "hue" not in self.variables:
cset.collections[0].set_label(label)
# Add a color bar representing the contour heights
# Note: this shows iso densities, not iso proportions
# See more notes in histplot about how this could be improved
if cbar:
cbar_kws = {} if cbar_kws is None else cbar_kws
ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)
# --- Finalize the plot
ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
self._add_axis_labels(ax)
if "hue" in self.variables and legend:
# TODO if possible, I would like to move the contour
# intensity information into the legend too and label the
# iso proportions rather than the raw density values
artist_kws = {}
if fill:
artist = partial(mpl.patches.Patch)
else:
artist = partial(mpl.lines.Line2D, [], [])
ax_obj = self.ax if self.ax is not None else self.facets
self._add_legend(
ax_obj, artist, fill, False, "layer", 1, artist_kws, {},
)
def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):
estimator = ECDF(**estimate_kws)
# Set the draw style to step the right way for the data variable
drawstyles = dict(x="steps-post", y="steps-pre")
plot_kws["drawstyle"] = drawstyles[self.data_variable]
# Loop through the subsets, transform and plot the data
for sub_vars, sub_data in self.iter_data(
"hue", reverse=True, from_comp_data=True,
):
# Compute the ECDF
if sub_data.empty:
continue
observations = sub_data[self.data_variable]
weights = sub_data.get("weights", None)
stat, vals = estimator(observations, weights=weights)
# Assign attributes based on semantic mapping
artist_kws = plot_kws.copy()
if "hue" in self.variables:
artist_kws["color"] = self._hue_map(sub_vars["hue"])
# Return the data variable to the linear domain
# This needs an automatic solution; see GH2409
if self._log_scaled(self.data_variable):
vals = np.power(10, vals)
vals[0] = -np.inf
# Work out the orientation of the plot
if self.data_variable == "x":
plot_args = vals, stat
stat_variable = "y"
else:
plot_args = stat, vals
stat_variable = "x"
if estimator.stat == "count":
top_edge = len(observations)
else:
top_edge = 1
# Draw the line for this subset
ax = self._get_axes(sub_vars)
artist, = ax.plot(*plot_args, **artist_kws)
sticky_edges = getattr(artist.sticky_edges, stat_variable)
sticky_edges[:] = 0, top_edge
# --- Finalize the plot ----
ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
stat = estimator.stat.capitalize()
default_x = default_y = ""
if self.data_variable == "x":
default_y = stat
if self.data_variable == "y":
default_x = stat
self._add_axis_labels(ax, default_x, default_y)
if "hue" in self.variables and legend:
artist = partial(mpl.lines.Line2D, [], [])
alpha = plot_kws.get("alpha", 1)
ax_obj = self.ax if self.ax is not None else self.facets
self._add_legend(
ax_obj, artist, False, False, None, alpha, plot_kws, {},
)
def plot_rug(self, height, expand_margins, legend, **kws):
for sub_vars, sub_data, in self.iter_data(from_comp_data=True):
ax = self._get_axes(sub_vars)
kws.setdefault("linewidth", 1)
if expand_margins:
xmarg, ymarg = ax.margins()
if "x" in self.variables:
ymarg += height * 2
if "y" in self.variables:
xmarg += height * 2
ax.margins(x=xmarg, y=ymarg)
if "hue" in self.variables:
kws.pop("c", None)
kws.pop("color", None)
if "x" in self.variables:
self._plot_single_rug(sub_data, "x", height, ax, kws)
if "y" in self.variables:
self._plot_single_rug(sub_data, "y", height, ax, kws)
# --- Finalize the plot
self._add_axis_labels(ax)
if "hue" in self.variables and legend:
# TODO ideally I'd like the legend artist to look like a rug
legend_artist = partial(mpl.lines.Line2D, [], [])
self._add_legend(
ax, legend_artist, False, False, None, 1, {}, {},
)
def _plot_single_rug(self, sub_data, var, height, ax, kws):
"""Draw a rugplot along one axis of the plot."""
vector = sub_data[var]
n = len(vector)
# Return data to linear domain
# This needs an automatic solution; see GH2409
if self._log_scaled(var):
vector = np.power(10, vector)
# We'll always add a single collection with varying colors
if "hue" in self.variables:
colors = self._hue_map(sub_data["hue"])
else:
colors = None
# Build the array of values for the LineCollection
if var == "x":
trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
xy_pairs = np.column_stack([
np.repeat(vector, 2), np.tile([0, height], n)
])
if var == "y":
trans = tx.blended_transform_factory(ax.transAxes, ax.transData)
xy_pairs = np.column_stack([
np.tile([0, height], n), np.repeat(vector, 2)
])
# Draw the lines on the plot
line_segs = xy_pairs.reshape([n, 2, 2])
ax.add_collection(LineCollection(
line_segs, transform=trans, colors=colors, **kws
))
ax.autoscale_view(scalex=var == "x", scaley=var == "y")
class _DistributionFacetPlotter(_DistributionPlotter):
semantics = _DistributionPlotter.semantics + ("col", "row")
# ==================================================================================== #
# External API
# ==================================================================================== #
def histplot(
data=None, *,
# Vector variables
x=None, y=None, hue=None, weights=None,
# Histogram computation parameters
stat="count", bins="auto", binwidth=None, binrange=None,
discrete=None, cumulative=False, common_bins=True, common_norm=True,
# Histogram appearance parameters
multiple="layer", element="bars", fill=True, shrink=1,
# Histogram smoothing with a kernel density estimate
kde=False, kde_kws=None, line_kws=None,
# Bivariate histogram parameters
thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,
# Hue mapping parameters
palette=None, hue_order=None, hue_norm=None, color=None,
# Axes information
log_scale=None, legend=True, ax=None,
# Other appearance keywords
**kwargs,
):
p = _DistributionPlotter(
data=data,
variables=_DistributionPlotter.get_semantics(locals())
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
if ax is None:
ax = plt.gca()
p._attach(ax, log_scale=log_scale)
if p.univariate: # Note, bivariate plots won't cycle
if fill:
method = ax.bar if element == "bars" else ax.fill_between
else:
method = ax.plot
color = _default_color(method, hue, color, kwargs)
if not p.has_xy_data:
return ax
# Default to discrete bins for categorical variables
if discrete is None:
discrete = p._default_discrete()
estimate_kws = dict(
stat=stat,
bins=bins,
binwidth=binwidth,
binrange=binrange,
discrete=discrete,
cumulative=cumulative,
)
if p.univariate:
p.plot_univariate_histogram(
multiple=multiple,
element=element,
fill=fill,
shrink=shrink,
common_norm=common_norm,
common_bins=common_bins,
kde=kde,
kde_kws=kde_kws,
color=color,
legend=legend,
estimate_kws=estimate_kws,
line_kws=line_kws,
**kwargs,
)
else:
p.plot_bivariate_histogram(
common_bins=common_bins,
common_norm=common_norm,
thresh=thresh,
pthresh=pthresh,
pmax=pmax,
color=color,
legend=legend,
cbar=cbar,
cbar_ax=cbar_ax,
cbar_kws=cbar_kws,
estimate_kws=estimate_kws,
**kwargs,
)
return ax
histplot.__doc__ = """\
Plot univariate or bivariate histograms to show distributions of datasets.
A histogram is a classic visualization tool that represents the distribution
of one or more variables by counting the number of observations that fall within
discrete bins.
This function can normalize the statistic computed within each bin to estimate
frequency, density or probability mass, and it can add a smooth curve obtained
using a kernel density estimate, similar to :func:`kdeplot`.
More information is provided in the :ref:`user guide <tutorial_hist>`.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
If provided, weight the contribution of the corresponding data points
towards the count in each bin by these factors.
{params.hist.stat}
{params.hist.bins}
{params.hist.binwidth}
{params.hist.binrange}
discrete : bool
If True, default to ``binwidth=1`` and draw the bars so that they are
centered on their corresponding data points. This avoids "gaps" that may
otherwise appear when using discrete (integer) data.
cumulative : bool
If True, plot the cumulative counts as bins increase.
common_bins : bool
If True, use the same bins when semantic variables produce multiple
plots. If using a reference rule to determine the bins, it will be computed
with the full dataset.
common_norm : bool
If True and using a normalized statistic, the normalization will apply over
the full dataset. Otherwise, normalize each histogram independently.
multiple : {{"layer", "dodge", "stack", "fill"}}
Approach to resolving multiple elements when semantic mapping creates subsets.
Only relevant with univariate data.
element : {{"bars", "step", "poly"}}
Visual representation of the histogram statistic.
Only relevant with univariate data.
fill : bool
If True, fill in the space under the histogram.
Only relevant with univariate data.
shrink : number
Scale the width of each bar relative to the binwidth by this factor.
Only relevant with univariate data.
kde : bool
If True, compute a kernel density estimate to smooth the distribution
and show on the plot as (one or more) line(s).
Only relevant with univariate data.
kde_kws : dict
Parameters that control the KDE computation, as in :func:`kdeplot`.
line_kws : dict
Parameters that control the KDE visualization, passed to
:meth:`matplotlib.axes.Axes.plot`.
thresh : number or None
Cells with a statistic less than or equal to this value will be transparent.
Only relevant with bivariate data.
pthresh : number or None
Like ``thresh``, but a value in [0, 1] such that cells with aggregate counts
(or other statistics, when used) up to this proportion of the total will be
transparent.
pmax : number or None
A value in [0, 1] that sets the saturation point for the colormap at a value
such that cells below it constitute this proportion of the total count (or
other statistic, when used).
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
Other keyword arguments are passed to one of the following matplotlib
functions:
- :meth:`matplotlib.axes.Axes.bar` (univariate, element="bars")
- :meth:`matplotlib.axes.Axes.fill_between` (univariate, other element, fill=True)
- :meth:`matplotlib.axes.Axes.plot` (univariate, other element, fill=False)
- :meth:`matplotlib.axes.Axes.pcolormesh` (bivariate)
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.kdeplot}
{seealso.rugplot}
{seealso.ecdfplot}
{seealso.jointplot}
Notes
-----
The choice of bins for computing and plotting a histogram can exert
substantial influence on the insights that one is able to draw from the
visualization. If the bins are too large, they may erase important features.
On the other hand, bins that are too small may be dominated by random
variability, obscuring the shape of the true underlying distribution. The
default bin size is determined using a reference rule that depends on the
sample size and variance. This works well in many cases (i.e., with
"well-behaved" data), but it fails in others. It is always a good idea to try
different bin sizes to be sure that you are not missing something important.
This function allows you to specify bins in several different ways, such as
by setting the total number of bins to use, the width of each bin, or the
specific locations where the bins should break.
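For a quick comparison of the main bin specifications, a minimal sketch
(assuming seaborn imported as ``sns`` and a numeric vector ``x``)::

    sns.histplot(x=x)              # reference rule chooses the bins
    sns.histplot(x=x, bins=30)     # fix the total number of bins
    sns.histplot(x=x, binwidth=5)  # fix the width of every bin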
Examples
--------
.. include:: ../docstrings/histplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def kdeplot(
x=None, # Allow positional x, because behavior will not change with reorg
*,
y=None,
shade=None, # Note "soft" deprecation, explained below
vertical=False, # Deprecated
kernel=None, # Deprecated
bw=None, # Deprecated
gridsize=200, # TODO maybe depend on uni/bivariate?
cut=3, clip=None, legend=True, cumulative=False,
shade_lowest=None, # Deprecated, controlled with levels now
cbar=False, cbar_ax=None, cbar_kws=None,
ax=None,
# New params
weights=None, # TODO note that weights is grouped with semantics
hue=None, palette=None, hue_order=None, hue_norm=None,
multiple="layer", common_norm=True, common_grid=False,
levels=10, thresh=.05,
bw_method="scott", bw_adjust=1, log_scale=None,
color=None, fill=None,
# Renamed params
data=None, data2=None,
# New in v0.12
warn_singular=True,
**kwargs,
):
# Handle deprecation of `data2` as name for y variable
if data2 is not None:
y = data2
# If `data2` is present, we need to check for the `data` kwarg being
# used to pass a vector for `x`. We'll reassign the vectors and warn.
# We need this check because just passing a vector to `data` is now
# technically valid.
x_passed_as_data = (
x is None
and data is not None
and np.ndim(data) == 1
)
if x_passed_as_data:
msg = "Use `x` and `y` rather than `data` `and `data2`"
x = data
else:
msg = "The `data2` param is now named `y`; please update your code"
warnings.warn(msg, FutureWarning)
# Handle deprecation of `vertical`
if vertical:
msg = (
"The `vertical` parameter is deprecated and will be removed in a "
"future version. Assign the data to the `y` variable instead."
)
warnings.warn(msg, FutureWarning)
x, y = y, x
# Handle deprecation of `bw`
if bw is not None:
msg = (
"The `bw` parameter is deprecated in favor of `bw_method` and "
f"`bw_adjust`. Using {bw} for `bw_method`, but please "
"see the docs for the new parameters and update your code."
)
warnings.warn(msg, FutureWarning)
bw_method = bw
# Handle deprecation of `kernel`
if kernel is not None:
msg = (
"Support for alternate kernels has been removed. "
"Using Gaussian kernel."
)
warnings.warn(msg, UserWarning)
# Handle deprecation of shade_lowest
if shade_lowest is not None:
if shade_lowest:
thresh = 0
msg = (
"`shade_lowest` is now deprecated in favor of `thresh`. "
f"Setting `thresh={thresh}`, but please update your code."
)
warnings.warn(msg, UserWarning)
# Handle `n_levels`
# This was never in the formal API but it was processed, and appeared in an
# example. We can treat as an alias for `levels` now and deprecate later.
levels = kwargs.pop("n_levels", levels)
# Handle "soft" deprecation of shade `shade` is not really the right
# terminology here, but unlike some of the other deprecated parameters it
# is probably very commonly used and much hard to remove. This is therefore
# going to be a longer process where, first, `fill` will be introduced and
# be used throughout the documentation. In 0.12, when kwarg-only
# enforcement hits, we can remove the shade/shade_lowest out of the
# function signature all together and pull them out of the kwargs. Then we
# can actually fire a FutureWarning, and eventually remove.
if shade is not None:
fill = shade
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
p = _DistributionPlotter(
data=data,
variables=_DistributionPlotter.get_semantics(locals()),
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
if ax is None:
ax = plt.gca()
p._attach(ax, allowed_types=["numeric", "datetime"], log_scale=log_scale)
method = ax.fill_between if fill else ax.plot
color = _default_color(method, hue, color, kwargs)
if not p.has_xy_data:
return ax
# Pack the kwargs for statistics.KDE
estimate_kws = dict(
bw_method=bw_method,
bw_adjust=bw_adjust,
gridsize=gridsize,
cut=cut,
clip=clip,
cumulative=cumulative,
)
if p.univariate:
plot_kws = kwargs.copy()
p.plot_univariate_density(
multiple=multiple,
common_norm=common_norm,
common_grid=common_grid,
fill=fill,
color=color,
legend=legend,
warn_singular=warn_singular,
estimate_kws=estimate_kws,
**plot_kws,
)
else:
p.plot_bivariate_density(
common_norm=common_norm,
fill=fill,
levels=levels,
thresh=thresh,
legend=legend,
color=color,
warn_singular=warn_singular,
cbar=cbar,
cbar_ax=cbar_ax,
cbar_kws=cbar_kws,
estimate_kws=estimate_kws,
**kwargs,
)
return ax
kdeplot.__doc__ = """\
Plot univariate or bivariate distributions using kernel density estimation.
A kernel density estimate (KDE) plot is a method for visualizing the
distribution of observations in a dataset, analogous to a histogram. KDE
represents the data using a continuous probability density curve in one or
more dimensions.
The approach is explained further in the :ref:`user guide <tutorial_kde>`.
Relative to a histogram, KDE can produce a plot that is less cluttered and
more interpretable, especially when drawing multiple distributions. But it
has the potential to introduce distortions if the underlying distribution is
bounded or not smooth. Like a histogram, the quality of the representation
also depends on the selection of good smoothing parameters.
Parameters
----------
{params.core.xy}
shade : bool
Alias for ``fill``. Using ``fill`` is recommended.
vertical : bool
Orientation parameter.
.. deprecated:: 0.11.0
specify orientation by assigning the ``x`` or ``y`` variables.
kernel : str
Function that defines the kernel.
.. deprecated:: 0.11.0
support for non-Gaussian kernels has been removed.
bw : str, number, or callable
Smoothing parameter.
.. deprecated:: 0.11.0
see ``bw_method`` and ``bw_adjust``.
gridsize : int
Number of points on each dimension of the evaluation grid.
{params.kde.cut}
{params.kde.clip}
{params.dist.legend}
{params.kde.cumulative}
shade_lowest : bool
If False, the area below the lowest contour will be transparent.
.. deprecated:: 0.11.0
see ``thresh``.
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.ax}
weights : vector or key in ``data``
If provided, weight the kernel density estimation using these values.
{params.core.hue}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.dist.multiple}
common_norm : bool
If True, scale each conditional density by the number of observations
such that the total area under all densities sums to 1. Otherwise,
normalize each density independently.
common_grid : bool
If True, use the same evaluation grid for each kernel density estimate.
Only relevant with univariate data.
levels : int or vector
Number of contour levels or values to draw contours at. A vector argument
must have increasing values in [0, 1]. Levels correspond to iso-proportions
of the density: e.g., 20% of the probability mass will lie below the
contour drawn for 0.2. Only relevant with bivariate data.
thresh : number in [0, 1]
Lowest iso-proportion level at which to draw a contour line. Ignored when
``levels`` is a vector. Only relevant with bivariate data.
{params.kde.bw_method}
{params.kde.bw_adjust}
{params.dist.log_scale}
{params.core.color}
fill : bool or None
If True, fill in the area under univariate density curves or between
bivariate contours. If None, the default depends on ``multiple``.
{params.core.data}
warn_singular : bool
If True, issue a warning when trying to estimate the density of data
with zero variance.
kwargs
Other keyword arguments are passed to one of the following matplotlib
functions:
- :meth:`matplotlib.axes.Axes.plot` (univariate, ``fill=False``),
- :meth:`matplotlib.axes.Axes.fill_between` (univariate, ``fill=True``),
- :meth:`matplotlib.axes.Axes.contour` (bivariate, ``fill=False``),
- :meth:`matplotlib.axes.Axes.contourf` (bivariate, ``fill=True``).
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.ecdfplot}
{seealso.jointplot}
{seealso.violinplot}
Notes
-----
The *bandwidth*, or standard deviation of the smoothing kernel, is an
important parameter. Misspecification of the bandwidth can produce a
distorted representation of the data. Much like the choice of bin width in a
histogram, an over-smoothed curve can erase true features of a
distribution, while an under-smoothed curve can create false features out of
random variability. The rule-of-thumb that sets the default bandwidth works
best when the true distribution is smooth, unimodal, and roughly bell-shaped.
It is always a good idea to check the default behavior by using ``bw_adjust``
to increase or decrease the amount of smoothing.
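For example, a quick sensitivity check (a sketch, assuming a numeric vector
``x``)::

    sns.kdeplot(x=x)                # rule-of-thumb bandwidth
    sns.kdeplot(x=x, bw_adjust=.5)  # half the bandwidth: more detail
    sns.kdeplot(x=x, bw_adjust=2)   # double the bandwidth: smoother curve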
Because the smoothing algorithm uses a Gaussian kernel, the estimated density
curve can extend to values that do not make sense for a particular dataset.
For example, the curve may be drawn over negative values when smoothing data
that are naturally positive. The ``cut`` and ``clip`` parameters can be used
to control the extent of the curve, but datasets that have many observations
close to a natural boundary may be better served by a different visualization
method.
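A sketch of constraining the curve to non-negative support (assuming the
hypothetical vector ``x`` holds only positive values)::

    sns.kdeplot(x=x, cut=0)           # stop the curve at the observed extremes
    sns.kdeplot(x=x, clip=(0, None))  # never evaluate the density below zero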
Similar considerations apply when a dataset is naturally discrete or "spiky"
(containing many repeated observations of the same value). Kernel density
estimation will always produce a smooth curve, which would be misleading
in these situations.
The units on the density axis are a common source of confusion. While kernel
density estimation produces a probability distribution, the height of the curve
at each point gives a density, not a probability. A probability can be obtained
only by integrating the density across a range. The curve is normalized so
that the integral over all possible values is 1, meaning that the scale of
the density axis depends on the data values.
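This can be checked numerically; in this sketch (assuming ``numpy`` imported
as ``np``), the first line on the returned axes is taken to be the density
curve::

    ax = sns.kdeplot(x=x)
    xs, ys = ax.lines[0].get_xdata(), ax.lines[0].get_ydata()
    print(np.trapz(ys, xs))  # approximately 1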
Examples
--------
.. include:: ../docstrings/kdeplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def ecdfplot(
data=None, *,
# Vector variables
x=None, y=None, hue=None, weights=None,
# Computation parameters
stat="proportion", complementary=False,
# Hue mapping parameters
palette=None, hue_order=None, hue_norm=None,
# Axes information
log_scale=None, legend=True, ax=None,
# Other appearance keywords
**kwargs,
):
p = _DistributionPlotter(
data=data,
variables=_DistributionPlotter.get_semantics(locals())
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
# We could support other semantics (size, style) here fairly easily
# But it would make distplot a bit more complicated.
# It's always possible to add features like that later, so I am going to defer.
# It will be even easier to wait until after there is a more general/abstract
# way to go from semantic specs to artist attributes.
if ax is None:
ax = plt.gca()
p._attach(ax, log_scale=log_scale)
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
if not p.has_xy_data:
return ax
# We could add this one day, but it's of dubious value
if not p.univariate:
raise NotImplementedError("Bivariate ECDF plots are not implemented")
estimate_kws = dict(
stat=stat,
complementary=complementary,
)
p.plot_univariate_ecdf(
estimate_kws=estimate_kws,
legend=legend,
**kwargs,
)
return ax
ecdfplot.__doc__ = """\
Plot empirical cumulative distribution functions.
An ECDF represents the proportion or count of observations falling below each
unique value in a dataset. Compared to a histogram or density plot, it has the
advantage that each observation is visualized directly, meaning that there are
no binning or smoothing parameters that need to be adjusted. It also aids direct
comparisons between multiple distributions. A downside is that the relationship
between the appearance of the plot and the basic properties of the distribution
(such as its central tendency, variance, and the presence of any bimodality)
may not be as intuitive.
More information is provided in the :ref:`user guide <tutorial_ecdf>`.
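A minimal sketch (assuming a numeric vector ``x``)::

    sns.ecdfplot(x=x)                      # proportion at or below each value
    sns.ecdfplot(x=x, complementary=True)  # proportion above each value
    sns.ecdfplot(x=x, stat="count")        # cumulative counts instead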
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
If provided, weight the contribution of the corresponding data points
towards the cumulative distribution using these values.
{params.ecdf.stat}
{params.ecdf.complementary}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.kdeplot}
{seealso.rugplot}
Examples
--------
.. include:: ../docstrings/ecdfplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def rugplot(
x=None, # Allow positional x, because behavior won't change
*,
height=.025, axis=None, ax=None,
# New parameters
data=None, y=None, hue=None,
palette=None, hue_order=None, hue_norm=None,
expand_margins=True,
legend=True, # TODO or maybe default to False?
# Renamed parameter
a=None,
**kwargs
):
# A note: I think it would make sense to add multiple= to rugplot and allow
# rugs for different hue variables to be shifted orthogonal to the data axis
# But is this stacking, or dodging?
# A note: if we want to add a style semantic to rugplot,
# we could make an option that draws the rug using scatterplot
# A note, it would also be nice to offer some kind of histogram/density
# rugplot, since alpha blending doesn't work great in the large n regime
# Handle deprecation of `a`
if a is not None:
msg = "The `a` parameter is now called `x`. Please update your code."
warnings.warn(msg, FutureWarning)
x = a
del a
# Handle deprecation of "axis"
if axis is not None:
msg = (
"The `axis` variable is no longer used and will be removed. "
"Instead, assign variables directly to `x` or `y`."
)
warnings.warn(msg, FutureWarning)
# Handle deprecation of "vertical"
if kwargs.pop("vertical", axis == "y"):
x, y = None, x
msg = (
"Using `vertical=True` to control the orientation of the plot "
"is deprecated. Instead, assign the data directly to `y`. "
)
warnings.warn(msg, FutureWarning)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
weights = None
p = _DistributionPlotter(
data=data,
variables=_DistributionPlotter.get_semantics(locals()),
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
if ax is None:
ax = plt.gca()
p._attach(ax)
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
if not p.has_xy_data:
return ax
p.plot_rug(height, expand_margins, legend, **kwargs)
return ax
rugplot.__doc__ = """\
Plot marginal distributions by drawing ticks along the x and y axes.
This function is intended to complement other plots by showing the location
of individual observations in an unobtrusive way.
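A typical use layers the rug beneath another distribution plot (a minimal
sketch, assuming a numeric vector ``x``)::

    ax = sns.kdeplot(x=x)
    sns.rugplot(x=x, ax=ax)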
Parameters
----------
{params.core.xy}
height : number
Proportion of axes extent covered by each rug element.
axis : {{"x", "y"}}
Axis to draw the rug on.
.. deprecated:: 0.11.0
specify axis by assigning the ``x`` or ``y`` variables.
{params.core.ax}
{params.core.data}
{params.core.hue}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
expand_margins : bool
If True, increase the axes margins by the height of the rug to avoid
overlap with other elements.
legend : bool
If False, do not add a legend for semantic variables.
kwargs
Other keyword arguments are passed to
:meth:`matplotlib.collections.LineCollection`
Returns
-------
{returns.ax}
Examples
--------
.. include:: ../docstrings/rugplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def displot(
data=None, *,
# Vector variables
x=None, y=None, hue=None, row=None, col=None, weights=None,
# Other plot parameters
kind="hist", rug=False, rug_kws=None, log_scale=None, legend=True,
# Hue-mapping parameters
palette=None, hue_order=None, hue_norm=None, color=None,
# Faceting parameters
col_wrap=None, row_order=None, col_order=None,
height=5, aspect=1, facet_kws=None,
**kwargs,
):
p = _DistributionFacetPlotter(
data=data,
variables=_DistributionFacetPlotter.get_semantics(locals())
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
_check_argument("kind", ["hist", "kde", "ecdf"], kind)
# --- Initialize the FacetGrid object
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"`displot` is a figure-level function and does not accept "
"the ax= parameter. You may wish to try {}plot.".format(kind)
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
for var in ["row", "col"]:
# Handle faceting variables that lack name information
if var in p.variables and p.variables[var] is None:
p.variables[var] = f"_{var}_"
# Adapt the plot_data dataframe for use with FacetGrid
grid_data = p.plot_data.rename(columns=p.variables)
grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]
col_name = p.variables.get("col", None)
row_name = p.variables.get("row", None)
if facet_kws is None:
facet_kws = {}
g = FacetGrid(
data=grid_data, row=row_name, col=col_name,
col_wrap=col_wrap, row_order=row_order,
col_order=col_order, height=height,
aspect=aspect,
**facet_kws,
)
# Now attach the axes object to the plotter object
if kind == "kde":
allowed_types = ["numeric", "datetime"]
else:
allowed_types = None
p._attach(g, allowed_types=allowed_types, log_scale=log_scale)
# Check for a specification that lacks x/y data and return early
if not p.has_xy_data:
return g
if color is None and hue is None:
color = "C0"
# XXX else warn if hue is not None?
kwargs["legend"] = legend
# --- Draw the plots
if kind == "hist":
hist_kws = kwargs.copy()
# Extract the parameters that will go directly to Histogram
estimate_defaults = {}
_assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)
estimate_kws = {}
for key, default_val in estimate_defaults.items():
estimate_kws[key] = hist_kws.pop(key, default_val)
# Handle derivative defaults
if estimate_kws["discrete"] is None:
estimate_kws["discrete"] = p._default_discrete()
hist_kws["estimate_kws"] = estimate_kws
hist_kws.setdefault("color", color)
if p.univariate:
_assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)
p.plot_univariate_histogram(**hist_kws)
else:
_assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)
p.plot_bivariate_histogram(**hist_kws)
elif kind == "kde":
kde_kws = kwargs.copy()
# Extract the parameters that will go directly to KDE
estimate_defaults = {}
_assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)
estimate_kws = {}
for key, default_val in estimate_defaults.items():
estimate_kws[key] = kde_kws.pop(key, default_val)
kde_kws["estimate_kws"] = estimate_kws
kde_kws["color"] = color
if p.univariate:
_assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)
p.plot_univariate_density(**kde_kws)
else:
_assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)
p.plot_bivariate_density(**kde_kws)
elif kind == "ecdf":
ecdf_kws = kwargs.copy()
# Extract the parameters that will go directly to the estimator
estimate_kws = {}
estimate_defaults = {}
_assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)
for key, default_val in estimate_defaults.items():
estimate_kws[key] = ecdf_kws.pop(key, default_val)
ecdf_kws["estimate_kws"] = estimate_kws
ecdf_kws["color"] = color
if p.univariate:
_assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)
p.plot_univariate_ecdf(**ecdf_kws)
else:
raise NotImplementedError("Bivariate ECDF plots are not implemented")
# All plot kinds can include a rug
if rug:
# TODO with expand_margins=True, each facet expands margins... annoying!
if rug_kws is None:
rug_kws = {}
_assign_default_kwargs(rug_kws, p.plot_rug, rugplot)
rug_kws["legend"] = False
if color is not None:
rug_kws["color"] = color
p.plot_rug(**rug_kws)
# Call FacetGrid annotation methods
# Note that the legend is currently set inside the plotting method
g.set_axis_labels(
x_var=p.variables.get("x", g.axes.flat[0].get_xlabel()),
y_var=p.variables.get("y", g.axes.flat[0].get_ylabel()),
)
g.set_titles()
g.tight_layout()
if data is not None and (x is not None or y is not None):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
g.data = pd.merge(
data,
g.data[g.data.columns.difference(data.columns)],
left_index=True,
right_index=True,
)
else:
wide_cols = {
k: f"_{k}_" if v is None else v for k, v in p.variables.items()
}
g.data = p.plot_data.rename(columns=wide_cols)
return g
displot.__doc__ = """\
Figure-level interface for drawing distribution plots onto a FacetGrid.
This function provides access to several approaches for visualizing the
univariate or bivariate distribution of data, including subsets of data
defined by semantic mapping and faceting across multiple subplots. The
``kind`` parameter selects the approach to use:
- :func:`histplot` (with ``kind="hist"``; the default)
- :func:`kdeplot` (with ``kind="kde"``)
- :func:`ecdfplot` (with ``kind="ecdf"``; univariate-only)
Additionally, a :func:`rugplot` can be added to any kind of plot to show
individual observations.
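For example (a sketch, assuming a hypothetical long-form dataframe ``df`` with
a numeric column ``"val"`` and a categorical column ``"grp"``)::

    sns.displot(data=df, x="val")                   # histogram by default
    sns.displot(data=df, x="val", hue="grp", kind="kde", rug=True)
    sns.displot(data=df, x="val", col="grp", kind="ecdf")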
Extra keyword arguments are passed to the underlying function, so you should
refer to the documentation for each to understand the complete set of options
for making plots with this interface.
See the :doc:`distribution plots tutorial <../tutorial/distributions>` for a more
in-depth discussion of the relative strengths and weaknesses of each approach.
The distinction between figure-level and axes-level functions is explained
further in the :doc:`user guide <../tutorial/function_overview>`.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
{params.facets.rowcol}
kind : {{"hist", "kde", "ecdf"}}
Approach for visualizing the data. Selects the underlying plotting function
and determines the additional set of valid parameters.
rug : bool
If True, show each observation with marginal ticks (as in :func:`rugplot`).
rug_kws : dict
Parameters to control the appearance of the rug plot.
{params.dist.log_scale}
{params.dist.legend}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
{params.facets.col_wrap}
{params.facets.rowcol_order}
{params.facets.height}
{params.facets.aspect}
{params.facets.facet_kws}
kwargs
Other keyword arguments are documented with the relevant axes-level function:
- :func:`histplot` (with ``kind="hist"``)
- :func:`kdeplot` (with ``kind="kde"``)
- :func:`ecdfplot` (with ``kind="ecdf"``)
Returns
-------
{returns.facetgrid}
See Also
--------
{seealso.histplot}
{seealso.kdeplot}
{seealso.rugplot}
{seealso.ecdfplot}
{seealso.jointplot}
Examples
--------
See the API documentation for the axes-level functions for more details
about the breadth of options available for each plot kind.
.. include:: ../docstrings/displot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
# =========================================================================== #
# DEPRECATED FUNCTIONS LIVE BELOW HERE
# =========================================================================== #
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From https://stats.stackexchange.com/questions/798/
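# Bin width h = 2 * IQR / n ** (1 / 3); the bin count is then range / h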
a = np.asarray(a)
if len(a) < 2:
return 1
iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))
h = 2 * iqr / (len(a) ** (1 / 3))
# fall back to sqrt(n) bins if the IQR is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None, x=None):
"""DEPRECATED: Flexibly plot a univariate distribution of observations.
.. warning::
This function is deprecated and will be removed in a future version.
Please adapt your code to use one of two new functions:
- :func:`displot`, a figure-level function with a similar flexibility
over the kind of plot to draw
- :func:`histplot`, an axes-level function for plotting histograms,
including with kernel density smoothing
This function combines the matplotlib ``hist`` function (with automatic
calculation of a good default bin size) with the seaborn :func:`kdeplot`
and :func:`rugplot` functions. It can also fit ``scipy.stats``
distributions and plot the estimated PDF over the data.
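As a rough migration sketch (hypothetical vector ``x``; the new calls are
close, but not exact, equivalents)::

    sns.distplot(x)                            # deprecated
    sns.histplot(x, kde=True, stat="density")  # axes-level replacement
    sns.displot(x, kde=True, stat="density")   # figure-level replacement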
Parameters
----------
a : Series, 1d-array, or list.
Observed data. If this is a Series object with a ``name`` attribute,
the name will be used to label the data axis.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins. If unspecified, a reference rule is used
that tries to find a useful default.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
An object with a `fit` method, returning a tuple that can be passed to a
`pdf` method as positional arguments following a grid of values to
evaluate the pdf on.
hist_kws : dict, optional
Keyword arguments for :meth:`matplotlib.axes.Axes.hist`.
kde_kws : dict, optional
Keyword arguments for :func:`kdeplot`.
rug_kws : dict, optional
Keyword arguments for :func:`rugplot`.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
If True, observed values are on y-axis.
norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
Name for the support axis label. If None, will try to get it
from a.name; if False, do not set a label.
label : string, optional
Legend label for the relevant component of the plot.
ax : matplotlib axis, optional
If provided, plot on this axis.
Returns
-------
ax : matplotlib Axes
Returns the Axes object with the plot for further tweaking.
See Also
--------
kdeplot : Show a univariate or bivariate distribution with a kernel
density estimate.
rugplot : Draw small vertical lines to show each observation in a
distribution.
Examples
--------
Show a default plot with a kernel density estimate and histogram with bin
size determined automatically with a reference rule:
.. plot::
:context: close-figs
>>> import seaborn as sns, numpy as np
>>> sns.set_theme(); np.random.seed(0)
>>> x = np.random.randn(100)
>>> ax = sns.distplot(x)
Use Pandas objects to get an informative axis label:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x = pd.Series(x, name="x variable")
>>> ax = sns.distplot(x)
Plot the distribution with a kernel density estimate and rug plot:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, hist=False)
Plot the distribution with a histogram and maximum likelihood gaussian
distribution fit:
.. plot::
:context: close-figs
>>> from scipy.stats import norm
>>> ax = sns.distplot(x, fit=norm, kde=False)
Plot the distribution on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, vertical=True)
Change the color of all the plot elements:
.. plot::
:context: close-figs
>>> sns.set_color_codes()
>>> ax = sns.distplot(x, color="y")
Pass specific parameters to the underlying plot functions:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
... kde_kws={"color": "k", "lw": 3, "label": "KDE"},
... hist_kws={"histtype": "step", "linewidth": 3,
... "alpha": 1, "color": "g"})
"""
if kde and not hist:
axes_level_suggestion = (
"`kdeplot` (an axes-level function for kernel density plots)."
)
else:
axes_level_suggestion = (
"`histplot` (an axes-level function for histograms)."
)
msg = (
"`distplot` is a deprecated function and will be removed in a future version. "
"Please adapt your code to use either `displot` (a figure-level function with "
"similar flexibility) or " + axes_level_suggestion
)
warnings.warn(msg, FutureWarning)
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
# Support new-style API
if x is not None:
a = x
# Make `a` a 1-d float array
a = np.asarray(a, float)
if a.ndim > 1:
a = a.squeeze()
# Drop null values from array
a = remove_na(a)
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
hist_kws = {} if hist_kws is None else hist_kws.copy()
kde_kws = {} if kde_kws is None else kde_kws.copy()
rug_kws = {} if rug_kws is None else rug_kws.copy()
fit_kws = {} if fit_kws is None else fit_kws.copy()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("density", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
def pdf(x):
return fit.pdf(x, *params)
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
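# Migration sketch (illustrative note, not part of the original module): rough
# modern equivalents for common `distplot` calls, following the deprecation
# message above, which points to `displot`, `histplot`, and `kdeplot`.
#   sns.distplot(x)              ->  sns.histplot(x, kde=True, stat="density")
#   sns.distplot(x, hist=False)  ->  sns.kdeplot(x)
#   sns.distplot(x, kde=False)   ->  sns.histplot(x)
#   sns.distplot(x, rug=True)    ->  sns.displot(x, kde=True, rug=True)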
| mwaskom/seaborn | seaborn/distributions.py | Python | bsd-3-clause | 90,480 | [
"Gaussian"
] | 6d424ae334daf106edd754589eefb645125da3d8d2a5cf62d0fd8f35e624e557 |
from datetime import datetime, timedelta
from copy import copy
import re
import os
import pylab
from util.config import Config
from util.units import Units
import util.derived as derived
from util.util import uniquify, any, fatalError, warning
#from util.callCount import FunctionCallCount
class HootPy(object):
"""
HootPy
Purpose: Base class for HootPy. Keeps track of product valid times, adds the finishing touches,
and saves the images.
Started: 14 June 2010 by Tim Supinie (tsupinie@ou.edu)
Completed: [not yet]
Modified: [not yet]
"""
# @FunctionCallCount
def __init__(self, config):
"""
__init__()
Purpose: Constructor for the HootPy class.
Parameters: config [type=dictionary]
Dictionary containing the configuration parameters for this run.
"""
self._valid_time = datetime.utcnow()
for key, value in config.iteritems():
setattr(self, "_%s" % key, value)
try:
meta_filename = config['meta_filename']
except KeyError:
meta_filename = "default/meta.hp"
meta = Config(meta_filename)
for key, value in meta.iteritems():
setattr(self, "_%s" % key, value)
self._var_name_cache = []
self._sanitizeHootPy()
self._sanitize()
return
def _sanitize(self):
"""
sanitize() [protected, abstract]
Purpose: Abstract sanitize method. Implement in a subclass to handle the sanitizing (quality control) of the configuration.
Parameters: [none]
Returns: [nothing]
"""
self._abstract()
# @FunctionCallCount
def loadData(self):
"""
loadData() [public, abstract]
Purpose: Abstract loadData method. Implement in a subclass to handle the loading in of data.
Parameters: [none]
Returns: [nothing]
"""
self._abstract()
# @FunctionCallCount
def plot(self):
"""
plot() [public, abstract]
Purpose: Abstract plot method. Implement in a subclass to handle the plotting of data.
Parameters: [none]
Returns: [nothing]
"""
self._abstract()
def _sanitizeHootPy(self):
"""
_sanitizeHootPy() [private]
Purpose: Sanitizes the plotting configuration for variables that are common to all subclasses.
Parameters: [none]
Returns: [nothing]
"""
self._sanitizeCheck('product_title', str)
#self._sanitizeCheck('data_src', str, ( lambda n: os.path.exists(n), "Data file '%(data_src)s' does not exist." ))
self._sanitizeCheck('data_scheme', str)
self._sanitizeCheck('image_file_name', str, ( lambda n: os.path.exists(os.path.dirname(n)), "Path to file '%(image_file_name)s' does not exist." ))
self._sanitizeCheck('image_size_x', int, ( lambda x: x > 0, "Parameter 'image_size_x' must be positive (was given as '%(image_size_x)d')." ))
self._sanitizeCheck('image_size_y', int, ( lambda y: y > 0, "Parameter 'image_size_y' must be positive (was given as '%(image_size_y)d')." ))
return
def _sanitizeCheck(self, variables, dtype, constraint=None, required=True, private=True):
return self._sanitizeDict(self.__dict__, variables, dtype, "the configuration file", constraint, required, private)
def _sanitizeDict(self, dictionary, variables, dtype, src_name, constraint=None, required=True, private=True):
if constraint is not None:
func, error = constraint
if type(variables) not in [ list, tuple ]:
variables = [ variables ]
if type(dtype) not in [ list, tuple ]:
dtype = [ dtype ]
values = []
for var in variables:
try:
if private: key = "_%s" % var
else: key = var
value = dictionary[key]
except KeyError:
if required:
fatalError("Parameter '%s' must be specified in %s." % (var, src_name))
else:
return False
if type(value) not in dtype:
dtype_strings = [ str(t)[1:-1] for t in dtype ]
if len(dtype) > 1:
dtype_strings[-1] = "or %s" % dtype_strings[-1]
if len(dtype) > 2:
dtype_string = ", ".join(dtype_strings)
else:
dtype_string = " ".join(dtype_strings)
fatalError("Parameter '%s' in %s must have %s" % (var, src_name, dtype_string))
values.append(value)
if constraint is not None:
if not func(*values):
fatalError(error % dict(zip(variables, values)))
return True
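# Illustrative usage sketch (hypothetical parameter name, not from the original
# source): a subclass's _sanitize() could validate its own options with checks like
#   self._sanitizeCheck('contour_interval', [int, float],
#       (lambda i: i > 0, "Parameter 'contour_interval' must be positive "
#                         "(was given as '%(contour_interval)s')."))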
# @FunctionCallCount
def _initializeProduct(self):
"""
_initializeProduct() [protected]
Purpose: Create the initial product and set it up to fill as much of the figure as possible.
Parameters: [none]
Returns: [nothing]
"""
dpi = 80 * 1.25
pylab.figure(figsize=(float(self._image_size_x) / dpi, float(self._image_size_y) / dpi), dpi=dpi)
pylab.axes((0, 0, 1, 1))
return
def _resetProduct(self):
"""
_resetProduct() [protected]
Purpose: Reset the product so we don't have contours, fills, etc., bleeding over between time steps
Parameters: [none]
Returns: [nothing]
"""
dpi = 80 * 1.25
pylab.clf()
pylab.axes((0, 0, 1, 1))
pylab.gcf().set_size_inches(float(self._image_size_x) / dpi, float(self._image_size_y) / dpi)
return
# @FunctionCallCount
def _finalizeProduct(self, plot_time, is_forecast, plot_names=[]):
"""
_finalizeProduct() [protected]
Purpose: Add final things to the product, such as the title, valid time, and border, and then save.
Parameters: plot_time [type=datetime]
Valid time of the product being plotted.
is_forecast [type=bool]
Whether the product is a forecast (if so, an F-hour string is added to the title).
plot_names [type=list]
Names of the plotted quantities, used to build the title string.
Returns: [nothing]
"""
plot_names = uniquify(plot_names)
# Modify the last plot name for joining for the title string
if len(plot_names) > 1:
plot_names[-1] = "and " + plot_names[-1]
# Create the forecast hour string according to whether or not we're passed a forecast hour.
plot_time_delta = plot_time - self._valid_time
hour = Units.convert(plot_time_delta.microseconds, 'us', 'hr') + Units.convert(plot_time_delta.seconds, 's', 'hr') + Units.convert(plot_time_delta.days, 'dy', 'hr')
file_name = self._image_file_name % { 'plot_time':hour }
if is_forecast:
fh_string = " (F%03d)" % hour
else:
fh_string = ""
if self._vertical_level in ['surface', 'sfc', "None"]:
vert_level_str = ""
else:
vert_level_str = " %s" % self._vertical_level
# Create the valid time string and assemble the title string
valid_time_string = plot_time.strftime(self._product_time_format)
title_string = "%s%s %s Valid: %s%s" % (self._product_title, vert_level_str, ", ".join(plot_names), valid_time_string, fh_string)
# Put the title on the image
pylab.title(title_string, weight="bold", size="x-small", bbox=dict(facecolor="#ffffff", alpha=0.7),x=0.5,y=0.95)
# Save the figure
try:
pylab.savefig(file_name)
except IOError:
fatalError("Couldn't save image to %s" % file_name)
print "Saved product '%s', valid at %s%s, to file %s" % (self._product_title,
valid_time_string, fh_string, file_name)
pylab.close()
return
# @FunctionCallCount
def _findAttribute(self, plot, attr_name):
"""
_findAttribute() [protected]
Purpose: Find an attribute in the plot dictionary. If the attribute isn't in the dictionary, look in the member variables of the class. Failing that,
raise an error.
Parameters: plot [type=dictionary]
Dictionary of plot attributes to their values that is searched for an attribute.
attr_name [type=string]
The name of the attribute to search for.
Returns: The value of the attribute given by attr_name.
"""
attribute = ""
# Find the proper data source
try:
attribute = plot[attr_name]
except KeyError:
# The user didn't specify a data_src attribute in the plot dictionary, so look for a global attribute.
try:
attribute = getattr(self, "_%s" % attr_name)
plot[attr_name] = attribute
except:
# The user didn't specify a global data_src attribute, either. Uh-oh ...
raise AttributeError("Attribute %s not found" % attr_name)
return attribute
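# Illustrative lookup (hypothetical values): given a plot dictionary that lacks
# a 'data_src' key and an instance whose config defined data_src (so
# self._data_src exists),
#   self._findAttribute(plot, 'data_src')
# returns self._data_src and caches it back into plot['data_src'].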
@classmethod
def _splitPlots(klass, plot_dicts):
"""
"""
# Split plot_dicts with lists of functions into different entries in plot_dict
indexes = [ idx for idx in xrange(len(plot_dicts)) if type(plot_dicts[idx]['function']) in [ list, tuple ] ]
indexes.sort(reverse=True)
for idx in indexes:
plot = plot_dicts.pop(idx)
for func in plot['function']:
new_plot = copy(plot)
new_plot['function'] = func
plot_dicts.insert(idx, new_plot)
return
def _parseFunctionConstituents(self, function, parse_consts=True, data_scheme=None):
"""
_parseFunctionConstituents() [protected]
Purpose: Take a function string and parse out the variables and constants that the function needs for computations. If the data_scheme
variable is given, convert the variable list to file-based variable names.
Parameters: function [ type=str ]
String containing the function.
parse_consts [ type=bool ] [ optional ]
Boolean value specifying whether or not to parse out the constants in the file name. Default is True.
data_scheme [ type=dict ] [ optional ]
A dictionary containing the mapping of internal variable names to the variable names in the data file.
Returns: A list of internal constant names and/or a list of internal variable names or file variable names, depending on whether or not
parse_consts was set and data_scheme was given.
"""
# Put the variable list into a regexp-like format
nc_variable_list = "(?:^|(?<=[\\W]))(?:" + "|".join(self._var_map.keys()) + ")(?:(?=[\\W])|$)"
# Find all NetCDF variables in the function, removing duplicates
var_list = uniquify(re.findall(nc_variable_list, function))
if parse_consts:
hp_const_list = "|".join([ const for const in dir(derived) if const[0] != "_" ])
# Find all the constants in the function, removing duplicates
const_list = uniquify(re.findall(hp_const_list, function))
if data_scheme is not None:
var_list = [ data_scheme[self._var_map[v]] for v in var_list ]
if parse_consts:
return var_list, const_list
else:
return var_list
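# Illustrative sketch (assuming 'T' and 'P' are keys in self._var_map and
# 'theta' is a constant/function exported by util.derived):
#   self._parseFunctionConstituents("theta(T, P)")
# would return (['T', 'P'], ['theta']); with parse_consts=False it would
# return just ['T', 'P'].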
# @FunctionCallCount
def _loadFunctionString(self, nc, plot_dict, data_scheme, data_index=None, scratch=False):
"""
_loadFunctionString() [protected]
Purpose: Parses a function string from the input file, loads the data from the file, and converts the data to the proper units for plotting.
Parameters: nc [type=DataIO]
DataIO object that loads in the data.
plot_dict [type=dictionary]
Dictionary containing the plot attributes and their values.
data_scheme [type=dictionary]
Dictionary containing the mapping of internal variable names to the variable names in the data file.
data_index [type=np.array]
An array containing the indexes into the data array to return [not yet implemented].
Returns: [nothing]
"""
if type(plot_dict['function']) not in [ list, tuple ]:
plot_dict['function'] = [ plot_dict['function'] ]
plot_dict['data'] = {}
for function in plot_dict['function']:
parse_function = False
try:
default_units = data_scheme[self._unit_map[function]]
except KeyError:
parse_function = True
if parse_function or default_units is not None:
units_function = function
var_list, const_list = self._parseFunctionConstituents(function)
parsed_function = function
for const in const_list:
# Replace each constant in the function and units function with the proper source code to get the value
parsed_function = re.sub("(?:^|(?<=[\\W]))(%s)(?:(?=[\\W])|$)" % const, "derived.\\1", parsed_function)
for nc_variable in var_list:
if scratch:
plot_dict['scratch'] = nc.get_variable(data_scheme[self._var_map[nc_variable]], data_index)
return
if self._inCache(nc_variable):
# Check the cache to make sure the units in the cache are what we think they are (they might have been converted before putting them in).
file_units = data_scheme[self._unit_map[nc_variable]]
cache_units = self._getFromCache(nc_variable, 'units')
if file_units != cache_units:
nc_data = self._getFromCache(nc_variable)
self._updateCache(nc_variable, Units.convert(nc_data, cache_units, file_units), file_units)
else:
# Put data in the global namespace for easy access (will be deleted later)
self._updateCache(nc_variable, nc.get_variable(data_scheme[self._var_map[nc_variable]], data_index),
data_scheme[self._unit_map[nc_variable]])
for const in const_list:
# Find each constant and HootPy function in the string
match = re.search("(?:^|(?<=[\\W]))%s\\(([\\w\\, ]+)\\)?(?:(?=[\\W])|$)" % const, units_function)
# Parse out the arguments to each function. If it's not a function (and really a constant, such as g) give it an empty list for arguments.
if match is not None and match.group(1) is not None:
args = re.split("\,[\s]*", match.group(1))
else:
args = []
# Determine what the units of the data for the arguments are. If the argument's variable name is not in the cache,
# then that probably means it's the units being output from another HootPy function that's already been subbed
# into the string. Put None in its place.
arg_units = [ self._getFromCache(a, 'units') if self._inCache(a) else None for a in args ]
# Determine what the function is expecting
func_units = derived._units(const, *arg_units)
# A bit of idiot-proofing on the arguments
if len(arg_units) != len(func_units['args']): fatalError("Incorrect number of arguments for function %s." % const)
for idx in xrange(len(args)):
# Convert all argument data to the units the function is expecting (only if units are known and differ from what the function expects).
if arg_units[idx] is not None and arg_units[idx] != func_units['args'][idx]:
self._updateCache(args[idx], Units.convert(self._getFromCache(args[idx], 'value'), arg_units[idx], func_units['args'][idx]),
func_units['args'][idx])
# Substitute the units output from this function back into the units string
units_function = re.sub("(?:^|(?<=[\\W]))((?:%s)(?:\\([\w\\, ]+\\))?)(?:(?=[\\W])|$)" % const,
func_units['return'], units_function)
for nc_variable in var_list:
# Sub individual variables' units into the units string
if units_function.find(nc_variable) > -1:
units_function = re.sub("(?:^|(?<=[\\W]))(%s)(?:(?=[\\W])|$)" % nc_variable,
data_scheme[self._unit_map[nc_variable]], units_function)
plot_dict['default_units'] = Units.evaluateFunction(units_function)
else:
parsed_function = function
self._updateCache(parsed_function, nc.get_variable(data_scheme[self._var_map[parsed_function]], data_index), None)
plot_dict['default_units'] = None
# Load data
if len(plot_dict['function']) == 1:
if not scratch:
if type(plot_dict['plot_name']) == dict:
print "Loading/computing data for %s ..." % plot_dict['plot_name'][function]
else:
print "Loading/computing data for %s ..." % plot_dict['plot_name']
exec "plot_dict['data'] = %s " % parsed_function in globals(), locals()
# Do units conversion
if plot_dict['element_config']['units'] is not None:
if type(plot_dict['data']) in [ list, tuple ]:
plot_dict['data'] = tuple([ Units.convert(d, plot_dict['default_units'], plot_dict['element_config']['units']) for d in plot_dict['data'] ])
else:
plot_dict['data'] = Units.convert(plot_dict['data'], plot_dict['default_units'], plot_dict['element_config']['units'])
else:
if not scratch:
if type(plot_dict['plot_name']) == dict:
print "Loading/computing data for %s (%s) ..." % (plot_dict['plot_name'][function], function)
else:
print "Loading/computing data for %s (%s) ..." % (plot_dict['plot_name'], function)
exec "plot_dict['data']['%s'] = %s " % (function, parsed_function) in globals(), locals()
# Do units conversion
if plot_dict['element_config']['units'][function] is not None:
if type(plot_dict['data'][function]) in [ list, tuple ]:
plot_dict['data'][function] = tuple([ Units.convert(d, plot_dict['default_units'], plot_dict['element_config']['units'][function]) for d in plot_dict['data'][function] ])
else:
plot_dict['data'][function] = Units.convert(plot_dict['data'][function], plot_dict['default_units'], plot_dict['element_config']['units'][function])
if scratch:
for nc_variable in var_list:
self._clearCache(nc_variable)
return
# @FunctionCallCount
def _updateCache(self, key, value, units):
"""
_updateCache() [protected]
Purpose: Put data into a global cache. This has a couple of advantages. First, data that is loaded once can be kept around so it isn't
loaded again. Second, putting a variable into the cache means it is accessible by its name only. So putting, say, temperature
data into the cache under 'T' means it can be accessed through the T variable. This provides a convenient way to tell Python
about the data when it's evaluating a function, and doesn't involve as many messy regex substitutions.
Parameters: key [type=string]
The name of the variable (maybe 'T' for temperature).
value [type=int,float,np.array]
The data to store in the cache.
units [type=str]
The units of the data (maybe K for temperature).
Returns: [nothing]
"""
globals()[key] = value
globals()["%s_units" % key] = units
if key not in self._var_name_cache:
self._var_name_cache.append(key)
return
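# Illustrative round trip (hypothetical variable name and units):
#   self._updateCache('T', temperature_data, 'K')
#   self._inCache('T')                # -> True
#   self._getFromCache('T', 'units')  # -> 'K'
#   self._clearCache('T')             # removes T and T_units from globals()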
# @FunctionCallCount
def _getFromCache(self, key, item=None):
"""
_getFromCache() [protected]
Purpose: Retrieve an item from the global cache.
Parameters: key [type=str]
The name of the variable to retrieve from the cache. Equivalent to referencing the
variable by name directly in code.
item [type=str,None]
String specifying what to retrieve. "value" or None will return the data, and "units" will return the units of the data.
Returns: Whatever is specified by the key and item arguments.
"""
if item is None or item == "value":
# Return data
return globals()[key]
elif item == "units":
# Returns the units of the data
return globals()["%s_units" % key]
# @FunctionCallCount
def _inCache(self, key):
"""
_inCache() [protected]
Purpose: Return a boolean specifying whether or not a variable is in the global cache.
Parameters: key [type=str]
The name of the variable we want to check for.
Returns: A boolean specifying whether or not the variable is in the cache.
"""
return key in self._var_name_cache
# @FunctionCallCount
def _clearCache(self,variable=None):
"""
_clearCache() [protected]
Purpose: Clear the global cache of raw data to free up the memory.
Parameters: variable [type=str,None]
The name of a single variable to remove from the cache, or None to clear the entire cache.
Returns: [nothing]
"""
if variable is None:
for nc_variable in self._var_name_cache:
# Delete data in the global namespace
del globals()[nc_variable]
del globals()["%s_units" % nc_variable]
self._var_name_cache = [ ]
else:
try:
del globals()[variable]
del globals()['%s_units' % variable]
self._var_name_cache.remove(variable)
except KeyError:
warning("Variable %s does not exist in the cache" % variable)
return
def _abstract(self):
"""
_abstract() [protected]
Purpose: Emulate abstraction behavior of C++/Java. Raises an exception in abstract methods.
Parameters: [none]
Returns: [nothing]
"""
raise NotImplementedError('Abstract method must be implemented in subclass.')
if __name__ == "__main__":
cfg = {
'forecast_hours':[0, 3, 6, 9, 12],
}
hp = HootPy(cfg)
# hp.loadData()
# hp.plot()
| pulsatrixwx/PulsatrixWx | base/hootpy.py | Python | mit | 23,563 | [
"NetCDF"
] | 3436d663162655647a2c15220d1982104ceb49698159c98ada6a8be92782b603 |
# -*- coding: utf-8 -*-
#
# Instant Press. Instant sites. CMS developed in Web2py Framework
# Site: http://www.instant2press.com
#
# Copyright (c) 2010 Mulone, Pablo Martín
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# http://groups.google.com/group/web2py-usuarios
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from gluon.html import *
from gluon.http import *
from gluon.validators import *
from gluon.sqlhtml import *
import gluon.contrib.simplejson as sj
#local
from utils import *
class Widgets(object):
def __init__(self, i2p):
self.i2p = i2p
def get_menu(self):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
articles = self.i2p.articles
#this lists the top pages
#the maximum display length of the title is 25 characters
xml_pages=""
trunk_title = 25
(pages, pages_count) = articles.get_last_pages(1)
for page in pages:
(url, notvalid) = (IS_URL()(page.post_url))
if notvalid: #this is a normal post
link_page = articles.get_page_permanent_link(page.id, \
page.title[:trunk_title])
else: #this is a url-post
link_page = A(page.title[:trunk_title], _href=page.post_url)
xml_page = '<li>%s</li>' % link_page.xml()
#xml_pages += sanitate_string(xml_page)
xml_pages += xml_page
link_home = A(T('Home'), _href=URL(request.application,\
config.controller_default,\
'index'))
xml_menu='<ul><li>%s</li> %s </ul>' % (link_home,xml_pages)
return xml_menu
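# Illustrative output shape (hypothetical page titles): for two pages titled
# "About" and "Contact", get_menu() returns markup like
#   <ul><li><a href="...">Home</a></li> <li><a href="...">About</a></li>
#   <li><a href="...">Contact</a></li> </ul>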
def front(self):
config = self.i2p.config
siteinfo = self.i2p.siteinfo
if config.front_enabled:
welcome_description = '<div class="entry">%s</div>' \
% siteinfo._get_frontpage()
xml = '<div class="post"> %s </div>' % welcome_description
else:
xml=""
return xml
def sidebar_aboutme(self):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
siteinfo = self.i2p.siteinfo
about_xml = ""
if config.about_enabled:
about_caption = T('About')
about_info = siteinfo._get_aboutme()
if about_info != "":
about_description = '%s' % about_info
about_xml = '<div id="sidebar-about"><h2>%s</h2> %s </div>' \
% (about_caption, about_description)
return about_xml
def sidebar_archive(self):
config = self.i2p.config
articles = self.i2p.articles
archive_xml = ""
if config.archive_enabled:
archive_generate=""
if not config.widgets_ajax:
archive_generate = articles.get_list_archives()
archive_xml = '<div id="sidebar-archive"> %s </div>' % archive_generate
return archive_xml
def footer_archive(self):
config = self.i2p.config
articles = self.i2p.articles
archive_xml = ""
if config.archive_enabled:
archive_generate=""
if not config.widgets_ajax:
archive_generate = articles.get_list_archives()
archive_xml = '<div id="footer-widgets-archives" class="footer-columns"> %s </div>' \
% archive_generate
return archive_xml
def get_pages(self):
T = self.i2p.environment.T
pages_caption = T('Pages')
pages_generate = self.get_menu()
xml_pages = '<h2>%s</h2> %s' % (pages_caption,pages_generate)
return xml_pages
def sidebar_pages(self):
config = self.i2p.config
pages_xml = ""
if config.pages_enabled:
pages_generate=""
if not config.widgets_ajax:
pages_generate = self.get_pages()
pages_xml = '<div id="sidebar-pages"> %s </div>' % (pages_generate)
return pages_xml
def footer_pages(self):
config = self.i2p.config
pages_xml = ""
if config.pages_enabled:
pages_generate=""
if not config.widgets_ajax:
pages_generate = self.get_pages()
pages_xml = '<div id="footer-widgets-pages" class="footer-columns"> %s </div>' \
% (pages_generate)
return pages_xml
def sidebar_links(self):
config = self.i2p.config
articles = self.i2p.articles
links_xml = ""
if config.pages_enabled:
links_generate=""
if not config.widgets_ajax:
links_generate = articles.get_list_links()
links_xml = '<div id="sidebar-links"> %s </div>' % (links_generate)
return links_xml
def load_last_comments(self, page=1):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_comments=""
last_comments=""
header = T('Last comments')
(limit_inf, limit_sup) = get_query_limits(page, 5) #the last five comments
query = (db.comments.id>0)
last_comments = db(query).select(db.comments.ALL,\
orderby=~db.comments.comment_on,\
limitby=(limit_inf, limit_sup))
for comment in last_comments:
#author_avatar = IMG(_src=URL(r=request,c='static',f='images/avatar.png'), alt="avatar", style="padding: 5px; float:left;")
author_style = ""  # "padding: 5px; float:left;"
author_avatar = self.i2p.comments._get_avatar(comment.author_id,\
                                              style=author_style)
text_comment = comment.comment[:60]
comment_user = self.i2p.users.get_user_title(comment.author_id)
comment_time = comment.comment_on.strftime("%B %d, %Y:%I:%M %p")
link_post = self.i2p.articles.get_post_permanent_link(comment.post_id)
xml_comment = '<li><div style="float:left">%s</div> %s says: %s on %s on article %s</li>' \
% (author_avatar.xml(), comment_user, text_comment, \
comment_time, link_post.xml())
#xml_comments += sanitate_string(xml_comment)
xml_comments += xml_comment
if xml_comments!="":
last_comments="<h2>%s</h2><ul>%s</ul>" % (header,xml_comments)
return last_comments
def sidebar_last_comments(self):
comments_xml = ""
config = self.i2p.config
if config.comments_method in ['Disqus']:
comments_xml = '<div id="sidebar-last-comments"> %s </div>' % self._disqus_last_comments()
elif config.comments_method in ['Enabled']:
comments_generate=""
if not config.widgets_ajax:
comments_generate = self.load_last_comments()
comments_xml = '<div id="sidebar-last-comments"> %s </div>' % (comments_generate)
return comments_xml
def sidebar_tags(self):
config = self.i2p.config
articles = self.i2p.articles
tags_xml = ""
if config.tags_enabled:
tags_generate=""
if not config.widgets_ajax:
tags_generate = articles.get_popular_tags()
tags_xml = '<div id="sidebar-tags">%s</div><div style="clear: both; float: none;"></div>' \
% tags_generate
return tags_xml
def sidebar_feed(self):
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
feed_xml = ""
if config.feed_enabled:
feed_caption = T('Rss')
icon_feed_url = URL(request.application,'static','images/feed.png')
img_feed = IMG(_src=icon_feed_url, _alt="Feed", _style="padding-left: 5px;")
link_feedposts = A(T("Rss last posts"), \
_href=URL(request.application,\
config.controller_default,\
'feed_articles.rss' ))
link_feedcomments = A(T("Rss last comments"), \
_href=URL(request.application,\
config.controller_default,\
'feed_comments.rss' ))
feed_posts = '<li>%s %s</li>' % (link_feedposts, img_feed.xml())
feed_comments = '<li>%s %s</li>' % (link_feedcomments, img_feed.xml())
feed_xml = '<div id="sidebar-feed"><h2>%s</h2> <ul> %s %s </ul> </div>' \
% (feed_caption, feed_posts, feed_comments)
return feed_xml
def load_last_posts(self):
articles = self.i2p.articles
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_posts=""
last_posts=""
last_entries = T('Last entries')
(posts, post_count) = articles.get_last_posts(1)
for post in posts:
link_post = articles.get_post_permanent_link(post.id, \
post.title)
xml_post = '<li>%s</li>' % link_post.xml()
xml_posts += xml_post
if xml_posts!="":
last_posts="<h2>%s</h2><ul>%s</ul>" % (last_entries,xml_posts)
return last_posts
def sidebar_last_posts(self):
config = self.i2p.config
last_posts=''
if config.last_post_enabled:
xml_posts=""
if not config.widgets_ajax:
xml_posts = self.load_last_posts()
last_posts='<div id="sidebar-last-posts">%s</div>' % xml_posts
return last_posts
def footer_last_posts(self):
config = self.i2p.config
last_posts=''
if config.last_post_enabled:
xml_posts=""
if not config.widgets_ajax:
xml_posts = self.load_last_posts()
last_posts='<div id="footer-widgets-last-posts" class="footer-columns">%s</div>' \
% xml_posts
return last_posts
def load_categories(self):
config = self.i2p.config
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_cats=""
categories=""
cats = self.i2p.categories.get_list(page=1, limit=30)
for cat in cats:
    post_count = db((db.posts.categories.contains(str(cat.id))) & (db.posts.published) ).count() #contains bug in web2py
    #post_count = 0
    #for post in posts :
    #    if post.published :
    #        post_count+=1
    # Skip empty categories and the placeholder 'ChangeMe' category
    if post_count > 0 and cat.title != 'ChangeMe':
        text_cat = " %s (%s)" % (cat.title,post_count)
        link_cat = A(text_cat,_title="%s"%cat.description,\
                     _href=URL(request.application,\
                               config.controller_default,\
                               'category/by_id', args=[unicode(cat.id)] ))
        xml_cat = '<li>%s</li>' % link_cat.xml()
        xml_cats += xml_cat
if xml_cats!="":
    categories = "<h2>%s</h2>"%T('Categories')
    categories += "<ul>%s</ul>"%xml_cats
return categories
def sidebar_categories(self):
config = self.i2p.config
xml_categories = ""
if config.categories_enabled:
xml_cats=""
if not config.widgets_ajax:
xml_cats = self.load_categories()
xml_categories='<div id="sidebar-categories">%s</div>' % xml_cats
return xml_categories
def footer_categories(self):
config = self.i2p.config
xml_categories = ""
if config.categories_enabled:
xml_cats=""
if not config.widgets_ajax:
xml_cats = self.load_categories()
xml_categories='<div id="footer-widgets-categories" class="footer-columns">%s</div>' \
% xml_cats
return xml_categories
def sidebar_search(self):
config = self.i2p.config
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_content = ""
if config.search_enabled:
title_search = T('Search')
search_url = URL(request.application, \
config.controller_default,\
'search')
xml_content = '''<div id="sidebar-search" >
<h2>%s</h2>
<form method="get" action="%s">
<div><input type="text" name="q" id="sidebar-search-text" value="" /></div>
<div><input type="submit" id="sidebar-search-submit" value="Search" /></div>
</form>
</div>
''' % (title_search,search_url)
return xml_content
def add_this(self, url="",title="",description=""):
config = self.i2p.config
#need fix: need to escape to something like: &amp;
if title!='':
addthis_title = 'addthis:title="%s"' % clean_html(title)
else:
addthis_title = ''
if url!='':
addthis_url = 'addthis:url="%s"' % url
else:
addthis_url = ''
if description!='':
addthis_description = 'addthis:description="%s"' % clean_html(description)
else:
addthis_description = ''
addthis = '''<!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style">
<a href="http://www.addthis.com/bookmark.php?v=250&username=%(username)s" class="addthis_button_compact" %(url)s %(title)s %(description)s >Share</a>
<span class="addthis_separator">|</span>
<a class="addthis_button_facebook" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_myspace" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_google" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_twitter" %(url)s %(title)s %(description)s></a>
</div>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#username=%(username)s"></script>
<!-- AddThis Button END --> ''' % {'username': config.addthis_user, 'url': addthis_url, 'title': addthis_title, 'description': addthis_description}
return addthis
def post_meta(self, post):
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
articles = self.i2p.articles
users = self.i2p.users
post_author_caption = '<span class="author">%s</span>' \
% users.get_user_title(post.created_by_id)
post_category = articles.get_post_category(post.id)
if post_category=="":
post_category = T("uncategorized")
in_category = T('in')
else:
in_category = T('in categories')
#post_time = post.published_on.strftime("%B %d, %Y at %I:%M")
post_time = post.published_on.strftime("%Y-%m-%d %I:%M")
year_full = post.published_on.strftime("%Y")
month = post.published_on.strftime("%m")
link_time = A(post_time, _href=URL(request.application,\
config.controller_default,\
'archives',args=[year_full,month]))
posted_by = T('By')
updated_on = T('Published on')
byline = '%s %s %s %s %s %s' % (updated_on, link_time.xml(), posted_by, \
post_author_caption, in_category, post_category)
return byline
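# Illustrative byline (hypothetical post data): for a post published
# 2010-06-14 10:30 by "admin" in category "News", post_meta() yields roughly
#   Published on <a ...>2010-06-14 10:30</a> By <span class="author">admin</span> in categories News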
def post_extract(self, post):
config = self.i2p.config
T = self.i2p.environment.T
request = self.i2p.environment.request
comments = self.i2p.comments
articles = self.i2p.articles
label_comments = "%s" % T('Comments')
if config.comments_method in ['Enabled'] and not config.widgets_ajax:
comments_count = comments._get_comment_count(post.id)
elif config.comments_method in ['Disqus']:
comments_count = ""
else:
comments_count = 0
if config.comments_method in ['Disqus']:
link_comments = articles.get_post_permanent_link(post.id, \
'Comments', \
'disqus_thread')
else:
link_comments = articles.get_post_permanent_link(post.id, \
label_comments, \
'comments')
link_readmore = articles.get_post_permanent_link(post.id, T("Read more"))
base_http = 'http://' + str(request.env.http_host)
url_permanent = articles.get_post_permanent_link(post.id, only_url=True )
url_post = str(base_http + url_permanent)
if config.addthis_enabled:
#add_this = self.add_this(url_post,post.title,post.text_slice[:100]) #need to pass: title, url, description
add_this = self.add_this(url_permanent,post.title,post.text_slice[:100]) #need to pass: title, url, description
else:
add_this = ""
xml_post = '<div class="post">'
xml_post +='<h2 class="title">%s</h2>' \
% articles.get_post_permanent_link(post.id).xml()
xml_post +='''<div class="meta">%s -
<span class="comments-count" id="comments-count_%s"> %s </span>
%s
</div>''' \
% (self.post_meta(post), post.id, comments_count, link_comments.xml())
if config.editor_language in ['Markmin']:
text_slice = MARKMIN(post.text_slice)
else:
text_slice = post.text_slice
xml_post +='<div class="entry">%s</div>' % text_slice
xml_post +='''<div class="links">
<div class="readmore"> %s </div>
<div class="addthis"> %s </div>
<div style="float:none; clear:both;"></div>
</div>''' % (link_readmore.xml(), add_this)
xml_post +='</div>'
return xml_post
def last_posts(self, page):
articles = self.i2p.articles
(posts, count_posts) = articles.get_last_posts(page)
xml_posts = articles.get_xml_results_from_posts(posts)
xml_posts += articles.pagination_last_post(page, count_posts)
return xml_posts
def disqus_comments(self):
config = self.i2p.config
if config.disqus_dev:
developer = 'var disqus_developer = 1;'
else:
developer = ''
script = '''
<div id="disqus_thread"></div>
<script type="text/javascript">
%(developer)s
/**
* var disqus_identifier; [Optional but recommended: Define a unique identifier (e.g. post id or slug) for this thread]
*/
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = 'http://%(site)s.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript=%(site)s">comments powered by Disqus.</a></noscript>
<a href="http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a>
''' % {'developer': developer, 'site': config.disqus_site}
return script
def disqus_comments_count(self):
config = self.i2p.config
script = '''
<script type="text/javascript">
var disqus_shortname = '%(site)s';
(function () {
var s = document.createElement('script'); s.async = true;
s.src = 'http://disqus.com/forums/%(site)s/count.js';
(document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s);
}());
</script>
''' % {'site': config.disqus_site}
return script
def _disqus_last_comments(self):
T = self.i2p.environment.T
config = self.i2p.config
if self.i2p.config.avatars_enabled:
hide_avatars = 0
else:
hide_avatars = 1
avatar_size = self.i2p.config.avatar_size
recent_comments=T("Recent Comments")
num_items = 5
script = '''
<div id="recentcomments" class="dsq-widget">
<h2 class="dsq-widget-title">%(recent_comments)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/recent_comments_widget.js?num_items=%(num_items)s&hide_avatars=%(hide_avatars)s&avatar_size=%(avatar_size)s&excerpt_length=200">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
''' % {'site': config.disqus_site, 'recent_comments': recent_comments, 'avatar_size': avatar_size, 'hide_avatars': hide_avatars, 'num_items': num_items}
return script
def sidebar_popular_threads(self):
config = self.i2p.config
popular_threads=''
#for now only in disqus
if config.comments_method in ['Disqus']:
popular_threads='<div id="sidebar-popular-threads">%s</div>' % self._disqus_popular_threads()
return popular_threads
def _disqus_popular_threads(self):
config = self.i2p.config
T = self.i2p.environment.T
popular_threads=T("Popular Threads")
script = '''
<div id="popularthreads" class="dsq-widget">
<h2 class="dsq-widget-title">%(popular_threads)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/popular_threads_widget.js?num_items=5">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site,'popular_threads':popular_threads}
return script
def sidebar_top_commenters(self):
config = self.i2p.config
top_commenters=''
#for now only in disqus
if config.comments_method in ['Disqus']:
top_commenters='<div id="sidebar-top-commenters">%s</div>' % self._disqus_top_commenters()
return top_commenters
def _disqus_top_commenters(self):
T = self.i2p.environment.T
config = self.i2p.config
avatar_size = self.i2p.config.avatar_size
if self.i2p.config.avatars_enabled:
hide_avatars = 0
else:
hide_avatars = 1
num_items = 5
top_commenters=T('Top Commenters')
script = '''
<div id="topcommenters" class="dsq-widget">
<h2 class="dsq-widget-title">%(top_commenters)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/top_commenters_widget.js?num_items=%(num_items)s&hide_mods=0&hide_avatars=%(hide_avatars)s&avatar_size=%(avatar_size)s">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site, 'top_commenters':top_commenters, 'avatar_size':avatar_size, 'hide_avatars':hide_avatars, 'num_items':num_items}
return script
def sidebar_combination(self):
config = self.i2p.config
T = self.i2p.environment.T
head_combination = T('Posts')
combination=''
#for now only in disqus
if config.comments_method in ['Disqus']:
combination='<div id="sidebar-combination"><h2>%s</h2>%s</div>' % (head_combination, self._disqus_combination())
return combination
def _disqus_combination(self):
config = self.i2p.config
num_items = 5
script = '''
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/combination_widget.js?num_items=%(num_items)s&hide_mods=0&color=grey&default_tab=recent&excerpt_length=200">
</script>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site, 'num_items':num_items}
return script
def ga_script(self):
config = self.i2p.config
script=""
if config.ga_enabled:
script = '''
<script>
var _gaq = [['_setAccount', '%(ga_id)s'], ['_trackPageview']];
(function(d, t) {
var g = d.createElement(t),
s = d.getElementsByTagName(t)[0];
g.async = true;
g.src = '//www.google-analytics.com/ga.js';
s.parentNode.insertBefore(g, s);
})(document, 'script');
</script>
''' % {'ga_id': config.ga_id}
return script
| syed/instatnt-press | modules/widgets.py | Python | gpl-2.0 | 28,880 | [
"VisIt"
] | 3a9721949ba35d411a4b61acd54f57f0701806cd0c36514a4e26abb9755c40fb |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Reports/Text Reports/Family Group Report"""
#------------------------------------------------------------------------
#
# Python Library
#
#------------------------------------------------------------------------
import copy
from functools import partial
#------------------------------------------------------------------------
#
# GRAMPS
#
#------------------------------------------------------------------------
from gramps.gen.lib import EventRoleType, EventType, NoteType, Person
from gramps.gen.plug.menu import (BooleanOption, FamilyOption, EnumeratedListOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle, TableStyle,
TableCellStyle, FONT_SANS_SERIF, FONT_SERIF,
INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.datehandler import get_date
from gramps.gen.ggettext import sgettext as _
from gramps.gen.display.name import displayer as global_name_display
#------------------------------------------------------------------------
#
# FamilyGroup
#
#------------------------------------------------------------------------
class FamilyGroup(Report):
def __init__(self, database, options, user):
"""
Create the FamilyGroup object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
family_handle - Handle of the family to write report on.
includeAttrs - Whether to include attributes
name_format - Preferred format to display names
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.family_handle = None
family_id = menu.get_option_by_name('family_id').get_value()
family = database.get_family_from_gramps_id(family_id)
if family:
self.family_handle = family.get_handle()
else:
self.family_handle = None
# Copy the global NameDisplay so that we don't change application
# defaults.
self._name_display = copy.deepcopy(global_name_display)
name_format = menu.get_option_by_name("name_format").get_value()
if name_format != 0:
self._name_display.set_default_format(name_format)
get_option_by_name = menu.get_option_by_name
get_value = lambda name:get_option_by_name(name).get_value()
self.recursive = get_value('recursive')
self.missingInfo = get_value('missinginfo')
self.generations = get_value('generations')
self.incParEvents = get_value('incParEvents')
self.incParAddr = get_value('incParAddr')
self.incParNotes = get_value('incParNotes')
self.incParNames = get_value('incParNames')
self.incParMar = get_value('incParMar')
self.incRelDates = get_value('incRelDates')
self.incChiMar = get_value('incChiMar')
self.includeAttrs = get_value('incattrs')
def dump_parent_event(self, name,event):
place = ""
date = ""
descr = ""
if event:
date = get_date(event)
place_handle = event.get_place_handle()
place = ReportUtils.place_name(self.database,place_handle)
descr = event.get_description()
if self.includeAttrs:
for attr in event.get_attribute_list():
if descr:
descr += "; "
descr += _("%(type)s: %(value)s") % {
'type' : attr.get_type(),
'value' : attr.get_value()
}
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(name)
self.doc.end_paragraph()
self.doc.end_cell()
if descr:
self.doc.start_cell("FGR-TextContentsEnd",2)
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(descr)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if date or place:
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.end_paragraph()
self.doc.end_cell()
if (date or place) or not descr:
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(place)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
def dump_parent_parents(self,person):
family_handle = person.get_main_parents_family_handle()
father_name = ""
mother_name = ""
if family_handle:
family = self.database.get_family_from_handle(family_handle)
father_handle = family.get_father_handle()
if father_handle:
father = self.database.get_person_from_handle(father_handle)
father_name = self._name_display.display(father)
if self.incRelDates:
birth_ref = father.get_birth_ref()
birth = " "
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
birth = get_date( event )
death_ref = father.get_death_ref()
death = " "
if death_ref:
event = self.database.get_event_from_handle(death_ref.ref)
death = get_date( event )
if birth_ref or death_ref:
father_name = "%s (%s - %s)" % (father_name,birth,death)
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.database.get_person_from_handle(mother_handle)
mother_name = self._name_display.display(mother)
if self.incRelDates:
birth_ref = mother.get_birth_ref()
birth = " "
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
birth = get_date( event )
death_ref = mother.get_death_ref()
death = " "
if death_ref:
event = self.database.get_event_from_handle(death_ref.ref)
death = get_date( event )
if birth_ref or death_ref:
mother_name = "%s (%s - %s)" % (mother_name,birth,death)
if father_name != "":
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(_("Father"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd",2)
self.doc.start_paragraph('FGR-Normal')
mark = ReportUtils.get_person_mark(self.database,father)
self.doc.write_text(father_name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
elif self.missingInfo:
self.dump_parent_line(_("Father"), "")
if mother_name != "":
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(_("Mother"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd",2)
self.doc.start_paragraph('FGR-Normal')
mark = ReportUtils.get_person_mark(self.database,mother)
self.doc.write_text(mother_name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
elif self.missingInfo:
self.dump_parent_line(_("Mother"), "")
def dump_parent_line(self, name, text):
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(name)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd",2)
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(text)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
def dump_parent_noteline(self, name, note):
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(name)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd", 2)
self.doc.write_styled_note(note.get_styledtext(),
note.get_format(), 'FGR-Note',
contains_html= (note.get_type() ==
NoteType.HTML_CODE)
)
self.doc.end_cell()
self.doc.end_row()
def dump_parent(self,title,person_handle):
if not person_handle and not self.missingInfo:
return
elif not person_handle:
person = Person()
else:
person = self.database.get_person_from_handle(person_handle)
name = self._name_display.display(person)
self.doc.start_table(title,'FGR-ParentTable')
self.doc.start_row()
self.doc.start_cell('FGR-ParentHead',3)
self.doc.start_paragraph('FGR-ParentName')
self.doc.write_text(title + ': ')
mark = ReportUtils.get_person_mark(self.database,person)
self.doc.write_text(name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
birth_ref = person.get_birth_ref()
birth = None
evtName = str(EventType())
if birth_ref:
birth = self.database.get_event_from_handle(birth_ref.ref)
if birth or self.missingInfo:
self.dump_parent_event(evtName,birth)
death_ref = person.get_death_ref()
death = None
evtName = str(EventType(EventType.DEATH))
if death_ref:
death = self.database.get_event_from_handle(death_ref.ref)
if death or self.missingInfo:
self.dump_parent_event(evtName,death)
self.dump_parent_parents(person)
if self.incParEvents:
for event_ref in person.get_primary_event_ref_list():
if event_ref != birth_ref and event_ref != death_ref:
event = self.database.get_event_from_handle(event_ref.ref)
evtType = event.get_type()
name = str( evtType )
self.dump_parent_event(name,event)
if self.incParAddr:
addrlist = person.get_address_list()[:]
for addr in addrlist:
location = ReportUtils.get_address_str(addr)
date = get_date( addr )
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(_("Address"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(location)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if self.incParNotes:
for notehandle in person.get_note_list():
note = self.database.get_note_from_handle(notehandle)
self.dump_parent_noteline(_("Note"), note)
if self.includeAttrs:
for attr in person.get_attribute_list():
self.dump_parent_line(str(attr.get_type()),attr.get_value())
if self.incParNames:
for alt_name in person.get_alternate_names():
name_type = str( alt_name.get_type() )
name = self._name_display.display_name(alt_name)
self.dump_parent_line(name_type, name)
self.doc.end_table()
def dump_marriage(self,family):
if not family:
return
m = None
family_list = family.get_event_ref_list()
for event_ref in family_list:
if event_ref:
event = self.database.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.MARRIAGE and \
(event_ref.get_role() == EventRoleType.FAMILY or
event_ref.get_role() == EventRoleType.PRIMARY):
m = event
break
if len(family_list) > 0 or self.missingInfo:
self.doc.start_table("MarriageInfo",'FGR-ParentTable')
self.doc.start_row()
self.doc.start_cell('FGR-ParentHead',3)
self.doc.start_paragraph('FGR-ParentName')
self.doc.write_text(_("Marriage:"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.dump_parent_event(_("Marriage"),m)
for event_ref in family_list:
if event_ref:
event = self.database.get_event_from_handle(event_ref.ref)
if event.get_type() != EventType.MARRIAGE:
self.dump_parent_event(str(event.get_type()),event)
self.doc.end_table()
def dump_child_event(self,text, name,event):
date = ""
place = ""
if event:
date = get_date(event)
place_handle = event.get_place_handle()
if place_handle:
place = self.database.get_place_from_handle(place_handle).get_title()
self.doc.start_row()
self.doc.start_cell(text)
self.doc.start_paragraph('FGR-Normal')
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContents')
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(name)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContents')
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContentsEnd')
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(place)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
def dump_child(self,index,person_handle):
person = self.database.get_person_from_handle(person_handle)
families = len(person.get_family_handle_list())
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.database.get_event_from_handle(birth_ref.ref)
else:
birth = None
death_ref = person.get_death_ref()
if death_ref:
death = self.database.get_event_from_handle(death_ref.ref)
else:
death = None
spouse_count = 0
if self.incChiMar:
for family_handle in person.get_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
spouse_id = None
if person_handle == family.get_father_handle():
spouse_id = family.get_mother_handle()
else:
spouse_id = family.get_father_handle()
if spouse_id:
spouse_count += 1
self.doc.start_row()
if spouse_count != 0 or self.missingInfo or death is not None or birth is not None:
self.doc.start_cell('FGR-TextChild1')
else:
self.doc.start_cell('FGR-TextChild2')
self.doc.start_paragraph('FGR-ChildText')
index_str = ("%d" % index)
if person.get_gender() == Person.MALE:
self.doc.write_text(index_str + _("acronym for male|M"))
elif person.get_gender() == Person.FEMALE:
self.doc.write_text(index_str + _("acronym for female|F"))
else:
self.doc.write_text(_("acronym for unknown|%dU") % index)
self.doc.end_paragraph()
self.doc.end_cell()
name = self._name_display.display(person)
mark = ReportUtils.get_person_mark(self.database,person)
self.doc.start_cell('FGR-ChildName',3)
self.doc.start_paragraph('FGR-ChildText')
self.doc.write_text(name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if self.missingInfo or birth is not None:
if spouse_count != 0 or self.missingInfo or death is not None:
self.dump_child_event('FGR-TextChild1',_('Birth'),birth)
else:
self.dump_child_event('FGR-TextChild2',_('Birth'),birth)
if self.missingInfo or death is not None:
if spouse_count == 0 or not self.incChiMar:
self.dump_child_event('FGR-TextChild2',_('Death'),death)
else:
self.dump_child_event('FGR-TextChild1',_('Death'),death)
if self.incChiMar:
index = 0
for family_handle in person.get_family_handle_list():
m = None
index += 1
family = self.database.get_family_from_handle(family_handle)
for event_ref in family.get_event_ref_list():
if event_ref:
event = self.database.get_event_from_handle(event_ref.ref)
if event.type == EventType.MARRIAGE:
m = event
break
spouse_id = None
if person_handle == family.get_father_handle():
spouse_id = family.get_mother_handle()
else:
spouse_id = family.get_father_handle()
if spouse_id:
self.doc.start_row()
if m or index != families:
self.doc.start_cell('FGR-TextChild1')
else:
self.doc.start_cell('FGR-TextChild2')
self.doc.start_paragraph('FGR-Normal')
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContents')
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(_("Spouse"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContentsEnd',2)
self.doc.start_paragraph('FGR-Normal')
spouse = self.database.get_person_from_handle(spouse_id)
spouse_name = self._name_display.display(spouse)
if self.incRelDates:
birth = " "
birth_ref = spouse.get_birth_ref()
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
birth = get_date(event)
death = " "
death_ref = spouse.get_death_ref()
if death_ref:
event = self.database.get_event_from_handle(death_ref.ref)
death = get_date(event)
if birth_ref or death_ref:
spouse_name = "%s (%s - %s)" % (spouse_name,birth,death)
mark = ReportUtils.get_person_mark(self.database,spouse)
self.doc.write_text(spouse_name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if m:
evtName = str(EventType(EventType.MARRIAGE))
if index == families:
self.dump_child_event('FGR-TextChild2',evtName,m)
else:
self.dump_child_event('FGR-TextChild1',evtName,m)
def dump_family(self,family_handle,generation):
self.doc.start_paragraph('FGR-Title')
if self.recursive and self.generations:
title=_("Family Group Report - Generation %d") % generation
else:
title=_("Family Group Report")
mark = IndexMark(title, INDEX_TYPE_TOC,1)
self.doc.write_text( title, mark )
self.doc.end_paragraph()
family = self.database.get_family_from_handle(family_handle)
self.dump_parent(_("Husband"),family.get_father_handle())
self.doc.start_paragraph("FGR-blank")
self.doc.end_paragraph()
if self.incParMar:
self.dump_marriage(family)
self.doc.start_paragraph("FGR-blank")
self.doc.end_paragraph()
self.dump_parent(_("Wife"),family.get_mother_handle())
length = len(family.get_child_ref_list())
if length > 0:
self.doc.start_paragraph("FGR-blank")
self.doc.end_paragraph()
self.doc.start_table('FGR-Children','FGR-ChildTable')
self.doc.start_row()
self.doc.start_cell('FGR-ParentHead',4)
self.doc.start_paragraph('FGR-ParentName')
self.doc.write_text(_("Children"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
index = 1
for child_ref in family.get_child_ref_list():
self.dump_child(index,child_ref.ref)
index += 1
self.doc.end_table()
if self.recursive:
for child_ref in family.get_child_ref_list():
child = self.database.get_person_from_handle(child_ref.ref)
for child_family_handle in child.get_family_handle_list():
if child_family_handle != family_handle:
self.doc.page_break()
self.dump_family(child_family_handle,(generation+1))
def write_report(self):
if self.family_handle:
self.dump_family(self.family_handle,1)
else:
self.doc.start_paragraph('FGR-Title')
self.doc.write_text(_("Family Group Report"))
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# MenuReportOptions
#
#------------------------------------------------------------------------
class FamilyGroupOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
##########################
add_option = partial(menu.add_option, _("Report Options"))
##########################
family_id = FamilyOption(_("Center Family"))
family_id.set_help(_("The center family for the report"))
add_option("family_id", family_id)
# We must figure out the value of the first option before we can
# create the EnumeratedListOption
fmt_list = global_name_display.get_name_format()
name_format = EnumeratedListOption(_("Name format"), 0)
name_format.add_item(0, _("Default"))
for num, name, fmt_str, act in fmt_list:
name_format.add_item(num, name)
name_format.set_help(_("Select the format to display names"))
add_option("name_format", name_format)
recursive = BooleanOption(_('Recursive'),False)
recursive.set_help(_("Create reports for all descendants "
"of this family."))
add_option("recursive", recursive)
##########################
add_option = partial(menu.add_option, _("Include"))
##########################
generations = BooleanOption(_("Generation numbers "
"(recursive only)"),True)
generations.set_help(_("Whether to include the generation on each "
"report (recursive only)."))
add_option("generations", generations)
incParEvents = BooleanOption(_("Parent Events"),False)
incParEvents.set_help(_("Whether to include events for parents."))
add_option("incParEvents", incParEvents)
incParAddr = BooleanOption(_("Parent Addresses"),False)
incParAddr.set_help(_("Whether to include addresses for parents."))
add_option("incParAddr", incParAddr)
incParNotes = BooleanOption(_("Parent Notes"),False)
incParNotes.set_help(_("Whether to include notes for parents."))
add_option("incParNotes", incParNotes)
incattrs = BooleanOption(_("Parent Attributes"),False)
incattrs.set_help(_("Whether to include attributes."))
add_option("incattrs", incattrs)
incParNames = BooleanOption(_("Alternate Parent Names"),False)
incParNames.set_help(_("Whether to include alternate "
"names for parents."))
add_option("incParNames", incParNames)
incParMar = BooleanOption(_("Parent Marriage"),False)
incParMar.set_help(_("Whether to include marriage information "
"for parents."))
add_option("incParMar", incParMar)
incRelDates = BooleanOption(_("Dates of Relatives"),False)
incRelDates.set_help(_("Whether to include dates for relatives "
"(father, mother, spouse)."))
add_option("incRelDates", incRelDates)
incChiMar = BooleanOption(_("Children Marriages"),True)
incChiMar.set_help(_("Whether to include marriage information "
"for children."))
add_option("incChiMar", incChiMar)
##########################
add_option = partial(menu.add_option, _("Missing Information"))
##########################
missinginfo = BooleanOption(_("Print fields for missing "
"information"),True)
missinginfo.set_help(_("Whether to include fields for missing "
"information."))
add_option("missinginfo", missinginfo)
def make_default_style(self,default_style):
"""Make default output style for the Family Group Report."""
para = ParagraphStyle()
#Paragraph Styles
font = FontStyle()
font.set_size(4)
para.set_font(font)
default_style.add_paragraph_style('FGR-blank',para)
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(16)
font.set_bold(1)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_header_level(1)
para.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style('FGR-Title',para)
font = FontStyle()
font.set_type_face(FONT_SERIF)
font.set_size(10)
font.set_bold(0)
para = ParagraphStyle()
para.set_font(font)
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style('FGR-Normal',para)
para = ParagraphStyle()
font = FontStyle()
font.set_type_face(FONT_SERIF)
font.set_size(10)
font.set_bold(0)
para.set_font(font)
para.set(lmargin=0.0)
para.set_top_margin(0.0)
para.set_bottom_margin(0.0)
para.set_description(_('The basic style used for the note display.'))
default_style.add_paragraph_style("FGR-Note",para)
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(10)
font.set_bold(1)
para = ParagraphStyle()
para.set_font(font)
para.set_description(_('The style used for the text related to the children.'))
default_style.add_paragraph_style('FGR-ChildText',para)
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(12)
font.set_bold(1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(3)
para.set_description(_("The style used for the parent's name"))
default_style.add_paragraph_style('FGR-ParentName',para)
#Table Styles
cell = TableCellStyle()
cell.set_padding(0.2)
cell.set_top_border(1)
cell.set_bottom_border(1)
cell.set_right_border(1)
cell.set_left_border(1)
default_style.add_cell_style('FGR-ParentHead',cell)
cell = TableCellStyle()
cell.set_padding(0.1)
cell.set_bottom_border(1)
cell.set_left_border(1)
default_style.add_cell_style('FGR-TextContents',cell)
cell = TableCellStyle()
cell.set_padding(0.1)
cell.set_bottom_border(0)
cell.set_left_border(1)
cell.set_padding(0.1)
default_style.add_cell_style('FGR-TextChild1',cell)
cell = TableCellStyle()
cell.set_padding(0.1)
cell.set_bottom_border(1)
cell.set_left_border(1)
cell.set_padding(0.1)
default_style.add_cell_style('FGR-TextChild2',cell)
cell = TableCellStyle()
cell.set_padding(0.1)
cell.set_bottom_border(1)
cell.set_right_border(1)
cell.set_left_border(1)
default_style.add_cell_style('FGR-TextContentsEnd',cell)
cell = TableCellStyle()
cell.set_padding(0.2)
cell.set_bottom_border(1)
cell.set_right_border(1)
cell.set_left_border(1)
default_style.add_cell_style('FGR-ChildName',cell)
table = TableStyle()
table.set_width(100)
table.set_columns(3)
table.set_column_width(0,20)
table.set_column_width(1,40)
table.set_column_width(2,40)
default_style.add_table_style('FGR-ParentTable',table)
table = TableStyle()
table.set_width(100)
table.set_columns(4)
table.set_column_width(0,7)
table.set_column_width(1,18)
table.set_column_width(2,35)
table.set_column_width(3,40)
default_style.add_table_style('FGR-ChildTable',table)
| arunkgupta/gramps | gramps/plugins/textreport/familygroup.py | Python | gpl-2.0 | 32,883 | [
"Brian"
] | e75c2f47a34e32aabb4d11149047ef0d5d900c14b641f0b317a3cee69f3f7c93 |
from __future__ import absolute_import
import MDAnalysis
from MDAnalysis.tests.datafiles import GRO, XTC
universe = MDAnalysis.Universe(GRO, XTC)
#old selection
all_selection = universe.selectAtoms('all')
#additional old selectAtoms selection (this comment shouldn't be modified despite containing the method name)
all_selection.selectAtoms('bynum 1:10')
#testing atomgroup methods to properties (and exclusion of comments from conversion):
#all_selection.residues()
all_selection.residues()
#all_selection.charges()
all_selection.charges()
#all_selection.indices()
all_selection.indices()
#all_selection.masses()
all_selection.masses()
#all_selection.names()
all_selection.names()
#all_selection.types()
all_selection.types()
#all_selection.radii()
all_selection.radii()
#all_selection.resids()
all_selection.resids()
#all_selection.resnames()
all_selection.resnames()
#all_selection.resnums()
all_selection.resnums()
#all_selection.segids()
all_selection.segids()
#similarly for atomgroup count method renaming:
#all_selection.numberOfAtoms()
all_selection.numberOfAtoms()
#all_selection.numberOfResidues()
all_selection.numberOfResidues()
#all_selection.numberOfSegments()
all_selection.numberOfSegments()
#for old import statements:
#import MDAnalysis.KDTree
import MDAnalysis.KDTree
#from MDAnalysis import KDTree
from MDAnalysis import KDTree
#import MDAnalysis.core.transformations
import MDAnalysis.core.transformations
#from MDAnalysis.core import transformations
from MDAnalysis.core import transformations
#import MDAnalysis.core.util
import MDAnalysis.core.util
#from MDAnalysis.core import util
from MDAnalysis.core import util
#import MDAnalysis.core.log
import MDAnalysis.core.log
#from MDAnalysis.core import log
from MDAnalysis.core import log
#import MDAnalysis.core.units
import MDAnalysis.core.units
#from MDAnalysis.core import units
from MDAnalysis.core import units
#import MDAnalysis.core.distances
import MDAnalysis.core.distances
#from MDAnalysis.core import distances
from MDAnalysis.core import distances
#import MDAnalysis.core.parallel
import MDAnalysis.core.parallel
#from MDAnalysis.core import parallel
from MDAnalysis.core import parallel
# These methods are now properties returning an object
#AtomGroup.bond() -> AtomGroup.bond.value()
AtomGroup.bond()
#AtomGroup.angle() -> AtomGroup.angle.value()
AtomGroup.angle()
#AtomGroup.torsion() -> AtomGroup.dihedral.value()
AtomGroup.torsion()
#AtomGroup.improper() -> AtomGroup.improper.value()
AtomGroup.improper()
#atomgroup, atom and universe torsion to dihedral conversions
#AtomGroup.torsions -> AtomGroup.dihedrals
AtomGroup.torsions
#Atom.torsions -> Atom.dihedrals
Atom.torsions
#Universe.torsions -> Universe.dihedrals
Universe.torsions
#camelcase fixes
# from core.AtomGroup
#totalMass -> total_mass
ag.totalMass
#totalCharge -> total_charge
ag.totalCharge
#centerOfGeometry -> center_of_geometry
ag.centerOfGeometry
#centerOfMass -> center_of_mass
ag.centerOfMass
#radiusOfGyration -> radius_of_gyration
ag.radiusOfGyration
#shapeParameter -> shape_parameter
ag.shapeParameter
#momentOfInertia -> moment_of_inertia
ag.momentOfInertia
#principalAxes -> principal_axes
ag.principalAxes
#packIntoBox -> pack_into_box
ag.packIntoBox
#asUniverse -> as_universe
ag.asUniverse
#align_principalAxis -> align_principal_axis
ag.align_principalAxis
# from lib.distances
#applyPBC -> apply_PBC
lib.distances.applyPBC
#frame_count = universe.trajectory.numframes
frame_count = universe.trajectory.numframes
traj = universe.trajectory
#frame_count = traj.numframes
frame_count = traj.numframes
# From MDAnalysis.lib.distances
#calc_torsions() -> calc_dihedrals()
#from MDAnalysis.lib.distances import calc_torsions
from MDAnalysis.lib.distances import calc_torsions
#MDAnalysis.lib.distances.calc_torsions()
MDAnalysis.lib.distances.calc_torsions()
result = MDAnalysis.lib.distances.calc_torsions()
#dist.calc_torsions()
dist.calc_torsions()
#atomgroup method pluralizations
#set_mass(new) --> set_masses(new)
ag.set_mass(new)
#set_charge(new) --> set_charges(new)
ag.set_charge(new)
#set_name(new) --> set_names(new)
ag.set_name(new)
#set_type(new) --> set_types(new)
ag.set_type(new)
#set_radius(new) --> set_radii(new)
ag.set_radius(new)
#set_bfactor(new) --> set_bfactors(new)
ag.set_bfactor(new)
#set_altloc(new) --> set_altlocs(new)
ag.set_altloc(new)
#set_serial(new) --> set_serials(new)
ag.set_serial(new)
#set_resid(new) --> set_resids(new)
ag.set_resid(new)
#set_resname(new) --> set_resnames(new)
ag.set_resname(new)
#set_resnum(new) --> set_resnums(new)
ag.set_resnum(new)
#set_segid(new) --> set_segids(new)
ag.set_segid(new)
#this test case has caused issues:
g.set_resid(resid * np.ones(len(g)))
#frame numbering is now 0-based:
#ts.frame - 1 -> ts.frame - 0
ts.frame - 1
#ts.frame + 2 -> ts.frame + 3
ts.frame + 2
#ts.frame == 3 -> ts.frame == 2
ts.frame == 3
#ts.frame != 5 -> ts.frame != 4
ts.frame != 5
#another
ts.frame = 9
#+1
[ts.frame for ts in self.trajectory[2:9:3]]
#+1
[ts.frame for ts in self.trajectory]
assert_equal(self.ts.frame, 1, "rewinding to frame 1")
#decoy comment
assert_almost_equal(ts.frame, 544)
assert_almost_equal(ts.dummy, 544)
#frame warning with indentation complexity:
class Dummy(object):
assert_almost_equal(ts.frame, 544)
ts.frame = 77
#numatoms to n_atoms keyword argument conversion while preserving the conversion from numberOfAtoms() to n_atoms as well:
with MDAnalysis.Writer(pdbtrj, multiframe=True, bonds=False, numatoms=u.atoms.numberOfAtoms()) as PDB:
pass
#alternative call syntax:
with MDAnalysis.coordinates.core.writer(pdbtrj, multiframe=True, bonds=False, numatoms=u.atoms.numberOfAtoms()) as PDB:
pass
#the above fix should be specific to .writer or .Writer, so the following should not be recognized (as a probe for specificity) from the keyword argument standpoint [method replacement is ok]:
with MDAnalysis.coordinates.core.writerr(pdbtrj, multiframe=True, bonds=False, numatoms=u.atoms.numberOfAtoms()) as PDB:
pass
#however, the fixer should be sufficiently flexible to recognize a different input filename, the omission of default arguments, spacing between 'numatoms' and '=', and an explicit integer value for numatoms, along with some additional kwargs:
with MDAnalysis.Writer(other_filename, numatoms = 55, start = 0, step = 2) as GRO:
pass
| kain88-de/mdanalysis | package/MDAnalysis/migration/test_dummy_old_MDA_code.py | Python | gpl-2.0 | 6,373 | [
"MDAnalysis"
] | b6098570e5e0491e3ebbac860ac30b4507069180b2e77008bf4c4641a915f0ea |
import logging
from .frame import Frame
from .mapping import Mapping
from .bondset import BondSet
from .forcefield import ForceField
from .interface import Progress
logger = logging.getLogger(__name__)
def main(args, config):
"""
Main function of the program PyCGTOOL.
    Performs the complete AA->CG mapping and outputs files dependent on the given input.
:param args: Arguments from argparse
:param config: Configuration dictionary
"""
frame = Frame(gro=args.gro, xtc=args.xtc, itp=args.itp, frame_start=args.begin)
if args.bnd:
logger.info("Bond measurements will be made")
bonds = BondSet(args.bnd, config)
else:
logger.info("Bond measurements will not be made")
if args.map:
logger.info("Mapping will be performed")
mapping = Mapping(args.map, config, itp=args.itp)
cgframe = mapping.apply(frame)
cgframe.output(config.output_name + ".gro", format=config.output)
else:
logger.info("Mapping will not be performed")
cgframe = frame
# Only measure bonds from GRO frame if no XTC is provided
# Allows the user to get a topology from a single snapshot
if args.bnd and args.xtc is None:
bonds.apply(cgframe)
# Main loop - perform mapping and measurement on every frame in XTC
def main_loop():
nonlocal cgframe
if not frame.next_frame():
return False
if args.map:
cgframe = mapping.apply(frame, cgframe=cgframe)
if config.output_xtc:
cgframe.write_xtc(config.output_name + ".xtc")
else:
cgframe = frame
if args.bnd:
bonds.apply(cgframe)
return True
numframes = frame.numframes - args.begin if args.end == -1 else args.end - args.begin
logger.info("Beginning analysis of {0} frames".format(numframes))
Progress(numframes, dowhile=main_loop, quiet=args.quiet).run()
if args.bnd:
if args.map:
logger.info("Beginning Boltzmann inversion")
bonds.boltzmann_invert(progress=(not args.quiet))
if config.output_forcefield:
logger.info("Creating GROMACS forcefield directory")
ForceField(config.output_name).write(config.output_name, mapping, bonds)
logger.info("GROMACS forcefield directory created")
else:
bonds.write_itp(config.output_name + ".itp", mapping=mapping)
if config.dump_measurements:
logger.info("Dumping bond measurements to file")
bonds.dump_values(config.dump_n_values)
def map_only(args, config):
"""
Perform AA->CG mapping and output coordinate file.
:param args: Program arguments
:param config: Object containing run options
"""
frame = Frame(gro=args.gro, xtc=args.xtc)
mapping = Mapping(args.map, config)
cgframe = mapping.apply(frame)
cgframe.output(config.output_name + ".gro", format=config.output)
if args.xtc and (config.output_xtc or args.outputxtc):
# Main loop - perform mapping and measurement on every frame in XTC
def main_loop():
nonlocal cgframe
if not frame.next_frame():
return False
cgframe = mapping.apply(frame, cgframe=cgframe)
cgframe.write_xtc(config.output_name + ".xtc")
return True
numframes = frame.numframes - args.begin if args.end == -1 else args.end - args.begin
logger.info("Beginning analysis of {0} frames".format(numframes))
its = Progress(numframes, dowhile=main_loop, quiet=args.quiet).run()
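# A minimal, illustrative invocation (all names here are hypothetical; the
# args namespace and the config object are normally built by the
# command-line frontend, not constructed by hand):
#
#   from argparse import Namespace
#   args = Namespace(gro="frame.gro", xtc="traj.xtc", map="cg.map",
#                    begin=0, end=-1, quiet=False, outputxtc=False)
#   map_only(args, config)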
| jag1g13/pycgtool | pycgtool/pycgtool.py | Python | gpl-3.0 | 3,645 | [
"Gromacs"
] | 6d5085d72f4a03dc34de7afa7dd56a415a5225efea19ac12f0e55d587c6b6d89 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This package contains various command line wrappers to programs used in
pymatgen that do not have Python equivalents.
"""
| xhqu1981/pymatgen | pymatgen/command_line/__init__.py | Python | mit | 277 | [
"pymatgen"
] | d4908798b821104e96efbafea072577a24270dbb2c5b6368ccf3dfd8a1e78663 |
""" JobWrapper test
"""
import unittest
import os
import sys
from DIRAC import gLogger
from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory
from DIRAC.WorkloadManagementSystem.Utilities.Utils import createJobWrapper
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
class JobWrapperTestCase(unittest.TestCase):
"""Base class for the jobWrapper test cases"""
def setUp(self):
gLogger.setLevel("DEBUG")
self.wrapperFile = None
# get proxy
proxyInfo = getProxyInfo(disableVOMS=True)
proxyChain = proxyInfo["Value"]["chain"]
proxyDumped = proxyChain.dumpAllToString()
self.payloadProxy = proxyDumped["Value"]
def tearDown(self):
pass
class JobWrapperSubmissionCase(JobWrapperTestCase):
"""JobWrapperSubmissionCase represents a test suite for"""
def test_CreateAndSubmit(self):
jobParams = {
"JobID": "1",
"JobType": "Merge",
"CPUTime": "1000000",
"Executable": "dirac-jobexec",
"Arguments": "helloWorld.xml -o LogLevel=DEBUG --cfg pilot.cfg",
"InputSandbox": ["helloWorld.xml", "exe-script.py"],
}
resourceParams = {}
optimizerParams = {}
# res = createJobWrapper( 1, jobParams, resourceParams, optimizerParams, logLevel = 'DEBUG' )
# self.assertTrue( res['OK'] )
# wrapperFile = res['Value']
ceFactory = ComputingElementFactory()
ceInstance = ceFactory.getCE("InProcess")
self.assertTrue(ceInstance["OK"])
computingElement = ceInstance["Value"]
# res = computingElement.submitJob( wrapperFile, self.payloadProxy )
# self.assertTrue( res['OK'] )
if "pilot.cfg" in os.listdir("."):
jobParams.setdefault("ExtraOptions", "pilot.cfg")
res = createJobWrapper(
2, jobParams, resourceParams, optimizerParams, extraOptions="pilot.cfg", logLevel="DEBUG"
)
else:
res = createJobWrapper(2, jobParams, resourceParams, optimizerParams, logLevel="DEBUG")
self.assertTrue(res["OK"], res.get("Message"))
wrapperFile = res["Value"][0]
res = computingElement.submitJob(wrapperFile, self.payloadProxy)
self.assertTrue(res["OK"], res.get("Message"))
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(JobWrapperTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(JobWrapperSubmissionCase))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
| DIRACGrid/DIRAC | tests/Integration/WorkloadManagementSystem/Test_JobWrapper.py | Python | gpl-3.0 | 2,686 | [
"DIRAC"
] | 5f8eaa8736ab4c5508e760b6b620fca608a62d6d5b5090723ca52a657c791710 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Test single point time-dependent logfiles in cclib"""
import numpy
import bettertest
class GenericTDTest(bettertest.TestCase):
"""Generic time-dependent HF/DFT unittest"""
number = 5
expected_l_max = 41000
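    # ``number`` is the expected count of excited states parsed and
    # ``expected_l_max`` the approximate energy of the most intense
    # transition (cclib stores etenergies in cm^-1); parser-specific
    # subclasses override these values.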
def testenergies(self):
"""Is the l_max reasonable?"""
self.assertEqual(len(self.data.etenergies), self.number)
# Note that if all oscillator strengths are zero (like for triplets)
# then this will simply pick out the first energy.
idx_lambdamax = [i for i, x in enumerate(self.data.etoscs)
if x==max(self.data.etoscs)][0]
self.assertInside(self.data.etenergies[idx_lambdamax], self.expected_l_max, 5000)
def testoscs(self):
"""Is the maximum of etoscs in the right range?"""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertInside(max(self.data.etoscs), 0.67, 0.1)
def testsecs(self):
"""Is the sum of etsecs close to 1?"""
self.assertEqual(len(self.data.etsecs), self.number)
lowestEtrans = self.data.etsecs[1]
sumofsec = sum([z*z for (x, y, z) in lowestEtrans])
self.assertInside(sumofsec, 1.0, 0.16)
def testsecs_transition(self):
"""Is the lowest E transition from the HOMO or to the LUMO?"""
idx_minenergy = [i for i, x in enumerate(self.data.etenergies)
if x==min(self.data.etenergies)][0]
sec = self.data.etsecs[idx_minenergy]
t = [(c*c, s, e) for (s, e, c) in sec]
t.sort()
t.reverse()
self.assert_(t[0][1][0]==self.data.homos[0] or
t[0][2][0]==self.data.homos[0]+1, t[0])
def testsymsnumber(self):
"""Is the length of etsyms correct?"""
self.assertEqual(len(self.data.etsyms), self.number)
class ADFTDDFTTest(GenericTDTest):
"""Customized time-dependent DFT unittest"""
number = 5
def testsecs(self):
"""Is the sum of etsecs close to 1?"""
self.assertEqual(len(self.data.etsecs), self.number)
lowestEtrans = self.data.etsecs[1]
#ADF squares the etsecs
sumofsec = sum([z for (x, y, z) in lowestEtrans])
self.assertInside(sumofsec, 1.0, 0.16)
class GaussianTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
expected_l_max = 48000
def testrotatsnumber(self):
"""Is the length of etrotats correct?"""
self.assertEqual(len(self.data.etrotats), self.number)
class GAMESSUSTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
number = 10
class JaguarTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
expected_l_max = 48000
def testoscs(self):
"""Is the maximum of etoscs in the right range?"""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertInside(max(self.data.etoscs), 1.0, 0.2)
class OrcaTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
number = 10
expected_l_max = 48000
def testoscs(self):
"""Is the maximum of etoscs in the right range?"""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertInside(max(self.data.etoscs), 1.0, 0.1)
class GenericTDDFTtrpTest(GenericTDTest):
"""Generic time-dependent HF/DFT (triplet) unittest"""
number = 5
expected_l_max = 24500
def testoscs(self):
"""Triplet excitations should be disallowed."""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertInside(max(self.data.etoscs), 0.0, 0.01)
if __name__=="__main__":
from testall import testall
testall(modules=["TD"])
| Clyde-fare/cclib | test/testTD.py | Python | lgpl-2.1 | 4,216 | [
"ADF",
"cclib"
] | 14eb62d612c39700d24dd7e8df0c5cc1a65b9c03872285aa914621d9526ddb5e |
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
import random
import os
def javapredict_dynamic_data():
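    """Build a random H2O frame and a random GLM parameter set, then verify
    via pyunit_utils.javapredict (with compile_only=True) that the exported
    POJO compiles."""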
# Generate random dataset
dataset_params = {}
dataset_params['rows'] = random.sample(list(range(5000,15001)),1)[0]
dataset_params['cols'] = random.sample(list(range(10,21)),1)[0]
dataset_params['categorical_fraction'] = round(random.random(),1)
left_over = (1 - dataset_params['categorical_fraction'])
dataset_params['integer_fraction'] = round(left_over - round(random.uniform(0,left_over),1),1)
if dataset_params['integer_fraction'] + dataset_params['categorical_fraction'] == 1:
if dataset_params['integer_fraction'] > dataset_params['categorical_fraction']:
dataset_params['integer_fraction'] = dataset_params['integer_fraction'] - 0.1
else:
dataset_params['categorical_fraction'] = dataset_params['categorical_fraction'] - 0.1
dataset_params['missing_fraction'] = random.uniform(0,0.5)
dataset_params['has_response'] = True
dataset_params['randomize'] = True
dataset_params['factors'] = random.randint(2,10)
print("Dataset parameters: {0}".format(dataset_params))
append_response = False
family = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1)[0]
if family == 'binomial': dataset_params['response_factors'] = 2
elif family == 'gaussian': dataset_params['response_factors'] = 1
else:
dataset_params['has_response'] = False
response = h2o.H2OFrame([[random.randint(1,1000)] for r in range(0,dataset_params['rows'])])
append_response = True
print("Family: {0}".format(family))
train = h2o.create_frame(**dataset_params)
if append_response:
response.set_name(0,"response")
train = response.cbind(train)
if family == 'binomial': train['response'] = train['response'].asfactor()
results_dir = pyunit_utils.locate("results")
h2o.download_csv(train["response"],os.path.join(results_dir,"glm_dynamic_preimputed_response.log"))
train.impute("response", method="mode")
print("Training dataset:")
print(train)
# Save dataset to results directory
h2o.download_csv(train,os.path.join(results_dir,"glm_dynamic_training_dataset.log"))
# Generate random parameters
params = {}
if random.randint(0,1): params['alpha'] = random.random()
params['family'] = family
if params['family'] == "tweedie":
if random.randint(0,1):
params['tweedie_variance_power'] = round(random.random()+1,6)
params['tweedie_link_power'] = 1 - params['tweedie_variance_power']
print("Parameter list: {0}".format(params))
x = list(range(1,train.ncol))
y = "response"
pyunit_utils.javapredict(algo="glm", equality=None, train=train, test=None, x=x, y=y, compile_only=True, **params)
if __name__ == "__main__":
pyunit_utils.standalone_test(javapredict_dynamic_data)
else:
javapredict_dynamic_data()
| YzPaul3/h2o-3 | h2o-py/tests/testdir_javapredict/pyunit_javapredict_dynamic_data_paramsGLM.py | Python | apache-2.0 | 3,164 | [
"Gaussian"
] | ca39333cefafad5e2b5d88c238e9685702dad64f793a21b404f1fcf3adeb7974 |
'''
Created on Mar 23, 2015
@author: venkatesan
'''
#import sys
#sys.path.append("/home/venkatesan/Downloads/biopython-1.65/Bio")
from Bio import SwissProt
from riceKB.globalVars import *
import pprint
import re
import os
from _collections import defaultdict
def upToRDF(up_files, rdf_out_dir): #, output_file
rdf_file = "uniport.plants.ttl"
output_file = os.path.join(rdf_out_dir, rdf_file)
output_writer = open(output_file, "w")
rdf_buffer = ''
prot_counter = 0
pp = pprint.PrettyPrinter(indent=4)
up_base_uri = "http://purl.uniprot.org/"
# up_base_ns = "uniprot_base:"
print "************* Converting Uniprot data to RDF ***************\n"
output_writer.write(base + "\t" + "<" + base_uri + "> .\n")
output_writer.write(pr + "\t" + rdf_ns + "<" + rdf + "> .\n")
output_writer.write(pr + "\t" + rdfs_ns + "<" + rdfs + "> .\n")
output_writer.write(pr + "\t" + owl_ns + "<" + owl + "> .\n")
# output_writer.write(pr + "\t" + xsd_ns + "<" + xsd + "> .\n")
output_writer.write(pr + "\t" + base_vocab_ns + "<" + base_vocab_uri + "> .\n")
output_writer.write(pr + "\t" + obo_ns + "<" + obo_uri + "> .\n")
output_writer.write(pr + "\t" + sio_ns + "<" + sio_uri + "> .\n")
# output_writer.write(pr + "\t" + ncbi_tax_ns + "<" + ncbi_tax_uri + "> .\n")
# output_writer.write(pr + "\t" + up_base_ns + "<" + up_base_uri + "> .\n")
output_writer.write(pr + "\t" + up_ns + "<" + uniprot + "> .\n\n")
for upfile in up_files:
file_handle = open(upfile, "r")
up_records = SwissProt.parse(file_handle)
# xrefs = defaultdict(list)
# xref_ids = list()
for record in up_records:
xrefs = defaultdict(list)
rdf_buffer = ''
for taxID in record.taxonomy_id:
if taxID in taxon_ids:
# rdf_buffer = ''
# Accession
if len(record.accessions) > 1:
prim_accession = record.accessions.pop(0)
prot_counter += 1
rdf_buffer += up_ns + prim_accession + "\n" #output_writer.write(up_ns + prim_accession + "\n")
rdf_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n" #output_writer.write("\t" + rdf_ns + "type" + "\t" + base_vocab_ns + "Protein" + " ;\n")
rdf_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + protein_term + " ;\n" #output_writer.write("\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + protein_term + " ;\n")
for altID in record.accessions:
rdf_buffer += "\t" + base_vocab_ns + "has_alternative_id" + "\t" + up_ns + altID + " ;\n" #output_writer.write("\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + protein_term + " ;\n")
else:
prim_accession = record.accessions[0]
prot_counter += 1
rdf_buffer += up_ns + prim_accession + "\n"
rdf_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
rdf_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + protein_term + " ;\n"
# Label
print record.entry_name
rdf_buffer += "\t" + rdfs_ns + "label" + "\t" + '"%s"' % (record.entry_name) + " ;\n"
# Description
if record.description:
descriptions = record.description.split(';')
                        description = descriptions[0][14:]  # strip the leading 'RecName: Full=' (14 characters)
rdf_buffer += "\t" + base_vocab_ns + "description" + "\t" + '"%s"' % (description) + " ;\n"
# print description
# Gene Name
# print record.gene_name
if record.gene_name:
raw_strings = record.gene_name.split(';')
# print raw_strings
string_name = raw_strings[0]
gene_names = string_name.split('=')#record.gene_name.lstrip('Name=')
search_pattern = re.search("\s{", gene_names[1])
if search_pattern:
names = re.split("\s{", gene_names[1])
symbol = names[0]
rdf_buffer += "\t" + base_vocab_ns + "has_symbol" + "\t" + '"%s"' % (symbol) + " ;\n" #gene_names[0].lstrip('Name=')
else:
symbol = gene_names[1]
rdf_buffer += "\t" + base_vocab_ns + "has_symbol" + "\t" + '"%s"' % (symbol) + " ;\n"
# print symbol #gene_names[0].lstrip('Name=')
# for name in gene_names:
# name = name.lstrip(' ')
# print name
# Taxon
rdf_buffer += "\t" + base_vocab_ns + "taxon" + "\t" + obo_ns + "NCBITaxon_" + taxID + " ;\n"
# taxID
# Comments
if record.comments:
raw_comment = ''.join(record.comments)
comment = raw_comment.replace('"', '')
rdf_buffer += "\t" + base_vocab_ns + "comment" + "\t" + '"%s"' % (comment) + " ;\n"
# print (comment)
# Keywords
# print record.keywords
if record.keywords:
for keyword in record.keywords:
# print keyword
rdf_buffer += "\t" + base_vocab_ns + "classified_with" + "\t" + '"%s"' % (keyword) + " ;\n"
# Cross References
# pp.pprint(record.cross_references[0])
for dbs in record.cross_references:
dbname = dbs[0]
ids = dbs[1]
xrefs[dbname].append(ids)
for key in xrefs:
if key != "GO":
db_namespace = key.lower()
for dbid in xrefs[key]:
rdf_buffer += "\t" + base_vocab_ns + "has_dbxref" + "\t" + "<" + up_base_uri + db_namespace + "/" + dbid + ">" + " ;\n"
                    # Cross references using blank node
# for key in xrefs:
# rdf_buffer += "\t" + base_vocab_ns + "has_dbxref" + "\t" + "[" + "\n"
# rdf_buffer += "\t" + "\t" + base_vocab_ns + "dbname" + "\t" + '"%s"' % (key) + " ;\n" #"[" +
# for dbid in xrefs[key]:
# rdf_buffer += "\t" + "\t" + base_vocab_ns + "id" + "\t" + '"%s"' % (dbid) + " ;\n"
# rdf_buffer = re.sub(' ;$', '', rdf_buffer)
# rdf_buffer += "\t" + "\t" + "]" + " ;\n"
rdf_buffer = re.sub(' ;$', ' .', rdf_buffer)
output_writer.write(rdf_buffer)
file_handle.close()
output_writer.close()
print "Number of Proteins: %s\n" % (str(prot_counter))
print "*************** UniProt RDF conversion completed ************\n"
# pp.pprint(record.cross_references) #taxonomy_id cross_references comments description keywords gene_name molecule_type
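# Example invocation (the file and directory names are illustrative only):
#
#   upToRDF(['uniprot_sprot_plants.dat'], '/tmp/agrold_rdf')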
| aravindvenkatesan/AgroLD-scripts | AgroLD_ETL/riceKB/uniprotToRDF.py | Python | cc0-1.0 | 7,725 | [
"Biopython"
] | d8c14428e48035ef70a928a1c790ae8ea0ea4818367bdfdc635a34d1eea0baa1 |
import json
import colorsys
from random import random
from string import upper
from django.http import HttpResponse
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import User
def access_check(user):
""" Returns true if users are logged in or if they have the general
can_browse permission assigned (i.e. not with respect to a certain object).
    This is also used to allow the anonymous (not logged in) user to retrieve
    data if it has been granted the 'can_browse' permission.
"""
return user.is_authenticated() or user.has_perm('catmaid.can_browse')
@user_passes_test(access_check)
def user_list(request):
result = []
for u in User.objects.all().order_by('last_name', 'first_name'):
up = u.userprofile
result.append({
"id": u.id,
"login": u.username,
"full_name": u.get_full_name(),
"first_name": u.first_name,
"last_name": u.last_name,
"color": (up.color.r, up.color.g, up.color.b) })
return HttpResponse(json.dumps(result), content_type='text/json')
@user_passes_test(access_check)
def user_list_datatable(request):
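    """ Serve the server-side backend of a jQuery DataTables user listing.
    The standard DataTables POST parameters (iDisplayStart, iDisplayLength,
    iSortingCols, iSortCol_N, sSortDir_N) are honored, and the result can
    optionally be restricted to users that used certain annotations or that
    annotated a given neuron.
    """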
display_start = int(request.POST.get('iDisplayStart', 0))
display_length = int(request.POST.get('iDisplayLength', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
should_sort = request.POST.get('iSortCol_0', False)
user_query = User.objects.all()
# By default, there is no need to explicitly request a distinct result
distinct = False
# This field can be used to only return users that have used a certain
# annotation.
annotations = [v for k,v in request.POST.iteritems()
if k.startswith('annotations[')]
for annotation in annotations:
user_query = user_query.filter(
classinstanceclassinstance__relation__relation_name = \
'annotated_with',
classinstanceclassinstance__class_instance_b__name = \
annotation)
# Make sure we only get distinct user names
distinct = True
# The neuron_id field can be used to constrain the result by only showing
# users that annotated a certain neuron.
neuron_annotated = request.POST.get('neuron_id', None)
if neuron_annotated:
user_query = user_query.filter(
classinstanceclassinstance__relation__relation_name = \
'annotated_with',
classinstanceclassinstance__class_instance_a__id = \
neuron_annotated)
# Make sure we only get distinct user names
distinct = True
if distinct:
user_query = user_query.distinct()
if should_sort:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions = [request.POST.get('sSortDir_%d' % d, 'DESC')
for d in range(column_count)]
sorting_directions = map(lambda d: '-' if upper(d) == 'DESC' else '',
sorting_directions)
fields = ['username', 'first_name', 'last_name']
sorting_index = [int(request.POST.get('iSortCol_%d' % d))
for d in range(column_count)]
sorting_cols = map(lambda i: fields[i], sorting_index)
user_query = user_query.extra(order_by=[di + col for (di, col) in zip(
sorting_directions, sorting_cols)])
num_records = len(user_query)
result = list(user_query[display_start:display_start + display_length])
response = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
'aaData': []
}
for user in result:
response['aaData'] += [[
user.username,
user.first_name,
user.last_name,
user.id,
]]
return HttpResponse(json.dumps(response), content_type='text/json')
initial_colors = ((1, 0, 0, 1),
(0, 1, 0, 1),
(0, 0, 1, 1),
(1, 0, 1, 1),
(0, 1, 1, 1),
(1, 1, 0, 1),
(1, 1, 1, 1),
(1, 0.5, 0, 1),
(1, 0, 0.5, 1),
(0.5, 1, 0, 1),
(0, 1, 0.5, 1),
(0.5, 0, 1, 1),
(0, 0.5, 1, 1))
def distinct_user_color():
""" Returns a color for a new user. If there are less users registered than
entries in the initial_colors list, the next free color is used. Otherwise,
a random color is generated.
"""
nr_users = User.objects.exclude(id__exact=-1).count()
if nr_users < len(initial_colors):
distinct_color = initial_colors[nr_users]
else:
distinct_color = colorsys.hsv_to_rgb(random(), random(), 1.0) + (1,)
return distinct_color
@user_passes_test(access_check)
def update_user_profile(request):
""" Allows users to update some of their user settings, e.g. whether
reference lines should be visible. If the request is done by the anonymous
user, nothing is updated, but no error is raised.
"""
# Ignore anonymous user
if not request.user.is_authenticated() or request.user.is_anonymous():
return HttpResponse(json.dumps({'success': "The user profile of the " +
"anonymous user won't be updated"}), content_type='text/json')
for var in [{'name': 'display_stack_reference_lines', 'parse': json.loads},
{'name': 'tracing_overlay_screen_scaling', 'parse': json.loads},
{'name': 'tracing_overlay_scale', 'parse': float},
{'name': 'prefer_webgl_layers', 'parse': json.loads}]:
request_var = request.POST.get(var['name'], None)
if request_var:
request_var = var['parse'](request_var)
# Set new user profile values
setattr(request.user.userprofile, var['name'], request_var)
# Save user profile
request.user.userprofile.save()
return HttpResponse(json.dumps({'success': 'Updated user profile'}),
content_type='text/json')
| fzadow/CATMAID | django/applications/catmaid/control/user.py | Python | agpl-3.0 | 6,114 | [
"NEURON"
] | d0c06d6a44c4a7eb94613c3a337a5b90dcc694dd4ed863865155dce9d9ba96b9 |
'''
This script gets barcode data from a barcode scanner using serial communication
and sends the state representated by the barcode scanner & the barcode string
to the Galaxy LIMS RabbitMQ server. The message is sent in XML which has 2 tags,
barcode & state. The state of the scanner should be set in the galaxy_amq.ini
file as a configuration variable.
'''
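# An illustrative galaxy_amq.ini layout matching the options read below
# (all values are placeholders, not taken from a real deployment):
#
#   [galaxy:amqp]
#   host = 127.0.0.1
#   port = 5672
#   userid = galaxy
#   password = galaxy
#   virtual_host = galaxy_messaging_engine
#   exchange = galaxy_exchange
#   routing_key = bar_code_scanner
#
#   [scanner1]
#   prefix = 01
#   state = sample_received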
from amqplib import client_0_8 as amqp
import ConfigParser
import sys, os
import serial
import array
import time
import optparse
xml = \
''' <sample>
<barcode>%(BARCODE)s</barcode>
<state>%(STATE)s</state>
</sample>'''
def handle_scan(states, amqp_config, barcode):
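    # The first two characters of a scan select the scanner state (matched
    # against the per-[scannerN] 'prefix' option); the remainder of the
    # string is the sample barcode itself.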
if states.get(barcode[:2], None):
values = dict( BARCODE=barcode[2:],
STATE=states.get(barcode[:2]) )
print values
data = xml % values
print data
conn = amqp.Connection(host=amqp_config['host']+":"+amqp_config['port'],
userid=amqp_config['userid'],
password=amqp_config['password'],
virtual_host=amqp_config['virtual_host'],
insist=False)
chan = conn.channel()
msg = amqp.Message(data)
msg.properties["delivery_mode"] = 2
chan.basic_publish(msg,
exchange=amqp_config['exchange'],
routing_key=amqp_config['routing_key'])
chan.close()
conn.close()
def recv_data(states, amqp_config, s):
while True:
bytes = s.inWaiting()
if bytes:
print '%i bytes recvd' % bytes
msg = s.read(bytes)
print msg
handle_scan(states, amqp_config, msg.strip())
def main():
parser = optparse.OptionParser()
parser.add_option('-c', '--config-file', help='config file with all the AMQP config parameters',
dest='config_file', action='store')
parser.add_option('-p', '--port', help='Name of the port where the scanner is connected',
dest='port', action='store')
(opts, args) = parser.parse_args()
config = ConfigParser.ConfigParser()
config.read(opts.config_file)
amqp_config = {}
states = {}
for option in config.options("galaxy:amqp"):
amqp_config[option] = config.get("galaxy:amqp", option)
count = 1
while True:
section = 'scanner%i' % count
if config.has_section(section):
states[config.get(section, 'prefix')] = config.get(section, 'state')
count = count + 1
else:
break
print amqp_config
print states
s = serial.Serial(int(opts.port))
print 'Port %s is open: %s' %( opts.port, s.isOpen())
recv_data(states, amqp_config, s)
s.close()
print 'Port %s is open: %s' %( opts.port, s.isOpen())
if __name__ == '__main__':
main()
| volpino/Yeps-EURAC | scripts/galaxy_messaging/client/amqp_publisher.py | Python | mit | 2,944 | [
"Galaxy"
] | 7e7d92cf093a73a358dab1d49ba0f07754ffb77626cade0f7a68d27255b31f89 |
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import ddt
import functools
import random
import pytz
import io
import json
import shutil
import tempfile
from urllib import quote
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from mock import Mock, patch
from nose.tools import raises
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import UsageKey
from course_modes.models import CourseMode
from courseware.models import StudentModule
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory, UserProfileFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from microsite_configuration import microsite
from shoppingcart.models import (
RegistrationCodeRedemption, Order, CouponRedemption,
PaidCourseRegistration, Coupon, Invoice, CourseRegistrationCode, CourseRegistrationCodeInvoiceItem,
InvoiceTransaction)
from shoppingcart.pdf import PDFInvoice
from student.models import (
CourseEnrollment, CourseEnrollmentAllowed, NonExistentCourseError,
ManualEnrollmentAudit, UNENROLLED_TO_ENROLLED, ENROLLED_TO_UNENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED, ENROLLED_TO_ENROLLED, UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_UNENROLLED, ALLOWEDTOENROLL_TO_ENROLLED
)
from student.tests.factories import UserFactory, CourseModeFactory, AdminFactory
from student.roles import CourseBetaTesterRole, CourseSalesAdminRole, CourseFinanceAdminRole, CourseInstructorRole
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.fields import Date
from courseware.models import StudentFieldOverride
import instructor_task.api
import instructor.views.api
from instructor.views.api import require_finance_admin
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from instructor.views.api import _split_input_list, common_exceptions_400, generate_unique_password
from instructor_task.api_helper import AlreadyRunningError
from certificates.tests.factories import GeneratedCertificateFactory
from certificates.models import CertificateStatuses
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings
from .test_tools import msk_from_problem_urlname
DATE_FIELD = Date()
EXPECTED_CSV_HEADER = (
'"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
'"customer_reference_number","internal_reference"'
)
EXPECTED_COUPON_CSV_HEADER = '"Coupon Code","Course Id","% Discount","Description","Expiration Date",' \
'"Is Active","Code Redeemed Count","Total Discounted Seats","Total Discounted Amount"'
# ddt data for test cases involving reports
REPORTS_DATA = (
{
'report_type': 'grade',
'instructor_api_endpoint': 'calculate_grades_csv',
'task_api_endpoint': 'instructor_task.api.submit_calculate_grades_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrolled learner profile',
'instructor_api_endpoint': 'get_students_features',
'task_api_endpoint': 'instructor_task.api.submit_calculate_students_features_csv',
'extra_instructor_api_kwargs': {'csv': '/csv'}
},
{
'report_type': 'detailed enrollment',
'instructor_api_endpoint': 'get_enrollment_report',
'task_api_endpoint': 'instructor_task.api.submit_detailed_enrollment_features_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrollment',
'instructor_api_endpoint': 'get_students_who_may_enroll',
'task_api_endpoint': 'instructor_task.api.submit_calculate_may_enroll_csv',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'proctored exam results',
'instructor_api_endpoint': 'get_proctored_exam_results',
'task_api_endpoint': 'instructor_task.api.submit_proctored_exam_results_report',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'problem responses',
'instructor_api_endpoint': 'get_problem_responses',
'task_api_endpoint': 'instructor_task.api.submit_calculate_problem_responses_csv',
'extra_instructor_api_kwargs': {},
}
)
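# Each entry above is consumed by ddt-parameterized test methods that hit
# one instructor dashboard endpoint and assert that the matching
# instructor_task API function is invoked.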
# ddt data for test cases involving executive summary report
EXECUTIVE_SUMMARY_DATA = (
{
'report_type': 'executive summary',
'instructor_api_endpoint': 'get_exec_summary_report',
'task_api_endpoint': 'instructor_task.api.submit_executive_summary_report',
'extra_instructor_api_kwargs': {}
},
)
@common_exceptions_400
def view_success(request): # pylint: disable=unused-argument
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=unused-argument
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=unused-argument
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
@attr('shard_1')
class TestCommonExceptions400(TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
super(TestCommonExceptions400, self).setUp()
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIDenyLevels, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPIDenyLevels, self).setUp()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment',
{'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership',
{'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('list_financial_report_downloads', {}),
('calculate_grades_csv', {}),
('get_students_features', {}),
('get_enrollment_report', {}),
('get_students_who_may_enroll', {}),
('get_exec_summary_report', {}),
('get_proctored_exam_results', {}),
('get_problem_responses', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in ['send_email', 'students_update_enrollment', 'bulk_beta_modify_access']:
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def _access_problem_responses_endpoint(self, msg):
"""
Access endpoint for problem responses report, ensuring that
UsageKey.from_string returns a problem key that the endpoint
can work with.
msg: message to display if assertion fails.
"""
mock_problem_key = Mock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
self._access_endpoint('get_problem_responses', {}, 200, msg)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(staff_member)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'list_forum_members']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Staff member should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(inst)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Instructor should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test Bulk account creation and enrollment from csv file
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Create a course with mode 'audit'
cls.audit_course = CourseFactory.create()
CourseModeFactory(course_id=cls.audit_course.id, mode_slug=CourseMode.AUDIT)
cls.url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(cls.course.id)}
)
cls.audit_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(cls.audit_course.id)}
)
def setUp(self):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
# Create a course with mode 'honor' and with price
self.white_label_course = CourseFactory.create()
self.white_label_course_mode = CourseModeFactory(
course_id=self.white_label_course.id,
mode_slug=CourseMode.HONOR,
min_price=10,
suggested_prices='10',
)
self.white_label_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(self.white_label_course.id)}
)
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.audit_course_instructor = InstructorFactory(course_key=self.audit_course.id)
self.white_label_course_instructor = InstructorFactory(course_key=self.white_label_course.id)
self.client.login(username=self.instructor.username, password='test')
self.not_enrolled_student = UserFactory(
username='NotEnrolledStudent',
email='nonenrolled@test.com',
first_name='NotEnrolled',
last_name='Student'
)
@patch('instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for email that's send to new created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for email that's send to new created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('instructor.views.api.log.info')
def test_email_and_username_already_exist(self, info_log):
"""
If the email address and username already exists
and the user is enrolled in the course, do nothing (including no email gets sent out)
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for email that's send to new created user.
info_log.assert_called_with(
u"user already exists with username '%s' and email '%s'",
'test_student_1',
'test_student@example.com'
)
def test_file_upload_type_not_csv(self):
"""
        Try uploading a non-CSV file and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_bad_file_upload_type(self):
"""
        Try uploading a file with a .csv extension but unreadable binary content and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_insufficient_data(self):
"""
        Try uploading a CSV file that does not have exactly four columns of data
"""
csv_content = "test_student@example.com,test_student_1\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 1)
self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_invalid_email_in_csv(self):
"""
Test failure case of a poorly formatted email field
"""
csv_content = "test_student.example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
data = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch('instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
        If the email address and username already exist
        and the user is not enrolled in the course, enroll the user and move on to the next row.
"""
csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
info_log.assert_called_with(
u'user %s enrolled in the course %s',
u'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and just register the user in the course.
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
self.assertNotEquals(len(data['warnings']), 0)
self.assertEquals(data['warnings'][0]['response'], warning_message)
user = User.objects.get(email='test_student@example.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_username_in_csv(self):
"""
If the username already exists (but not the email),
assume it is a different user and fail to create the new account.
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_raising_exception_in_auto_registration_and_enrollment_case(self):
"""
Test that exceptions are handled well
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
with patch('instructor.views.api.create_manual_course_enrollment') as mock:
mock.side_effect = NonExistentCourseError()
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
self.assertEquals(len(password), 12)
for letter in password:
self.assertNotIn(letter, 'aAeEiIoOuU1l')
def test_users_created_and_enrolled_successfully_if_others_fail(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student3@example.com,test_student_1,tester3,CA\n" \
"test_student2@example.com,test_student_2,tester2,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
@patch.object(instructor.views.api, 'generate_random_string',
Mock(side_effect=['first', 'first', 'second']))
def test_generate_unique_password_no_reuse(self):
"""
generate_unique_password should generate a unique password string that hasn't been generated before.
"""
generated_password = ['first']
password = generate_unique_password(generated_password, 12)
self.assertNotEquals(password, 'first')
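    # A minimal sketch of how generate_unique_password could behave, inferred
    # from the patched generate_random_string above (hypothetical -- the real
    # implementation lives in instructor.views.api):
    #
    #     def generate_unique_password(generated_passwords, password_length=12):
    #         password = generate_random_string(password_length)
    #         while password in generated_passwords:
    #             password = generate_random_string(password_length)
    #         return password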
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEquals(response.status_code, 403)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_audit_enrollment_mode(self):
"""
        Test that the enrollment mode for audit courses is 'audit'.
"""
# Login Audit Course instructor
self.client.login(username=self.audit_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.audit_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # Verify the enrollment mode is 'audit'
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.AUDIT)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_honor_enrollment_mode(self):
"""
Test that enrollment mode for unpaid honor courses is 'honor'.
"""
# Remove white label course price
self.white_label_course_mode.min_price = 0
self.white_label_course_mode.suggested_prices = ''
self.white_label_course_mode.save() # pylint: disable=no-member
        # Login white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # Verify the enrollment mode is 'honor'
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.HONOR)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_default_shopping_cart_enrollment_mode_for_white_label(self):
"""
Test that enrollment mode for white label courses (paid courses) is DEFAULT_SHOPPINGCART_MODE_SLUG.
"""
# Login white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # Verify the enrollment mode is CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@attr('shard_1')
@ddt.ddt
class TestInstructorAPIEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
    This test does NOT exhaustively test state changes; that is the
    job of test_enrollment. This tests the response and action switch.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIEnrollment, self).setUp()
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (commented out because pylint flags C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
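        # In the result dicts below, "user" indicates whether a matching
        # account exists, and "allowed" whether a CourseEnrollmentAllowed
        # record exists for the identifier (see the allowed-student tests).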
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been enrolled in {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the "
"registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {proto}://{site}{about_path} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in {display_name}.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name, proto=protocol, site=self.site_name
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account,"
" you will see {display_name} listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, display_name=self.course.display_name
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student".format(
display_name=self.course.display_name,
)
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ALLOWEDTOENROLL_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org".format(
display_name=self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, about_path=self.about_path,
display_name=self.course.display_name,
)
)
@patch('instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
def test_enroll_already_enrolled_student(self):
"""
Ensure that already enrolled "verified" students cannot be downgraded
to "honor"
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# make this enrollment "verified"
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
# now re-enroll the student through the instructor dash
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
# affirm that the student is still in "verified" mode
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_ENROLLED)
self.assertEqual(course_enrollment.mode, u"verified")
def create_paid_course(self):
"""
        Create a course with a paid course mode.
"""
paid_course = CourseFactory.create()
CourseModeFactory.create(course_id=paid_course.id, min_price=50, mode_slug=CourseMode.HONOR)
CourseInstructorRole(paid_course.id).add_users(self.instructor)
return paid_course
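    # Assumption underlying the next tests: a course mode with a nonzero
    # min_price marks the course as paid, which makes the 'reason' field
    # mandatory when manually enrolling students.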
def test_reason_field_should_not_be_empty(self):
"""
        Test that the reason field must not be empty when
        manually enrolling students in paid courses.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"error": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_allowed_to_enroll_user(self):
"""
        Test enrolling an unregistered user and then enrolling them again after registration.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing..'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
        # now register the user
UserFactory(email=self.notregistered_email)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
self.assertEqual(manual_enrollments[1].state_transition, ALLOWEDTOENROLL_TO_ENROLLED)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": True,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": True,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_already_not_enrolled_user(self):
"""
        Test unenrolling a user who is not already enrolled in the course.
"""
paid_course = self.create_paid_course()
course_enrollment = CourseEnrollment.objects.filter(
user__email=self.notregistered_email, course_id=paid_course.id
)
self.assertEqual(course_enrollment.count(), 0)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenroll_and_enroll_verified(self):
"""
        Test that unenrolling a student from the verified track and re-enrolling
        them results in that student being in the default track
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# upgrade enrollment
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
self.assertEqual(course_enrollment.mode, CourseMode.DEFAULT_MODE_SLUG)
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': course.id.to_deprecated_string()},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment'
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
return response
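    # This helper always supplies a 'reason', so it is also valid against paid
    # courses, where an empty reason is rejected (see
    # test_reason_field_should_not_be_empty above).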
@attr('shard_1')
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkBetaEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (commented out because pylint flags C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {}\n\nYou have been invited to be a beta tester "
"for {} at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {}".format(
self.notenrolled_student.profile.name,
self.course.display_name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched via the email lookup above, which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched via the email lookup above, which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"{display_name} at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
display_name=self.course.display_name,
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@attr('shard_1')
class TestInstructorAPILevelsAccess(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
    This test does NOT test whether the actions had an effect on the
    database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsAccess, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsAccess, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
        Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
for user in [self.instructor, self.other_user]:
for identifier_attr in [user.email, user.username]:
for rolename in ["Administrator", "Moderator", "Community TA"]:
for action in ["allow", "revoke"]:
self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
"""
        Update forum role membership for the given identifier, rolename,
        and action, then verify the user's resulting forum roles.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{
'unique_student_identifier': identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@attr('shard_1')
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsDataDump, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsDataDump, self).setUp()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
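        # Create an active coupon granting a 10% discount on the course.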
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
        # Create test invoice 1
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
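        # Enroll six students; the five extra users below are only
        # *allowed* to enroll, via CourseEnrollmentAllowed records.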
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
def register_with_redemption_code(self, user, code):
"""
        Enroll a user in the course using a registration code.
"""
redeem_url = reverse('register_code_redemption', args=[code])
self.client.login(username=user.username, password='test')
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
def test_invalidate_sale_record(self):
"""
        Test the sale invalidation scenario.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url, method="POST", data=data)
        # Now try to fetch data for a non-existent invoice number
test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
self.assert_request_status_code(404, url, method="POST", data=test_data_1)
        # Now invalidate the same invoice number and expect a 400 Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)
        # Now re_validate the invoice number
data['event_type'] = "re_validate"
self.assert_request_status_code(200, url, method="POST", data=data)
        # Now re_validate the same active invoice number and expect a 400 Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("This invoice is already active.", response.content)
test_data_2 = {'invoice_number': self.sale_invoice_1.id}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
self.assertIn("Missing required event_type parameter", response.content)
test_data_3 = {'event_type': "re_validate"}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
self.assertIn("Missing required invoice_number parameter", response.content)
# submitting invalid invoice number
data['invoice_number'] = 'testing'
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
def test_get_sale_order_records_features_csv(self):
"""
Test that the response from get_sale_order_records is in csv format.
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
self.cart.order_type = 'business'
self.cart.save()
self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
company_contact_email='test@123', recipient_name='R1',
recipient_email='', customer_reference_number='PO#23')
paid_course_reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course.id,
mode_slug=CourseMode.HONOR
)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
# get the updated item
item = self.cart.orderitem_set.all().select_subclasses()[0]
# get the redeemed coupon information
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)
sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(sale_order_url)
self.assertEqual(response['Content-Type'], 'text/csv')
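        # '36' is presumably the discounted unit cost: min_price 40 with the 10% coupon applied.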
self.assertIn('36', response.content.split('\r\n')[1])
self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
self.assertIn(item.status, response.content.split('\r\n')[1],)
self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
def test_coupon_redeem_count_in_ecommerce_section(self):
"""
        Test the redeem count shown in the instructor dashboard coupon section.
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
# Coupon Redeem Count only visible for Financial Admins.
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
# URL for instructor dashboard
instructor_dashboard = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
# visit the instructor dashboard page and
# check that the coupon redeem count should be 0
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>0</td>', resp.content)
# now make the payment of your cart items
self.cart.purchase()
# visit the instructor dashboard page and
# check that the coupon redeem count should be 1
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>1</td>', resp.content)
def test_get_sale_records_features_csv(self):
"""
Test that the response from get_sale_records is in csv format.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse(
'get_sale_records',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_sale_records_features_json(self):
"""
Test that the response from get_sale_records is in json format.
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
for res in res_json['sale']:
self.validate_sale_records_response(
res,
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
def test_get_sale_records_features_with_multiple_invoices(self):
"""
Test that the response from get_sale_records is in json format for multiple invoices
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='qwerty{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
# Create test invoice 2
sale_invoice_2 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
internal_reference="B", course_id=self.course.id
)
invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=sale_invoice_2,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
self.validate_sale_records_response(
res_json['sale'][0],
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
self.validate_sale_records_response(
res_json['sale'][1],
course_registration_code,
sale_invoice_2,
0,
invoice_item=invoice_item_2
)
def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
"""
        Validate sale record attribute values against the response object.
"""
self.assertEqual(res['total_amount'], invoice.total_amount)
self.assertEqual(res['recipient_email'], invoice.recipient_email)
self.assertEqual(res['recipient_name'], invoice.recipient_name)
self.assertEqual(res['company_name'], invoice.company_name)
self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
self.assertEqual(res['internal_reference'], invoice.internal_reference)
self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
self.assertEqual(res['invoice_number'], invoice.id)
self.assertEqual(res['created_by'], course_registration_code.created_by.username)
self.assertEqual(res['course_id'], invoice_item.course_id.to_deprecated_string())
self.assertEqual(res['total_used_codes'], used_codes)
self.assertEqual(res['total_codes'], 5)
def test_get_problem_responses_invalid_location(self):
"""
Test whether get_problem_responses returns an appropriate status
message when users submit an invalid problem location.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.get(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertEqual(res_json, 'Could not find problem with this location.')
def valid_problem_location(test): # pylint: disable=no-self-argument
"""
Decorator for tests that target get_problem_responses endpoint and
need to pretend user submitted a valid problem location.
"""
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
"""
Run `test` method, ensuring that UsageKey.from_string returns a
problem key that the get_problem_responses endpoint can
work with.
"""
mock_problem_key = Mock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
test(self, *args, **kwargs)
return wrapper
@valid_problem_location
def test_get_problem_responses_successful(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation was started successfully.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.get(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
status = res_json['status']
self.assertIn('is being created', status)
self.assertNotIn('already in progress', status)
@valid_problem_location
def test_get_problem_responses_already_running(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation is already in progress.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
with patch('instructor_task.api.submit_calculate_problem_responses_csv') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('already in progress', res_json['status'])
def test_get_students_features(self):
"""
        Test that a minimal set of student information is formatted
        correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
set_course_cohort_settings(self.course.id, is_cohorted=is_cohorted)
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
@ddt.data(True, False)
def test_get_students_features_teams(self, has_teams):
"""
        Test that get_students_features includes team info when the course
        has teams enabled, and does not when teams are not enabled.
"""
if has_teams:
self.course = CourseFactory.create(teams_configuration={
'max_size': 2, 'topics': [{'topic-id': 'topic', 'name': 'Topic', 'description': 'A Topic'}]
})
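            # The freshly created course needs its own instructor; log in as that user.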
course_instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=course_instructor.username, password='test')
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertEqual('team' in res_json['feature_names'], has_teams)
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
def test_get_student_exam_results(self):
"""
Test whether get_proctored_exam_results returns an appropriate
status message when users request a CSV file.
"""
url = reverse(
'get_proctored_exam_results',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('instructor_task.api.submit_proctored_exam_results_report') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
        Test that require_finance_admin returns a 404 for an invalid
        course key before generating a detailed enrollment report.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
self.assertEqual(response.status_code, 404)
self.assertFalse(func.called)
def mock_request(self):
"""
        Build a mock request authenticated as the instructor.
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
        Test the require_finance_admin decorator with a valid course key
        for a user who does not have the finance admin role.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
self.assertEqual(response.status_code, 403)
self.assertFalse(func.called)
    def test_add_user_to_finance_admin_role_with_valid_course(self):
        """
        Test that the decorated function is called when the user has
        finance admin rights.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, self.course.id.to_deprecated_string())
self.assertTrue(func.called)
def test_enrollment_report_features_csv(self):
"""
        Test generating the enrollment report.
        Enroll users and admin staff using registration codes.
"""
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
admin_user = AdminFactory()
admin_cart = Order.get_cart_for_user(admin_user)
PaidCourseRegistration.add_to_order(admin_cart, self.course.id)
admin_cart.purchase()
        # Create a new user/student, enroll in the course using a
        # registration code, and then validate the generated detailed
        # enrollment report.
test_user = UserFactory()
self.register_with_redemption_code(test_user, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
UserProfileFactory.create(user=self.students[0], meta='{"company": "asdasda"}')
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_bulk_purchase_detailed_report(self):
"""
        Test generating the detailed enrollment report.
        1. Purchase registration codes.
        2. Enroll users via registration code.
        3. Validate the generated enrollment report.
"""
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'),
{'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.cart)
self.register_with_redemption_code(self.instructor, course_reg_codes[0].code)
test_user = UserFactory()
test_user_cart = Order.get_cart_for_user(test_user)
PaidCourseRegistration.add_to_order(test_user_cart, self.course.id)
test_user_cart.purchase()
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=-self.sale_invoice_1.total_amount,
status='refunded',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_create_registration_code_without_invoice_and_order(self):
"""
        Test generating the detailed enrollment report using a registration
        code that was created without an invoice or bulk purchase order.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_invoice_payment_is_still_pending_for_registration_codes(self):
"""
        Test generating the enrollment report after enrolling a user in the
        course with a registration code whose invoice has not been paid yet.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
@patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(
'"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"{user_id}","41","42"\n'.format(user_id=self.students[0].id)
))
self.assertTrue(
body.endswith('"{user_id}","41","42"\n'.format(user_id=self.students[-1].id))
)
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.get(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
@ddt.data(*REPORTS_DATA)
@ddt.unpack
@valid_problem_location
def test_calculate_report_csv_success(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
success_status = "The {report_type} report is being created.".format(report_type=report_type)
if report_type == 'problem responses':
with patch(task_api_endpoint):
response = self.client.get(url, {'problem_location': ''})
self.assertIn(success_status, response.content)
else:
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.get(url, {})
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_success(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.get(url, {})
success_status = "The {report_type} report is being created." \
" To view the status of the report, see Pending" \
" Instructor Tasks" \
" below".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_already_running(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint) as mock:
mock.side_effect = AlreadyRunningError()
response = self.client.get(url, {})
already_running_status = "The {report_type} report is currently being created." \
" To view the status of the report, see Pending Instructor Tasks below." \
" You will be able to download the report" \
" when it is" \
" complete.".format(report_type=report_type)
self.assertIn(already_running_status, response.content)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].email.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].username.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
    def test_get_student_progress_url_nostudent(self):
        """ Test that the endpoint 400's when requesting an unknown email. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': 'invalid_email@example.com',
        })
        self.assertEqual(response.status_code, 400)
@attr('shard_1')
class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
    This test does NOT check whether the actions had an effect on the
    database; that is the job of the task tests and test_enrollment.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
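        # Seed the student's problem state with 10 attempts so resets and deletes are observable.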
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
    def test_reset_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_course_has_entrance_exam_in_student_attempts_reset(self):
""" Test course has entrance exam id set while resetting attempts"""
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
'delete_module': False,
})
self.assertEqual(response.status_code, 400)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test course has entrance exam id set while re-scoring. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test endpoints whereby instructors can rescore student grades,
    reset student attempts, and delete state for the entrance exam.
"""
@classmethod
def setUpClass(cls):
super(TestEntranceExamInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create(
org='test_org',
course='test_course',
run='test_run',
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
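        # Build the entrance exam tree: chapter -> sequential -> vertical -> two problems.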
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.entrance_exam = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name='Entrance exam'
)
subsection = ItemFactory.create(
parent=cls.entrance_exam,
category='sequential',
display_name='Subsection 1'
)
vertical = ItemFactory.create(
parent=subsection,
category='vertical',
display_name='Vertical 1'
)
cls.ee_problem_1 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 1"
)
cls.ee_problem_2 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 2"
)
def setUp(self):
super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
# Add instructor to invalid ee course
CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
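        # Record prior state (10 attempts, done) on both entrance exam problems for this student.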
ee_module_to_reset1 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_1.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
ee_module_to_reset2 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_2.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
    def test_reset_entrance_exam_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on the entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
    def test_entrance_exam_student_delete_state(self):
        """ Test deleting a single student's entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
self.assertEqual(changed_modules.count(), 0)
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 403)
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['status'], _('Complete'))
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 0)
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
# create a re-score entrance exam task
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Check that only instructors have access to the email endpoints, that
    these endpoints are only accessible for courses that actually exist,
    and only with valid email messages.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorSendEmail, cls).setUpClass()
cls.course = CourseFactory.create()
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
cls.full_test_message = {
'send_to': 'staff',
'subject': test_subject,
'message': test_message,
}
def setUp(self):
super(TestInstructorSendEmail, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@attr('shard_1')
class TestInstructorAPITaskLists(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
@classmethod
def setUpClass(cls):
super(TestInstructorAPITaskLists, cls).setUpClass()
cls.course = CourseFactory.create(
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPITaskLists, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
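        # Create seven fake tasks and corrupt the last one's output to exercise the invalid-JSON path.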
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@attr('shard_1')
@patch.object(instructor_task.api, 'get_instructor_task_history', autospec=True)
class TestInstructorEmailContentList(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorEmailContentList, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorEmailContentList, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def setup_fake_email_info(self, num_emails, with_failures=False):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
if with_failures:
failed = random.randint(1, 15401)
else:
failed = 0
self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails, with_failures)
task_history_request.return_value = self.tasks.values()
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
return response
def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
""" Tests sending emails with or without failures """
response = self.get_email_content_response(num_emails, task_history_request, with_failures)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), num_emails)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(expected_email_info, actual_email_info)
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
self.check_emails_sent(50, task_history_request)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
self.assertEqual(returned_info[info], None)
def test_list_email_with_failure(self, task_history_request):
""" Test the handling of email task that had failures """
self.check_emails_sent(1, task_history_request, True)
def test_list_many_emails_with_failures(self, task_history_request):
""" Test the handling of many emails with failures """
self.check_emails_sent(50, task_history_request, True)
def test_list_email_with_no_successes(self, task_history_request):
task_info = FakeContentTask(0, 0, 10, 'expected')
email = FakeEmail(0)
email_info = FakeEmailInfo(email, 0, 10)
task_history_request.return_value = [task_info]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.return_value = email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_info_list = json.loads(response.content)['emails']
self.assertEqual(len(returned_info_list), 1)
returned_info = returned_info_list[0]
expected_info = email_info.to_dict()
self.assertDictEqual(expected_info, returned_info)
@attr('shard_1')
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append(
"Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
[u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
msk_from_problem_urlname(*args)
def get_extended_due(course, unit, user):
"""
Gets the overridden due date for the given user on the given unit. Returns
`None` if there is no override set.
"""
try:
override = StudentFieldOverride.objects.get(
course_id=course.id,
student=user,
location=unit.location,
field='due'
)
return DATE_FIELD.from_json(json.loads(override.value))
except StudentFieldOverride.DoesNotExist:
return None
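# A minimal usage sketch (values are illustrative, mirroring the tests below):
#
#     get_extended_due(course, week1, user1)
#     # -> datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc) after an extension
#     # -> None when no override row exists for that (course, unit, user)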
@attr('shard_1')
class TestDueDateExtensions(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test due date extensions on individual units for individual students.
"""
@classmethod
def setUpClass(cls):
super(TestDueDateExtensions, cls).setUpClass()
cls.course = CourseFactory.create()
cls.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.week1 = ItemFactory.create(due=cls.due)
cls.week2 = ItemFactory.create(due=cls.due)
cls.week3 = ItemFactory.create() # No due date
cls.course.children = [
cls.week1.location.to_deprecated_string(),
cls.week2.location.to_deprecated_string(),
cls.week3.location.to_deprecated_string()
]
cls.homework = ItemFactory.create(
parent_location=cls.week1.location,
due=cls.due
)
cls.week1.children = [cls.homework.location.to_deprecated_string()]
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensions, self).setUp()
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_change_to_invalid_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '01/01/2009 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week3.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week3, self.user1)
)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_reset_nonexistent_extension(self):
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 400, response.content)
@SharedModuleStoreTestCase.modifies_courseware
def test_reset_extension_to_deleted_date(self):
"""
Test that we can delete a due date extension after deleting the normal
due date, without causing an error.
"""
self.test_change_due_date()
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@attr('shard_1')
class TestCourseIssuedCertificatesData(SharedModuleStoreTestCase):
"""
Test data dumps for issued certificates.
"""
@classmethod
def setUpClass(cls):
super(TestCourseIssuedCertificatesData, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseIssuedCertificatesData, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def generate_certificate(self, course_id, mode, status):
"""
Generate test certificate
"""
test_user = UserFactory()
GeneratedCertificateFactory.create(
user=test_user,
course_id=course_id,
mode=mode,
status=status
)
def test_certificates_features_against_status(self):
"""
Test certificates with status 'downloadable' should be in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
        # First, generate certificates with 'honor' mode and status 'generating'.
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.generating)
response = self.client.get(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 0)
# Certificates with status 'downloadable' should be in response.
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.get(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
def test_certificates_features_group_by_mode(self):
"""
        Test certificate CSV features against mode. Certificates should be grouped by 'mode' in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
        # First, generate downloadable certificates with 'honor' mode.
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.get(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
        # Retrieve the first certificate from the list; there should be 3 certificates for 'honor' mode.
certificate = res_json['certificates'][0]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'honor')
self.assertEqual(certificate.get('course_id'), str(self.course.id))
# Now generating downloadable certificates with 'verified' mode
for __ in xrange(certificate_count):
self.generate_certificate(
course_id=self.course.id,
mode='verified',
status=CertificateStatuses.downloadable
)
response = self.client.get(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
        # There should now be two certificate groups: 'honor' and 'verified'.
self.assertEqual(len(res_json['certificates']), 2)
# retrieve the second certificate from the list
certificate = res_json['certificates'][1]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'verified')
def test_certificates_features_csv(self):
"""
Test for certificate csv features.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
url += '?csv=true'
        # First, generate downloadable certificates with 'honor' mode.
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
current_date = datetime.date.today().strftime("%B %d, %Y")
response = self.client.get(url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}'.format('issued_certificates.csv'))
self.assertEqual(
response.content.strip(),
'"CourseID","Certificate Type","Total Certificates Issued","Date Report Run"\r\n"'
+ str(self.course.id) + '","honor","3","' + current_date + '"'
)
@attr('shard_1')
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(SharedModuleStoreTestCase):
"""
Test data dumps for E-commerce Course Registration Codes.
"""
@classmethod
def setUpClass(cls):
super(TestCourseRegistrationCodes, cls).setUpClass()
cls.course = CourseFactory.create()
cls.url = reverse(
'generate_registration_codes',
kwargs={'course_id': cls.course.id.to_deprecated_string()}
)
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodes, self).setUp()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(self.url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(5):
order = Order(user=self.instructor, status='purchased')
order.save()
        # Spent (used) registration codes
for i in range(5):
i += 1
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
@override_settings(FINANCE_EMAIL='finance@example.com')
def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
"""
Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
generating registration codes
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
        # Check the last message in mail.outbox; the FINANCE_EMAIL is appended
        # to the very end of the recipient list when generating registration codes.
self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')
def test_user_invoice_copy_preference(self):
"""
        Test that the user's invoice copy preference is remembered across code generation requests.
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
        # The user's invoice copy preference is saved in the user preference API model.
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], True)
# updating the user invoice copy preference during code generation flow
data['invoice'] = ''
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], False)
def test_generate_course_registration_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 17)  # 1 header row + 15 codes + trailing newline
def test_generate_course_registration_with_redeem_url_codes_csv(self):
"""
        Test that each generated registration code row includes its redeem URL.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
rows = body.split('\n')
index = 1
while index < len(rows):
if rows[index]:
row_data = rows[index].split(',')
code = row_data[0].replace('"', '')
self.assertTrue(row_data[1].startswith('"http')
and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
index += 1
@patch.object(instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'second', 'third', 'fourth']))
def test_generate_course_registration_codes_matching_existing_coupon_code(self):
"""
        Test generating registration codes when a candidate code already exists in the Coupon table.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
coupon.save()
data = {
'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 5) # 1 for headers, 1 for new line at the end and 3 for the actual data
@patch.object(instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
"""
        Test that a duplicate generated code (IntegrityError) is retried with a fresh code.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 4)  # 1 header row + 2 codes + trailing newline
def test_spent_course_registration_codes_csv(self):
"""
Test to generate a response of all the spent course registration codes
"""
url = reverse('spent_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'spent_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 7)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(9):
order = Order(user=self.instructor, status='purchased')
order.save()
        # Spent (used) registration codes
for i in range(9):
i += 13
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
data = {'spent_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
"""
Test to generate a response of all the active course registration codes
"""
url = reverse('active_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'active_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 9)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'active_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
"""
Test to generate a response of all the course registration codes
"""
url = reverse(
'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {'download_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 14)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'download_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_pdf_file_throws_exception(self):
"""
        Test that an exception from PDF invoice generation is handled
        gracefully when generating registration codes.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
response = self.client.post(generate_code_url, data)
self.assertEqual(response.status_code, 200, response.content)
def test_get_codes_with_sale_invoice(self):
"""
        Test downloading registration codes that were generated with a sale invoice.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
url = reverse('get_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'download_company_name': 'Group Invoice'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
def test_with_invalid_unit_price(self):
"""
        Test that an invalid unit price is rejected with a 400 when generating registration codes.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 400, response.content)
self.assertIn('Could not parse amount as', response.content)
def test_get_historical_coupon_codes(self):
"""
        Test downloading a report of all the coupon codes, with and without expiration dates.
"""
get_coupon_code_url = reverse(
'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
)
coupon.save()
        # Now create coupons with expiration dates
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
)
coupon.save()
response = self.client.get(get_coupon_code_url)
self.assertEqual(response.status_code, 200, response.content)
        # Verify that every coupon appears in the CSV response.
for coupon in Coupon.objects.all():
self.assertIn(
'"{coupon_code}","{course_id}","{discount}","{description}","{expiration_date}","{is_active}",'
'"{code_redeemed_count}","{total_discounted_seats}","{total_discounted_amount}"'.format(
coupon_code=coupon.code,
course_id=coupon.course_id,
discount=coupon.percentage_discount,
description=coupon.description,
expiration_date=coupon.display_expiry_date,
is_active=coupon.is_active,
code_redeemed_count="0",
total_discounted_seats="0",
total_discounted_amount="0",
), response.content
)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
@attr('shard_1')
class TestBulkCohorting(SharedModuleStoreTestCase):
"""
Test adding users to cohorts in bulk via CSV upload.
"""
@classmethod
def setUpClass(cls):
super(TestBulkCohorting, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBulkCohorting, self).setUp()
self.staff_user = StaffFactory(course_key=self.course.id)
self.non_staff_user = UserFactory.create()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tempdir)
def call_add_users_to_cohorts(self, csv_data, suffix='.csv', method='POST'):
"""
Call `add_users_to_cohorts` with a file generated from `csv_data`.
"""
        # this temporary file lives in self.tempdir, which is removed by the
        # addCleanup hook registered in setUp()
__, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
with open(file_name, 'r') as file_pointer:
url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
if method == 'POST':
return self.client.post(url, {'uploaded-file': file_pointer})
elif method == 'GET':
return self.client.get(url, {'uploaded-file': file_pointer})
def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
"""
Verify that we get the error we expect for a given file input.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
self.assertEqual(response.status_code, 400)
result = json.loads(response.content)
self.assertEqual(result['error'], error)
def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
"""
        Verify that `add_users_to_cohorts` successfully validates the
file content, uploads the input file, and triggers the
background task.
"""
mock_store_upload.return_value = (None, 'fake_file_name.csv')
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content)
self.assertEqual(response.status_code, 204)
self.assertTrue(mock_store_upload.called)
self.assertTrue(mock_cohort_task.called)
def test_no_cohort_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a cohort field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'username,email\n', "The file must contain a 'cohort' column containing cohort names."
)
def test_no_username_or_email_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a username or email field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
)
def test_empty_csv(self):
"""
Verify that we get a descriptive verification error when we haven't
included any data in the uploaded CSV.
"""
self.expect_error_on_file_content(
'', "The file must contain a 'cohort' column containing cohort names."
)
def test_wrong_extension(self):
"""
Verify that we get a descriptive verification error when we haven't
uploaded a file with a '.csv' extension.
"""
self.expect_error_on_file_content(
'', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
)
def test_non_staff_no_access(self):
"""
Verify that we can't access the view when we aren't a staff user.
"""
self.client.login(username=self.non_staff_user.username, password='test')
response = self.call_add_users_to_cohorts('')
self.assertEqual(response.status_code, 403)
def test_post_only(self):
"""
Verify that we can't call the view when we aren't using POST.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts('', method='GET')
self.assertEqual(response.status_code, 405)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_username(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call a background task when
the CSV has username and cohort columns.
"""
self.verify_success_on_file_content(
'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has email and cohort columns.
"""
self.verify_success_on_file_content(
'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has username, email and cohort columns.
"""
self.verify_success_on_file_content(
'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns.
"""
self.verify_success_on_file_content(
'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns and line
feeds.
"""
self.verify_success_on_file_content(
'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
| IndonesiaX/edx-platform | lms/djangoapps/instructor/tests/test_api.py | Python | agpl-3.0 | 212,156 | [
"VisIt"
] | 60f5d7799dc274bc5a5a57b0baade9779467a678743d9cfd967a35ea73182ddb |
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'tornado>=4.0,<5',
'python-daemon<3.0',
]
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
    # Read the Docs doesn't like python-daemon, see #1342
install_requires.remove('python-daemon<3.0')
install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
setup(
name='h3luigi',
version='1.2.1',
    description='Workflow mgmt + task scheduling + dependency resolution',
long_description=long_description,
author='The Luigi Authors',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main',
'luigi-migrate = luigi.tools.migrate:main'
]
},
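    # Usage note (informal): after installation these entry points become
    # shell commands, e.g. `luigid` starts the central scheduler and
    # `luigi --module mymodule MyTask` runs a task; exact flags depend on
    # the installed luigi version.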
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
)
| h3biomed/luigi | setup.py | Python | apache-2.0 | 3,094 | [
"VisIt"
] | 33b20a24b125724918e3223ad979c679ecc4be3eea61179b339e4a9ebf682b91 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
# Internal modules #
from seqsearch.databases import Database
from seqsearch.search import SeqSearch
# First party modules #
from autopaths.tmp_path import new_temp_dir
from fasta import FASTA
###############################################################################
class NucleotideDatabase(Database):
"""
The Nucleotide database is a collection of sequences from several sources,
including GenBank, RefSeq, TPA and PDB.
To install:
from seqsearch.databases.nt import nt
nt.download()
nt.untargz()
nt.test()
It will put it in ~/databases/nt
"""
short_name = "nt"
long_name = "The Nucleotide database (NCBI)"
ftp_url = "ftp.ncbi.nlm.nih.gov"
ftp_dir = "/blast/db/"
pattern = 'nt.*.tar.gz'
def test(self):
"""Search one sequence, and see if it works."""
# New directory #
directory = new_temp_dir()
# A randomly chosen sequence (Homo sapiens mRNA for prepro cortistatin) #
seq = """ACAAGATGCCATTGTCCCCCGGCCTCCTGCTGCTGCTGCTCTCCGGGGCCACGGCCACCGCTGCCCTGCC
CCTGGAGGGTGGCCCCACCGGCCGAGACAGCGAGCATATGCAGGAAGCGGCAGGAATAAGGAAAAGCAGC
CTCCTGACTTTCCTCGCTTGGTGGTTTGAGTGGACCTCCCAGGCCAGTGCCGGGCCCCTCATAGGAGAGG
AAGCTCGGGAGGTGGCCAGGCGGCAGGAAGGCGCACCCCCCCAGCAATCCGCGCGCCGGGACAGAATGCC
CTGCAGGAACTTCTTCTGGAAGACCTTCTCCTCCTGCAAATAAAACCTCACCCATGAATGCTCACGCAAG
TTTAATTACAGACCTGAA"""
seq = seq.replace('\n','')
seq = seq.replace(' ','')
# Make input #
input_fasta = FASTA(directory + 'input.fasta')
input_fasta.create()
input_fasta.add_str(seq, "My test sequence")
input_fasta.close()
# Make output #
out_path = directory + 'output.blast'
# Make extras parameters #
params = {'-outfmt': 0,
'-evalue': 1e-5,
'-perc_identity': 99}
# Make the search #
search = SeqSearch(input_fasta,
self.blast_db,
'nucl',
'blast',
num_threads = 1,
out_path = out_path,
params = params)
# Run it #
search.run()
# Print result #
print("Success", directory)
###############################################################################
nt = NucleotideDatabase("nucl") | xapple/seqsearch | seqsearch/databases/nt.py | Python | mit | 2,601 | [
"BLAST"
] | a888a0111cc61ac18db074ea6b5c007eba05e02a487a217f6b35cb8fc8c8fcc6 |
#
# The Python Imaging Library.
# $Id$
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-12-02 fl PIL release 1.1.6
# 2009-11-15 fl PIL release 1.1.7
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import VERSION, PILLOW_VERSION, _plugins
import logging
import warnings
logger = logging.getLogger(__name__)
class DecompressionBombWarning(RuntimeWarning):
pass
class _imaging_not_installed(object):
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
# Limit to around a quarter gigabyte for a 24 bit (3 bpp) image
MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 / 4 / 3)
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk (use
# __import__ to hide this from naive packagers; we don't really
# depend on Tk unless ImageTk is used, and that module already
# imports Tkinter)
__import__("FixTk")
except ImportError:
pass
try:
# If the _imaging C module is not present, Pillow will not load.
# Note that other modules should not refer to _imaging directly;
# import Image and use the Image.core variable instead.
# Also note that Image.core is not a publicly documented interface,
# and should be considered private and subject to change.
from PIL import _imaging as core
if PILLOW_VERSION != getattr(core, 'PILLOW_VERSION', None):
raise ImportError("The _imaging extension was built for another "
" version of Pillow or PIL")
except ImportError as v:
core = _imaging_not_installed()
# Explanations for ways that we know we might have an import error
if str(v).startswith("Module use of python"):
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python.",
RuntimeWarning
)
elif str(v).startswith("The _imaging extension"):
warnings.warn(str(v), RuntimeWarning)
elif "Symbol not found: _PyUnicodeUCS2_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS2 support; "
"recompile PIL or build Python --without-wide-unicode. ",
RuntimeWarning
)
elif "Symbol not found: _PyUnicodeUCS4_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS4 support; "
"recompile PIL or build Python --with-wide-unicode. ",
RuntimeWarning
)
# Fail here anyway. Don't let people run with a mostly broken Pillow.
# see docs/porting-pil-to-pillow.rst
raise
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
from PIL import ImageMode
from PIL._binary import i8
from PIL._util import isPath
from PIL._util import isStringType
from PIL._util import deferred_error
import os
import sys
import io
import struct
# type stuff
import collections
import numbers
# works everywhere, win for pypy, not cpython
USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info')
try:
import cffi
HAS_CFFI = True
except ImportError:
HAS_CFFI = False
def isImageType(t):
"""
Checks if an object is an image object.
.. warning::
This function is for internal use only.
:param t: object to check if it's an image
:returns: True if the object is an image
"""
return hasattr(t, "im")
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NEAREST = NONE = 0
LANCZOS = ANTIALIAS = 1
BILINEAR = LINEAR = 2
BICUBIC = CUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
if hasattr(core, 'DEFAULT_STRATEGY'):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
FILTERED = core.FILTERED
HUFFMAN_ONLY = core.HUFFMAN_ONLY
RLE = core.RLE
FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
SAVE_ALL = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
"HSV": ("RGB", "L", ("H", "S", "V")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # broken
"L": ('|u1', None),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
shape = im.size[1], im.size[0]
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return shape, typ
else:
return shape+(extra,), typ
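# Informal examples of the mapping above (a sketch, not part of the public
# API): a 640x480 "RGB" image maps to ((480, 640, 3), '|u1'), while an "I"
# image maps to ((480, 640), '<i4') or ((480, 640), '>i4') by byte order.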
MODES = sorted(_MODEINFO.keys())
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode):
"""
Gets the "base" mode for given mode. This function returns "L" for
images that contain grayscale data, and "RGB" for images that
contain color data.
:param mode: Input mode.
:returns: "L" or "RGB".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basemode
def getmodetype(mode):
"""
Gets the storage type mode. Given a mode, this function returns a
single-layer mode suitable for storing individual bands.
:param mode: Input mode.
:returns: "L", "I", or "F".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basetype
def getmodebandnames(mode):
"""
Gets a list of individual band names. Given a mode, this function returns
a tuple containing the names of individual bands (use
:py:method:`~PIL.Image.getmodetype` to get the mode used to store each
individual band.
:param mode: Input mode.
:returns: A tuple containing band names. The length of the tuple
gives the number of bands in an image of the given mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).bands
def getmodebands(mode):
"""
Gets the number of individual bands for this mode.
:param mode: Input mode.
:returns: The number of bands in this mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return len(ImageMode.getmode(mode).bands)
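# Quick informal examples of the helpers above: getmodebase("CMYK") == "RGB",
# getmodetype("1") == "L", getmodebandnames("YCbCr") == ("Y", "Cb", "Cr"),
# and getmodebands("RGBA") == 4.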
# --------------------------------------------------------------------
# Helpers
_initialized = 0
def preinit():
"Explicitly load standard file format drivers."
global _initialized
if _initialized >= 1:
return
try:
from PIL import BmpImagePlugin
except ImportError:
pass
try:
from PIL import GifImagePlugin
except ImportError:
pass
try:
from PIL import JpegImagePlugin
except ImportError:
pass
try:
from PIL import PpmImagePlugin
except ImportError:
pass
try:
from PIL import PngImagePlugin
except ImportError:
pass
# try:
# import TiffImagePlugin
# except ImportError:
# pass
_initialized = 1
def init():
"""
Explicitly initializes the Python Imaging Library. This function
loads all available file format drivers.
"""
global _initialized
if _initialized >= 2:
return 0
for plugin in _plugins:
try:
logger.debug("Importing %s", plugin)
__import__("PIL.%s" % plugin, globals(), locals(), [])
except ImportError as e:
logger.debug("Image: failed to import %s: %s", plugin, e)
if OPEN or SAVE:
_initialized = 2
return 1
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print(decoder, mode, args + extra)
return decoder(mode, *args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print(encoder, mode, args + extra)
return encoder(mode, *args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
def coerce_e(value):
return value if isinstance(value, _E) else _E(value)
class _E(object):
def __init__(self, data):
self.data = data
def __add__(self, other):
return _E((self.data, "__add__", coerce_e(other).data))
def __mul__(self, other):
return _E((self.data, "__mul__", coerce_e(other).data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)):
return c, 0.0
if a is stub and b == "__add__" and isinstance(c, numbers.Number):
return 1.0, c
except TypeError:
pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number) and
d == "__add__" and isinstance(e, numbers.Number)):
return c, e
except TypeError:
pass
raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
class Image(object):
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
def __init__(self):
# FIXME: take "new" parameters / other image?
# FIXME: turn mode and size into delegating properties?
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
self.pyaccess = None
@property
def width(self):
return self.size[0]
@property
def height(self):
return self.size[1]
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
if self.palette:
new.palette = self.palette.copy()
if im.mode == "P" and not new.palette:
from PIL import ImagePalette
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
# Context Manager Support
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is only required to close images that have not
had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method.
"""
try:
self.fp.close()
except Exception as msg:
logger.debug("Error closing: %s" % msg)
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self.im = deferred_error(ValueError("Operation on closed image"))
def _copy(self):
self.load()
self.im = self.im.copy()
self.pyaccess = None
self.readonly = 0
def _dump(self, file=None, format=None):
import tempfile
suffix = ''
if format:
suffix = '.'+format
if not file:
f, file = tempfile.mkstemp(suffix)
os.close(f)
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
if not file.endswith(format):
file = file + "." + format
self.save(file, format)
return file
def __eq__(self, other):
if self.__class__.__name__ != other.__class__.__name__:
return False
a = (self.mode == other.mode)
b = (self.size == other.size)
c = (self.getpalette() == other.getpalette())
d = (self.info == other.info)
e = (self.category == other.category)
f = (self.readonly == other.readonly)
g = (self.tobytes() == other.tobytes())
return a and b and c and d and e and f and g
def __ne__(self, other):
eq = (self == other)
return not eq
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__, self.__class__.__name__,
self.mode, self.size[0], self.size[1],
id(self)
)
def _repr_png_(self):
""" iPython display hook support
:returns: png version of the image as bytes
"""
from io import BytesIO
b = BytesIO()
self.save(b, 'PNG')
return b.getvalue()
def __getattr__(self, name):
if name == "__array_interface__":
# numpy array interface support
new = {}
shape, typestr = _conv_type_shape(self)
new['shape'] = shape
new['typestr'] = typestr
new['data'] = self.tobytes()
return new
raise AttributeError(name)
def __getstate__(self):
return [
self.info,
self.mode,
self.size,
self.getpalette(),
self.tobytes()]
def __setstate__(self, state):
Image.__init__(self)
self.tile = []
info, mode, size, palette, data = state
self.info = info
self.mode = mode
self.size = size
self.im = core.new(mode, size)
if mode in ("L", "P") and palette:
self.putpalette(palette)
self.frombytes(data)
def tobytes(self, encoder_name="raw", *args):
"""
Return image as a bytes object
:param encoder_name: What encoder to use. The default is to
use the standard "raw" encoder.
:param args: Extra arguments to the encoder.
:rtype: A bytes object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while True:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tobytes" % s)
return b"".join(data)
def tostring(self, *args, **kw):
raise Exception("tostring() has been removed. " +
"Please call tobytes() instead.")
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tobytes("xbm")
return b"".join([
("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'),
("#define %s_height %d\n" % (name, self.size[1])).encode('ascii'),
("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};"
])
def frombytes(self, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
def fromstring(self, *args, **kw):
raise Exception("fromstring() has been removed. " +
"Please call frombytes() instead.")
def load(self):
"""
Allocates storage for the image and loads the pixel data. In
normal cases, you don't need to call this method, since the
Image class automatically loads an opened image when it is
accessed for the first time. This method will close the file
associated with the image.
:returns: An image access object.
:rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
"""
if self.im and self.palette and self.palette.dirty:
# realize palette
self.im.putpalette(*self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if "transparency" in self.info:
if isinstance(self.info["transparency"], int):
self.im.putpalettealpha(self.info["transparency"], 0)
else:
self.im.putpalettealphas(self.info["transparency"])
self.palette.mode = "RGBA"
if self.im:
if HAS_CFFI and USE_CFFI_ACCESS:
if self.pyaccess:
return self.pyaccess
from PIL import PyAccess
self.pyaccess = PyAccess.new(self, self.readonly)
if self.pyaccess:
return self.pyaccess
return self.im.pixel_access(self.readonly)
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
def convert(self, mode=None, matrix=None, dither=None,
palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if not mode:
# determine default mode
if self.mode == "P":
self.load()
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
else:
return self.copy()
self.load()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
return self._new(im)
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if "transparency" in self.info and \
self.info['transparency'] is not None:
if self.mode in ('L', 'RGB') and mode == 'RGBA':
# Use transparent conversion to promote from transparent
# color to an alpha channel.
return self._new(self.im.convert_transparent(
mode, self.info['transparency']))
elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'):
t = self.info['transparency']
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
warnings.warn('Palette images with Transparency ' +
'expressed in bytes should be converted ' +
'to RGBA images')
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1, 1)))
if self.mode == 'P':
trns_im.putpalette(self.palette)
trns_im.putpixel((0, 0), t)
if mode in ('L', 'RGB'):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert('RGB')
trns = trns_im.getpixel((0, 0))
elif self.mode == 'P' and mode == 'RGBA':
t = self.info['transparency']
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
raise ValueError("Transparency for P mode should" +
" be bytes or int")
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from PIL import ImagePalette
new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del(new.info['transparency'])
if trns is not None:
try:
new.info['transparency'] = new.palette.getcolor(trns)
except:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del(new.info['transparency'])
warnings.warn("Couldn't allocate palette entry " +
"for transparency")
return new
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
new_im = self._new(im)
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del(new_im.info['transparency'])
if trns is not None:
if new_im.mode == 'P':
try:
new_im.info['transparency'] = new_im.palette.getcolor(trns)
except:
del(new_im.info['transparency'])
warnings.warn("Couldn't allocate palette entry " +
"for transparency")
else:
new_im.info['transparency'] = trns
return new_im
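# Example (illustrative sketch; "photo.jpg" is a hypothetical file):
#   >>> im = open("photo.jpg")              # an "RGB" image
#   >>> grey = im.convert("L")              # ITU-R 601-2 luma transform
#   >>> bw = grey.convert("1")              # Floyd-Steinberg dither (default)
#   >>> pal = im.convert("P", palette=ADAPTIVE, colors=64)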
def quantize(self, colors=256, method=None, kmeans=0, palette=None):
"""
Convert the image to 'P' mode with the specified number
of colors.
:param colors: The desired number of colors, <= 256
:param method: 0 = median cut
1 = maximum coverage
2 = fast octree
:param kmeans: Integer
:param palette: Quantize to the :py:class:`PIL.ImagingPalette` palette.
:returns: A new image
"""
self.load()
if method is None:
# defaults:
method = 0
if self.mode == 'RGBA':
method = 2
if self.mode == 'RGBA' and method != 2:
# Caller specified an invalid mode.
raise ValueError('Fast Octree (method == 2) is the ' +
'only valid method for quantizing RGBA images')
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._makeself(im)
im = self.im.quantize(colors, method, kmeans)
return self._new(im)
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
im = self.im.copy()
return self._new(im)
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
This is a lazy operation. Changes to the source image may or
may not be reflected in the cropped image. To break the
connection, call the :py:meth:`~PIL.Image.Image.load` method on
the cropped copy.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if box is None:
return self.copy()
# lazy operation
return _ImageCrop(self, box)
def draft(self, mode, size):
"""
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to greyscale while loading it, or to extract a 128x192
version from a PCD file.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
:param mode: The requested mode.
:param size: The requested size.
"""
pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object. """
self.load()
if isinstance(filter, collections.Callable):
filter = filter()
if not hasattr(filter, "filter"):
raise TypeError("filter argument should be ImageFilter.Filter " +
"instance or class")
if self.im.bands == 1:
return self._new(filter.filter(self.im))
# fix to handle multiband images since _imaging doesn't
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, **getbands** on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. If the image
is completely empty, this method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band=None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use **list(im.getdata())**.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self):
"""
Returns the image palette as a list.
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
if bytes is str:
return [i8(c) for c in self.im.getpalette()]
else:
return list(self.im.getpalette())
except ValueError:
return None # no palette
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y).
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return [i8(c) for c in x], [i8(c) for c in y]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
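# Example (illustrative sketch; `im` stands for a loaded "RGB" image):
#   >>> h = im.histogram()
#   >>> len(h)          # 256 counts per band, three bands concatenated
#   768
#   >>> sum(h[:256]) == im.size[0] * im.size[1]   # red-band counts
#   True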
def offset(self, xoffset, yoffset=None):
raise Exception("offset() has been removed. " +
"Please call ImageChops.offset() instead.")
def paste(self, im, box=None, mask=None):
"""
Pastes another image into this image. The box argument is either
a 2-tuple giving the upper left corner, a 4-tuple defining the
left, upper, right, and lower pixel coordinate, or None (same as
(0, 0)). If a 4-tuple is given, the size of the pasted image
must match the size of the region.
If the modes don't match, the pasted image is converted to the mode of
this image (see the :py:meth:`~PIL.Image.Image.convert` method for
details).
Instead of an image, the source can be an integer or tuple
containing pixel values. The method then fills the region
with the given color. When creating RGB images, you can
also use color strings as supported by the ImageColor module.
If a mask is given, this method updates only the regions
indicated by the mask. You can use either "1", "L" or "RGBA"
images (in the latter case, the alpha band is used as mask).
Where the mask is 255, the given image is copied as is. Where
the mask is 0, the current value is preserved. Intermediate
values will mix the two images together, including their alpha
channels if they have them.
See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
combine images with respect to their alpha channels.
:param im: Source image or pixel value (integer or tuple).
:param box: An optional 4-tuple giving the region to paste into.
If a 2-tuple is used instead, it's treated as the upper left
corner. If omitted or None, the source is pasted into the
upper left corner.
If an image is given as the second argument and there is no
third, the box defaults to (0, 0), and the second argument
is interpreted as a mask image.
:param mask: An optional mask image.
"""
if isImageType(box) and mask is None:
# abbreviated paste(im, mask) syntax
mask = box
box = None
if box is None:
# cover all of self
box = (0, 0) + self.size
if len(box) == 2:
# upper left corner given; get size from image or mask
if isImageType(im):
size = im.size
elif isImageType(mask):
size = mask.size
else:
# FIXME: use self.size here?
raise ValueError(
"cannot determine region size; use 4-item box"
)
box = box + (box[0]+size[0], box[1]+size[1])
if isStringType(im):
from PIL import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self.load()
if self.readonly:
self._copy()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
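# Example (illustrative sketch; both images are hypothetical):
#   >>> background = new("RGB", (400, 300), "white")
#   >>> overlay = new("RGBA", (100, 50), (255, 0, 0, 128))
#   >>> background.paste(overlay, (10, 10), overlay)  # alpha band as mask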
def point(self, lut, mode=None):
"""
Maps this image through a lookup table or function.
:param lut: A lookup table, containing 256 (or 65536 if
self.mode=="I" and mode == "L") values per band in the
image. A function can be used instead; it should take a
single argument. The function is called once for each
possible pixel value, and the resulting table is applied to
all bands of the image.
:param mode: Output mode (default is same as input). In the
current version, this can only be used if the source image
has mode "L" or "P", and the output has mode "1" or the
source image mode is "I" and the output mode is "L".
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if isinstance(lut, ImagePointHandler):
return lut.point(self)
if callable(lut):
# if it isn't a list, it should be a function
if self.mode in ("I", "I;16", "F"):
# check if the function can be used with point_transform
# UNDONE wiredfool -- I think this prevents us from ever doing
# a gamma function point transform on > 8bit images.
scale, offset = _getscaleoffset(lut)
return self._new(self.im.point_transform(scale, offset))
# for other modes, convert the function to a table
lut = [lut(i) for i in range(256)] * self.im.bands
if self.mode == "F":
# FIXME: _imaging returns a confusing error message for this case
raise ValueError("point operation not supported for this mode")
return self._new(self.im.point(lut, mode))
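# Example (illustrative sketch; `im` stands for a loaded "L" or "RGB" image):
#   >>> brighter = im.point(lambda i: min(i + 30, 255))
#   >>> inverted = im.point(lambda i: 255 - i)
#   >>> bilevel = im.convert("L").point(lambda i: 255 if i > 128 else 0, "1")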
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
self.load()
if self.readonly:
self._copy()
if self.mode not in ("LA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
self.pyaccess = None
except (AttributeError, ValueError):
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "RGBA"):
raise ValueError # sanity check
self.im = im
self.pyaccess = None
self.mode = self.im.mode
except (KeyError, ValueError):
raise ValueError("illegal image mode")
if self.mode == "LA":
band = 1
else:
band = 3
if isImageType(alpha):
# alpha layer
if alpha.mode not in ("1", "L"):
raise ValueError("illegal image mode")
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self.load()
if self.readonly:
self._copy()
self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
"""
Attaches a palette to this image. The image must be a "P" or
"L" image, and the palette sequence must contain 768 integer
values, where each group of three values represent the red,
green, and blue values for the corresponding pixel
index. Instead of an integer sequence, you can use an 8-bit
string.
:param data: A palette sequence (either a list or a string).
"""
from PIL import ImagePalette
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
self.load()
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
if not isinstance(data, bytes):
if bytes is str:
data = "".join(chr(x) for x in data)
else:
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self.mode = "P"
self.palette = palette
self.palette.mode = "RGB"
self.load() # install new palette
def putpixel(self, xy, value):
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y).
:param value: The pixel value.
"""
self.load()
if self.readonly:
self._copy()
self.pyaccess = None
self.load()
if self.pyaccess:
return self.pyaccess.putpixel(xy, value)
return self.im.putpixel(xy, value)
def resize(self, size, resample=NEAREST):
"""
Returns a resized copy of this image.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation),
:py:attr:`PIL.Image.BICUBIC` (cubic spline interpolation), or
:py:attr:`PIL.Image.LANCZOS` (a high-quality downsampling filter).
If omitted, or if the image has mode "1" or "P", it is
set :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS):
raise ValueError("unknown resampling filter")
self.load()
size = tuple(size)
if self.size == size:
return self._new(self.im)
if self.mode in ("1", "P"):
resample = NEAREST
if self.mode == 'RGBA':
return self.convert('RGBa').resize(size, resample).convert('RGBA')
return self._new(self.im.resize(size, resample))
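# Example (illustrative sketch; `im` stands for any loaded image):
#   >>> small = im.resize((128, 128))              # NEAREST by default
#   >>> smooth = im.resize((128, 128), LANCZOS)    # high-quality filter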
def rotate(self, angle, resample=NEAREST, expand=0):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
set :py:attr:`PIL.Image.NEAREST`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if expand:
import math
angle = -angle * math.pi / 180
matrix = [
math.cos(angle), math.sin(angle), 0.0,
-math.sin(angle), math.cos(angle), 0.0
]
def transform(x, y, matrix=matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
# calculate output size
w, h = self.size
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = transform(x, y)
xx.append(x)
yy.append(y)
w = int(math.ceil(max(xx)) - math.floor(min(xx)))
h = int(math.ceil(max(yy)) - math.floor(min(yy)))
# adjust center
x, y = transform(w / 2.0, h / 2.0)
matrix[2] = self.size[0] / 2.0 - x
matrix[5] = self.size[1] / 2.0 - y
return self.transform((w, h), AFFINE, matrix, resample)
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
return self._new(self.im.rotate(angle, resample, expand))
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param options: Extra parameters to the image writer.
:returns: None
:exception KeyError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
filename = ""
if isPath(fp):
filename = fp
elif sys.version_info >= (3, 4):
from pathlib import Path
if isinstance(fp, Path):
filename = str(fp.resolve())
elif hasattr(fp, "name") and isPath(fp.name):
filename = fp.name
# may mutate self!
self.load()
save_all = False
if 'save_all' in params:
save_all = params['save_all']
del params['save_all']
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
if ext not in EXTENSION:
init()
format = EXTENSION[ext]
if format.upper() not in SAVE:
init()
if save_all:
save_handler = SAVE_ALL[format.upper()]
else:
save_handler = SAVE[format.upper()]
if filename:
fp = builtins.open(filename, "wb")
close = 1
else:
close = 0
try:
save_handler(self, fp, filename)
finally:
# do what we can to clean up
if close:
fp.close()
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
formats only allow you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
raise EOFError
def show(self, title=None, command=None):
"""
Displays this image. This method is mainly intended for
debugging purposes.
On Unix platforms, this method saves the image to a temporary
PPM file, and calls the **xv** utility.
On Windows, it saves the image to a temporary BMP file, and uses
the standard BMP display utility to show it (usually Paint).
:param title: Optional title to use for the image window,
where possible.
:param command: command used to show the image
"""
_show(self, title=title, command=command)
def split(self):
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
ims = [self.copy()]
else:
ims = []
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
:returns: Frame number, starting with 0.
"""
return 0
def thumbnail(self, size, resample=BICUBIC):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
:returns: None
"""
# preserve aspect ratio
x, y = self.size
if x > size[0]:
y = int(max(y * size[0] / x, 1))
x = int(size[0])
if y > size[1]:
x = int(max(x * size[1] / y, 1))
y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
im = self.resize(size, resample)
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
self.pyaccess = None
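# Example (illustrative sketch; copy first to keep the full-size original):
#   >>> thumb = im.copy()
#   >>> thumb.thumbnail((128, 128))   # in place; aspect ratio preserved
#   >>> max(thumb.size) <= 128
#   True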
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if self.mode == 'RGBA':
return self.convert('RGBa').transform(
size, method, data, resample, fill).convert('RGBA')
if isinstance(method, ImageTransformHandler):
return method.transform(size, self, resample=resample, fill=fill)
if hasattr(method, "getdata"):
# compatibility w. old-style transform objects
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, None)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample, fill)
else:
im.__transformer((0, 0)+size, self, method, data, resample, fill)
return im
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
# FIXME: this should be turned into a lazy operation (?)
w = box[2]-box[0]
h = box[3]-box[1]
if method == AFFINE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4])
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
elif method == PERSPECTIVE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4],
data[6], data[7])
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]
sw = data[2:4]
se = data[4:6]
ne = data[6:8]
x0, y0 = nw
As = 1.0 / w
At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270` or
:py:attr:`PIL.Image.TRANSPOSE`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
return self._new(self.im.transpose(method))
def effect_spread(self, distance):
"""
Randomly spread pixels in an image.
:param distance: Distance to spread pixels.
"""
self.load()
im = self.im.effect_spread(distance)
return self._new(im)
def toqimage(self):
"""Returns a QImage copy of this image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqimage(self)
def toqpixmap(self):
"""Returns a QPixmap copy of this image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqpixmap(self)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
def __init__(self, im, box):
Image.__init__(self)
x0, y0, x1, y1 = box
if x1 < x0:
x1 = x0
if y1 < y0:
y1 = y0
self.mode = im.mode
self.size = x1-x0, y1-y0
self.__crop = x0, y0, x1, y1
self.im = im.im
def load(self):
# lazy evaluation!
if self.__crop:
self.im = self.im.crop(self.__crop)
self.__crop = None
if self.im:
return self.im.pixel_access(self.readonly)
# FIXME: future versions should optimize crop/paste
# sequences!
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler(object):
# used as a mixin by point transforms (for use with im.point)
pass
class ImageTransformHandler(object):
# used as a mixin by geometry transforms (for use with im.transform)
pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
def new(mode, size, color=0):
"""
Creates a new image with the given mode and size.
:param mode: The mode to use for the new image. See:
:ref:`concept-modes`.
:param size: A 2-tuple, containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
If given, this should be a single integer or floating point value
for single-band modes, and a tuple for multi-band modes (one value
per band). When creating RGB images, you can also use color
strings as supported by the ImageColor module. If the color is
None, the image is not initialised.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
from PIL import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
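# Example (illustrative sketch):
#   >>> black = new("L", (64, 64))                # single band, all zero
#   >>> red = new("RGB", (64, 64), (255, 0, 0))   # one value per band
#   >>> pink = new("RGB", (64, 64), "hotpink")    # ImageColor string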
def frombytes(mode, size, data, decoder_name="raw", *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
In its simplest form, this function takes three arguments
(mode, size, and unpacked pixel data).
You can also use any pixel decoder supported by PIL. For more
information on available decoders, see the section
:ref:`Writing Your Own File Decoder <file-decoders>`.
Note that this function decodes pixel data only, not entire images.
If you have an entire image in a string, wrap it in a
:py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
it.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.frombytes(data, decoder_name, args)
return im
def fromstring(*args, **kw):
raise Exception("fromstring() has been removed. " +
"Please call frombytes() instead.")
def frombuffer(mode, size, data, decoder_name="raw", *args):
"""
Creates an image memory referencing pixel data in a byte buffer.
This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
in the byte buffer, where possible. This means that changes to the
original buffer object are reflected in this image. Not all modes can
share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
Note that this function decodes pixel data only, not entire images.
If you have an entire image file in a string, wrap it in a
**BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.
In the current version, the default parameters used for the "raw" decoder
differ from those used for :py:func:`~PIL.Image.fromstring`. This is a
bug, and will probably be fixed in a future release. The current release
issues a warning if you do this; to disable the warning, you should provide
the full set of parameters. See below for details.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A bytes or other buffer object containing raw
data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder. For the
default encoder ("raw"), it's recommended that you provide the
full set of parameters::
frombuffer(mode, size, data, "raw", mode, 0, 1)
:returns: An :py:class:`~PIL.Image.Image` object.
.. versionadded:: 1.1.4
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw":
if args == ():
if warnings:
warnings.warn(
"the frombuffer defaults may change in a future release; "
"for portability, change the call to read:\n"
" frombuffer(mode, size, data, 'raw', mode, 0, 1)",
RuntimeWarning, stacklevel=2
)
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
im = new(mode, (1, 1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
If obj is not contiguous, then the tobytes method is called
and :py:func:`~PIL.Image.frombuffer` is used.
:param obj: Object with array interface
:param mode: Mode to use (will be determined from type if None)
See: :ref:`concept-modes`.
:returns: An image object.
.. versionadded:: 1.1.6
"""
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
try:
strides = arr['strides']
except KeyError:
strides = None
if mode is None:
try:
typekey = (1, 1) + shape[2:], arr['typestr']
mode, rawmode = _fromarray_typemap[typekey]
except KeyError:
# print typekey
raise TypeError("Cannot handle this data type")
else:
rawmode = mode
if mode in ["1", "L", "I", "P", "F"]:
ndmax = 2
elif mode == "RGB":
ndmax = 3
else:
ndmax = 4
if ndim > ndmax:
raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax))
size = shape[1], shape[0]
if strides is not None:
if hasattr(obj, 'tobytes'):
obj = obj.tobytes()
else:
obj = obj.tostring()
return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
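# Example (illustrative sketch; numpy is assumed here, not required by
# this module):
#   >>> import numpy as np
#   >>> arr = np.zeros((100, 200, 3), dtype=np.uint8)  # rows x cols x bands
#   >>> im = fromarray(arr)
#   >>> im.mode, im.size                               # size is (width, height)
#   ('RGB', (200, 100))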
def fromqimage(im):
"""Creates an image instance from a QImage image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.fromqimage(im)
def fromqpixmap(im):
"""Creates an image instance from a QPixmap image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.fromqpixmap(im)
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
# ((1, 1), "|b1"): ("1", "1"), # broken
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "<i2"): ("I", "I;16"),
((1, 1), ">i2"): ("I", "I;16B"),
((1, 1), "<i4"): ("I", "I;32"),
((1, 1), ">i4"): ("I", "I;32B"),
((1, 1), "<f4"): ("F", "F;32F"),
((1, 1), ">f4"): ("F", "F;32BF"),
((1, 1), "<f8"): ("F", "F;64F"),
((1, 1), ">f8"): ("F", "F;64BF"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def _decompression_bomb_check(size):
if MAX_IMAGE_PIXELS is None:
return
pixels = size[0] * size[1]
if pixels > MAX_IMAGE_PIXELS:
warnings.warn(
"Image size (%d pixels) exceeds limit of %d pixels, "
"could be decompression bomb DOS attack." %
(pixels, MAX_IMAGE_PIXELS),
DecompressionBombWarning)
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement :py:meth:`~file.read`,
:py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception IOError: If the file cannot be found, or the image cannot be
opened and identified.
"""
if mode != "r":
raise ValueError("bad mode %r" % mode)
filename = ""
if isPath(fp):
filename = fp
elif sys.version_info >= (3, 4):
from pathlib import Path
if isinstance(fp, Path):
filename = str(fp.resolve())
if filename:
fp = builtins.open(filename, "rb")
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
prefix = fp.read(16)
preinit()
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
#logger.debug("", exc_info=True)
continue
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
return im
raise IOError("cannot identify image file %r"
% (filename if filename else fp))
#
# Image processing.
def alpha_composite(im1, im2):
"""
Alpha composite im2 over im1.
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.alpha_composite(im1.im, im2.im))
def blend(im1, im2, alpha):
"""
Creates a new image by interpolating between two input images, using
a constant alpha.::
out = image1 * (1.0 - alpha) + image2 * alpha
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:param alpha: The interpolation alpha factor. If alpha is 0.0, a
copy of the first image is returned. If alpha is 1.0, a copy of
the second image is returned. There are no restrictions on the
alpha value. If necessary, the result is clipped to fit into
the allowed output range.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
def composite(image1, image2, mask):
"""
Create composite image by blending images using a transparency mask.
:param image1: The first image.
:param image2: The second image. Must have the same mode and
size as the first image.
:param mask: A mask image. This image can have mode
"1", "L", or "RGBA", and must have the same size as the
other two images.
"""
image = image2.copy()
image.paste(image1, None, mask)
return image
def eval(image, *args):
"""
Applies the function (which should take one argument) to each pixel
in the given image. If the image has more than one band, the same
function is applied to each band. Note that the function is
evaluated once for each possible pixel value, so you cannot use
random components or other generators.
:param image: The input image.
:param function: A function object, taking one integer argument.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
return image.point(args[0])
def merge(mode, bands):
"""
Merge a set of single band images into a new multiband image.
:param mode: The mode to use for the output image. See:
:ref:`concept-modes`.
:param bands: A sequence containing one single-band image for
each band in the output image. All bands must have the
same size.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for im in bands[1:]:
if im.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if im.size != bands[0].size:
raise ValueError("size mismatch")
im = core.new(mode, bands[0].size)
for i in range(getmodebands(mode)):
bands[i].load()
im.putband(bands[i].im, i)
return bands[0]._new(im)
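# Example (illustrative round trip; `im` stands for a loaded "RGB" image):
#   >>> r, g, b = im.split()
#   >>> swapped = merge("RGB", (b, g, r))   # exchange red and blue bands
#   >>> swapped.mode
#   'RGB'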
# --------------------------------------------------------------------
# Plugin registry
def register_open(id, factory, accept=None):
"""
Register an image file plugin. This function should not be used
in application code.
:param id: An image format identifier.
:param factory: An image file factory method.
:param accept: An optional function that can be used to quickly
reject images having another format.
"""
id = id.upper()
ID.append(id)
OPEN[id] = factory, accept
def register_mime(id, mimetype):
"""
Registers an image MIME type. This function should not be used
in application code.
:param id: An image format identifier.
:param mimetype: The image MIME type for this format.
"""
MIME[id.upper()] = mimetype
def register_save(id, driver):
"""
Registers an image save function. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE[id.upper()] = driver
def register_save_all(id, driver):
"""
Registers an image function to save all the frames
of a multiframe format. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE_ALL[id.upper()] = driver
def register_extension(id, extension):
"""
Registers an image extension. This function should not be
used in application code.
:param id: An image format identifier.
:param extension: An extension used for this format.
"""
EXTENSION[extension.lower()] = id.upper()
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
# override me, as necessary
_showxv(image, **options)
def _showxv(image, title=None, **options):
from PIL import ImageShow
ImageShow.show(image, title, **options)
# --------------------------------------------------------------------
# Effects
def effect_mandelbrot(size, extent, quality):
"""
Generate a Mandelbrot set covering the given extent.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param extent: The extent to cover, as a 4-tuple:
(x0, y0, x1, y1).
:param quality: Quality.
"""
return Image()._new(core.effect_mandelbrot(size, extent, quality))
def effect_noise(size, sigma):
"""
Generate Gaussian noise centered around 128.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param sigma: Standard deviation of noise.
"""
return Image()._new(core.effect_noise(size, sigma))
# End of file
| FreddieShoreditch/image_folder_organiser | venv/lib/python2.7/site-packages/PIL/Image.py | Python | mit | 81,182 | ["Gaussian"] | 541c6ecd6eb97264efcbd69b3534a09e1b0201dd77286e46d67be6cd59665981 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008-2011 Kees Bakker
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2013-2017 Alois Poettker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Import from Pro-Gen"
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import re, os, struct, sys, time
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger('.ImportProGen')
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.datehandler import displayer
from gramps.gen.db.txn import DbTxn
from gramps.gen.db.dbconst import (PERSON_KEY, FAMILY_KEY, EVENT_KEY, PLACE_KEY,
NOTE_KEY, TAG_KEY, CITATION_KEY, SOURCE_KEY)
from gramps.gen.errors import HandleError
from gramps.gen.lib import (Address, Attribute, AttributeType, ChildRef,
Citation, Date, Event, EventRef, EventType, Family,
FamilyRelType, Name, NameType, NameOriginType, Note,
NoteType, Person, Place, PlaceName, Source,
SrcAttribute, Surname, Tag)
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.id import create_id
from gramps.gui.utils import ProgressMeter
from gramps.plugins.importer.importxml import ImportInfo
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
PRIMOBJECTS = ('person', 'family', 'child')
TAGOBJECTS = ('person', 'family', 'event', 'place', 'source', 'citation', 'note')
OPTOBJECTS = (
'person-ident', 'family-ident',
'surname-male', 'surname-female',
'birth-date', 'death-date', 'death-cause',
'refn-code'
)
MONTHES = {
'jan' : 1, # de, en, nl
'feb' : 2, 'febr' : 2, # de, en, nl
'mrz' : 3, # de
'mar' : 3, 'march' : 3, # en
'maa' : 3, 'mrt' : 3, 'maart' : 3, # nl
'apr' : 4, 'april' : 4, # de, en, nl
'mai' : 5, 'may' : 5, 'mei' : 5, # de, en, nl
'jun' : 6, 'june' : 6, 'juni' : 6, # de, en, nl
'jul' : 7, 'july' : 7, 'juli' : 7, # de, en, nl
'aug' : 8, # de, en, nl
'sep' : 9, 'sept' : 9, # de, en, nl
'okt' : 10, 'oct' : 10, 'ok' : 10, # de, en, nl
'nov' : 11, # de, en, nl
'dez' : 12, 'dec' : 12 # de, en, nl
}
PREFIXES = (
't ', # nl
'den ', 'der ', 'de ', # de, nl
'het ', # nl
'in den ', # nl
'ten ', 'ter ', 'te ', # nl
'van den ', 'van der ', 'van de ', 'van ', # nl (compound forms before 'van ')
'von der ', 'von ', # de (compound form before 'von ')
'zu ' # de
)
class ProgenError(Exception):
"""
Class used to report Pro-Gen exceptions (mostly errors).
"""
def __init__(self, value=''):
Exception.__init__(self)
self.value = value
def __str__(self):
return self.value
def _read_mem(bname):
"""
Read a Pro-Gen record.
"""
# Each record is 32 bytes. First a 4 byte reference to the next record
# followed by 28 bytes of text. The information forms a chain of records,
# that stops when a reference is 0 or smaller. There are two special
# sequences:
# <ESC> <CR> hard return
# <ESC> <^Z> end of the memo field
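# A minimal decoding sketch (hypothetical bytes, not from a real .MEM file),
# using the little-endian layout set up below:
#   raw = b'\x02\x00\x00\x00' + b'start of a memo text ...'.ljust(28)
#   recno, text = struct.unpack('<i28s', raw)  # recno == 2 -> continues at record 2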
if os.path.exists(bname + '.MEM'):
fname = bname + '.MEM'
else:
fname = bname + '.mem'
with open(fname, "rb") as file_:
LOG.debug("The current system is %s-endian", sys.byteorder)
# The input file comes from [what was originally] a DOS machine so will
# be little-endian, regardless of the 'native' byte order of the host
recfmt = "<i28s"
reclen = struct.calcsize(str(recfmt))
# print("# reclen = %d" % reclen)
mems = []
while 1:
buf = file_.read(reclen)
if not buf:
break
(recno, text) = struct.unpack(recfmt, buf)
mems.append([recno, text])
return mems
def _read_recs(table, bname, mems):
"""
Read records from .PER or .REL file.
"""
if os.path.exists(bname + table.fileext):
fname = bname + table.fileext
else:
fname = bname + table.fileext.lower()
with open(fname, "rb") as file_:
recfmt = table.recfmt
LOG.info("# %s - recfmt = %s", table['name1'], recfmt)
reclen = struct.calcsize(str(recfmt))
LOG.info("# %s - reclen = %d", table['name1'], reclen)
recs = []
pos_recs, tot_recs = 0, 0 # positive / total records
while 1:
buf = file_.read(reclen)
if not buf:
break
tot_recs += 1
# skip records whose bytes are all identical (deleted or empty records)
if buf.count(buf[0]) != len(buf):
pos_recs += 1
tups = struct.unpack(recfmt, buf)
gid = str(tot_recs).encode('cp850')
tups = list(tups) # casting to list
tups.insert(0, gid) # inserting essential Gramps ID!
recs.append(tups)
# recflds = table.convert_record_to_list(tups, mems) # Debug!
LOG.info("# length %s.recs[] = %d", table['name1'], len(recs))
LOG.info("# total %d, pos. %d, null %d recs", \
tot_recs, pos_recs, tot_recs - pos_recs)
return recs
def _get_defname(fname):
"""
Get the name of the PG30.DEF file by looking at the user DEF file.
"""
# Return the name of the DEF file. <fname> is expected to be somewhere in
# the PG30 tree. Contents of <fname> is always something like:
# => \\0
# => C:\\PG30\\NL\\PG30-1.DEF
# We will strip the C: and convert the rest to a native pathname. Next,
# this pathname is compared with <fname>.
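# Illustrative resolution (hypothetical paths): 'C:\PG30\NL\PG30-1.DEF' is
# reduced to 'PG30/NL/PG30-1.DEF' (POSIX separators) and then looked up in
# each ancestor directory of <fname> by the loop below.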
with open(fname, buffering=1, encoding='cp437', errors='strict') as file_:
lines = file_.readlines()
if not lines[0].startswith(r'\0') or len(lines) != 2:
return None, fname
defname = lines[1]
defname = defname.strip()
# Strip drive, if any
defname = re.sub(r'^\w:', '', defname)
defname = defname.replace('\\', os.sep)
# Strip leading slash, if any.
if defname.startswith(os.sep):
defname = defname[1:]
# LOG.warning('_get_defname: fname=%(fname)s => defname=%(defname)s', vars())
# Using directory of <fname>, go to parent directory until the DEF is found
dir_, file_ = os.path.split(os.path.abspath(fname))
while dir_ and dir_ != os.sep:
# LOG.warning('_get_defname: dir=%(dir_)s => defname=%(defname)s', vars())
newdefname = os.path.join(dir_, defname)
if os.path.exists(newdefname):
return newdefname, defname
newdefname = newdefname.lower()
if os.path.exists(newdefname):
return newdefname, defname
# One level up
dir_, file_ = os.path.split(dir_)
return None, defname
# Example field:
# ['First name', '47', '64', '4', '2', '15', '""', '""']
# item 0
# item 1 is a number indicating the fieldtype
# item 2
# item 3 is the size of the field
class PG30DefTableField(object):
"""
This class represents a field in one of the tables in the DEF file.
"""
def __init__(self, name, value):
self.fieldname = name
self.fields = value.split(',')
self.fields = [p.strip() for p in self.fields]
# We have seen some case insensitivity in DEF files ...
self.name = self.fields[0].lower()
self.type_ = int(self.fields[1])
self.size = int(self.fields[3])
def __repr__(self):
return self.fieldname + ' -> ' + ', '.join(self.fields)
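# Illustrative use (values from the example field above; not part of the
# original module):
#   fld = PG30DefTableField('f03', 'First name, 47, 64, 4, 2, 15, "", ""')
#   fld.name == 'first name'; fld.type_ == 47; fld.size == 4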
ESC_CTRLZ = re.compile(r'\033\032.*')
class PG30DefTable(object):
"""
This class represents a table in the DEF file.
"""
def __init__(self, name, lines):
self.name = name
self.flds = []
self.parms = {}
self.recfmt = None
# Example line:
# f02=Person_last_change ,32,10,10, 1,68,"","INDI CHAN DATE"
line_pat = re.compile(r'(\w+) = (.*)', re.VERBOSE)
for lne in lines:
mtch = line_pat.match(lne)
if mtch: # Catch duplicates?
self.parms[mtch.group(1)] = mtch.group(2)
self.fileext = self.parms.get('fileext', None)
# If there is a n_fields entry then this is a table that
# has details about the record format of another file (PER or REL).
if 'n_fields' in self.parms:
self.flds = self.get_fields()
self.recfmt = self.get_recfmt()
self.nam2fld = {}
self.nam2idx = {}
self.recflds = [] # list of fields that use up space in a record
j = 0
for i, fld in enumerate(self.flds):
# print("# field %s" % fld)
nam = fld.name
self.nam2fld[nam] = fld
# fld.size == 0: field will not be acknowledged!
if (i == 0) or (fld.size != 0):
self.nam2idx[nam] = j
# print("# %s <= %d" % (fld.fieldname, j))
self.recflds.append(fld)
j += 1
def __getitem__(self, i):
return self.parms.get(i, None)
def get_recfmt(self):
""" Get the record format for struct.unpack """
# Example field:
# ['First Name', '47', '64', '4', '2', '15', '""', '""']
# item 0
# item 1 is a number indicating the fieldtype
# item 2
# item 3 is the size of the field
# ...
flds = self.flds
# The input file comes from [what was originally] a DOS machine so will
# be little-endian, regardless of 'native' byte order of the host system
fmt = '<'
for fld in flds:
fldtyp = fld.type_
if fldtyp == 2 or fldtyp == 3 or fldtyp == 22 or fldtyp == 23:
fmt += 'i'
elif fldtyp == 31:
pass
elif fldtyp == 32 or fldtyp == 44 or fldtyp == 45:
fmt += '%ds' % fld.size
elif fldtyp == 41:
fmt += 'h'
elif fldtyp == 42 or fldtyp == 43 or fldtyp == 46 or fldtyp == 47:
fmt += 'i'
else:
pass # ???? Do we want to know?
return fmt
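# As an illustration (hypothetical field mix, not from a real DEF file):
# fields of type 31 (skipped), type 32 with size 28, and type 41 yield
# fmt == '<28sh', and struct.calcsize('<28sh') == 30.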
def get_fields(self):
""" Get the fields """
# For example from PG30-1.DEF
# n_fields=58
# f01=Person ID , 31, 6, 0, 1, 17, "", "INDI RFN"
# f02=Person change, 32, 10,10, 1, 68, "", "INDI CHAN DATE"
# f03=First name , 47, 64, 4, 2, 15, "", ""
n_fields = int(self.parms['n_fields'])
flds = []
for i in range(n_fields):
fld_name = 'f%02d' % (i+1)
fld = self.parms.get(fld_name, None)
flds.append(PG30DefTableField(fld_name, fld))
return flds
def get_mem_text(self, mems, i):
""" Normalize text. """
# Notice that Pro-Gen starts the mem numbering at 1.
if i <= 0:
# MEM index 0, just return an empty string
return ""
i -= 1
recno = mems[i][0] - 1
text = mems[i][1].decode('cp850')
while recno >= 0:
text += mems[recno][1].decode('cp850')
recno = mems[recno][0] - 1
text = text.replace('\033\r', '\n') # ESC-^M is newline
text = ESC_CTRLZ.sub('', text) # ESC-^Z is end of string
text = text.replace('\0', '') # There can be nul bytes. Remove them.
text = text.strip() # Strip leading/trailing whitespace
return text
def get_record_field_index(self, fldname):
""" Return the index number in the record tuple, based on the name. """
if not fldname in self.nam2idx:
raise ProgenError(_("Field '%(fldname)s' not found") % locals())
return self.nam2idx[fldname]
def convert_record_to_list(self, rec, mems):
""" Convert records to list. """
flds = []
for i, record in enumerate(rec):
typ = self.recflds[i].type_
if typ == 2 or typ == 3 or typ == 22 or typ == 23:
# Record field is record number
flds.append("%d" % record)
elif typ == 46 or typ == 47:
# Record field is memory type
flds.append(self.get_mem_text(mems, rec[i]))
else:
# Not a record number, not a memory type. It must be just text.
fld = record.strip()
fld = fld.decode('cp850') # Convert to unicode
flds.append(fld)
# print(', '.join(flds))
return flds
def get_field_names(self):
""" Return field names. """
ret = []
for fld in self.flds:
if fld.size != 0:
ret.append(fld.name)
return ret
def diag(self):
""" Diagnostic ... """
txt = self.name + '\n'
if 'n_fields' in self.parms:
txt += 'n_fields = %s\n' % self.parms['n_fields']
# Just grab a field
txt += '"%s"\n' % self.flds[1]
txt += 'recfmt = %s (length=%d)' % \
(self.recfmt, struct.calcsize(str(self.recfmt)))
return txt
class PG30Def(object):
"""
Utility class to read PG30-1.DEF and to get certain information from it.
"""
# The contents of the DEF file is separated in sections that start with
# [<section name>]. For example:
# [general]
# dateformat=DD-MM-YYYY
# pointerlength=4
# tables=2
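# For example (illustrative, not from the original source): after parsing,
# self.parts['general'] holds the raw lines of the [general] section and
# self.tables['Table_1'] is the PG30DefTable describing the PER records.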
def __init__(self, fname):
# Read the main DEF file (maybe throw a IOError)
lines = None
with open(fname, buffering=1, encoding='cp437', errors='strict') as frame:
lines = frame.readlines()
# Analyse the DEF lines
lines = [l.strip() for l in lines]
content = '\n'.join(lines)
parts = re.split(r'\n(?=\[)', content)
self.parts = {}
self.tables = {}
for prts in parts:
lines = prts.splitlines()
# Get section names (typically "PRO-GEN", "general",
# "Table_1", "Table_2", "Genealogical")
k = re.sub(r'\[(.*)\]', r'\1', lines[0])
# Store section contents in a hashtable using that section name
self.parts[k] = lines[1:]
self.tables[k] = PG30DefTable(k, self.parts[k])
def __getitem__(self, i):
return self.tables.get(i, None)
def __repr__(self):
return '\n'.join([self.tables[t].diag() for t in self.tables])
# Split surname prefixes
def _split_surname(surname):
""" Divides prefix from surname. """
for prefix in PREFIXES:
if surname.startswith(prefix):
return prefix.strip(), surname[len(prefix):].strip()
return '', surname
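# Illustrative behaviour (hypothetical names): with the compound prefixes
# ordered before their stems above, _split_surname('van der Berg') returns
# ('van der', 'Berg'), while _split_surname('Berg') returns ('', 'Berg').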
class ProgenParser(UpdateCallback):
"""
Main class to parse and import Pro-Gen files.
"""
def parse_progen_file(self):
"""
Parse and analyse the Pro-Gen file.
"""
if not (self.option['prim_person'] or
self.option['prim_family'] or
self.option['prim_child']):
# Nothing to import
return None
# Read the stub DEF file (maybe throw a IOError)
self.fname, dname = _get_defname(self.fname)
if not self.fname:
error_msg = ProgenError(_("Not a (right) DEF file: %(dname)s") % locals())
self.user.notify_error(_("Pro-Gen data error"), str(error_msg))
# close feedback about import progress (GUI)!
if self.uistate: self.progress.close()
return None
# start feedback about import progress (GUI / TXT)
self.__display_message(_('Initializing.'), _('Import from Pro-Gen'))
self.def_ = PG30Def(self.fname)
# Check correct languages (only 'de', 'en' and 'nl' accepted)
male_text = self.def_.tables['Genealogical']
male_text = male_text.parms['field_father'].lower()
female_text = self.def_.tables['Genealogical']
female_text = female_text.parms['field_mother'].lower()
# Double check on keywords
if male_text == "vater" and female_text == "mutter":
self.language = 0 # language = 'de'
elif male_text == "father" and female_text == "mother":
self.language = 1 # language = 'en'
elif male_text == "vader" and female_text == "moeder":
self.language = 2 # language = 'nl'
else:
# Raise a error message
error_msg = ProgenError(_("Not a supported Pro-Gen import file language"))
self.user.notify_error(_("Pro-Gen data error"), str(error_msg))
# close feedback about import progress (GUI)
if self.uistate: self.progress.close()
return None
self.mems = _read_mem(self.bname)
self.pers = _read_recs(self.def_['Table_1'], self.bname, self.mems)
self.rels = _read_recs(self.def_['Table_2'], self.bname, self.mems)
# calculate total amount of data
if not self.uistate:
# approx. (1x father, 1x mother) + 1.5x child & families
self.set_total(2.5 * len(self.pers) + len(self.rels))
self.dbase.disable_signals()
with DbTxn(_("Pro-Gen import"), self.dbase, batch=True) as self.trans:
self.create_tags()
if self.option['prim_person']:
self.create_persons()
if self.option['prim_family']:
self.create_families()
if self.option['prim_child']:
self.add_children()
self.__display_message(_('Saving.'))
self.dbase.enable_signals()
self.dbase.request_rebuild()
# close feedback about import progress (GUI)
if self.uistate: self.progress.close()
return self.info
def __init__(self, data_base, file_name, user, option):
"""
Pro-Gen defines its own set of (static) person and family identifiers.
"""
# Sometimes these match the Gramps localisation, sometimes not. To stay on
# a safe and uniform path, the person and family identifiers for the
# German (de), English (en) and Dutch (nl) languages are defined here.
self.bname, ext = os.path.splitext(file_name)
if ext.lower() in ('.per', '.rel', '.mem'):
file_name = self.bname + '.def'
self.dbase = data_base
self.fname = file_name
self.user = user
self.uistate = user.uistate
self.info = ImportInfo()
self.option = option
self.language = 0
self.mems = None # Memory area
self.pers = [] # List for raw person data
self.rels = [] # List for raw relation data
self.gid2id = {} # Maps person id to id
self.fid2id = {} # Maps family id to id
self.fm2fam = {} # Maps family id to family
self.pkeys = {} # Caching place handles
self.skeys = {} # Caching source handles
self.ckeys = {} # Caching citation handles
# Miscellaneous
self.trans = None # Transaction identifier
self.def_ = None # PG30 definitions
self.high_fam_id = -1
# Add Config import tag?
self.tagobject_list = {}
# Records in the PER file using PG30-1.DEF contain the following fields:
self.person_identifier = [
# F00: None
[""], # F00
# F01 - F15: Person ID, Change, First / Last Name, Gender,
# Call Name, Alias, Person Code, Titel 1/2/3,
# Father, Mother, Occupation
["Person_ID", "Person_record", "Persoon record"], # F01
["Person_Änderung", "Person_last_change", "Persoon gewijzigd"], # F02
["Vorname", "Given_name", "Voornaam"], # F03
["Nachname", "Surname", "Achternaam"], # F04
["Geschlecht", "Sex", "Geslacht"], # F05
["Patronym", "Patronym", "Patroniem"], # F06
["Rufname", "Call_name", "Roepnaam"], # F07
["Alias", "Alias", "Alias"], # F08
["Person_Code", "Person_code", "Persoon code"], # F09
["Titel1", "Title1", "Titel1"], # F10
["Titel2", "Title2", "Titel2"], # F11
["Titel3", "Title3", "Titel3"], # F12
["Vater", "Father", "Vader"], # F13
["Mutter", "Mother", "Moeder"], # F14
["Beruf", "Occupation", "Beroep"], # F15
# F16 - F17: Person Note, Info
["Person_Notiz", "Person_scratch", "Persoon klad"], # F16
["Person_Info", "Person_info", "Persoon info"], # F17
# F18 - F24: Address Date, Street, ZIP, Place, Country, Phone, Info
["Anschrift_Datum", "Address_date", "Adres datum"], # F18
["Anschrift_Straße", "Address_street", "Adres straat"], # F19
["Anschrift_PLZ", "Address_zip", "Adres postcode"], # F20
["Anschrift_Ort", "Address_place", "Adres plaats"], # F21
["Anschrift_Land", "Address_country", "Adres land"], # F22
["Anschrift_Telefon", "Address_phone", "Adres telefoon"], #
["Anschrift_Info", "Address_info", "Adres info"], # F24
# F25 - F31: Birth Date, Place, Time, Source, Reference, Text, Info
["Geburt_Datum", "Birth_date", "Geboorte datum"], # F25
["Geburt_Ort", "Birth_place", "Geboorte plaats"], # F26
["Geburt_Zeit", "Birth_time", "Geboorte tijd"], # F27
["Geburt_Quelle", "Birth_source", "Geboorte bron"], # F28
["Geburt_Akte", "Birth_ref", "Geboorte akte"], # F29
["Geburt_Text", "Birth_text", "Geboorte brontekst"], # F30
["Geburt_Info", "Birth_info", "Geboorte info"], # F31
# F32 - F39: Christening Date, Place, Religion, Witness, Source,
# Reference, Text, Info
["Taufe_Datum", "Christening_date", "Doop datum"], # F32
["Taufe_Ort", "Christening_place", "Doop plaats"], # F33
["Religion", "Religion", "Gezindte"], # F34
["Taufe_Paten", "Christening_witness", "Doop getuigen"], # F35
["Taufe_Quelle", "Christening_source", "Doop bron"], # F36
["Taufe_Akte", "Christening_ref", "Doop akte"], # F37
["Taufe_Text", "Christening_text", "Doop brontekst"], # F38
["Taufe_Info", "Christening_info", "Doop info"], # F39
# F40 - F46: Death Date, Place, Time, Source, Reference, Text, Info
["Sterbe_Datum", "Death_date", "Overlijden datum"], # F40
["Sterbe_Ort", "Death_place", "Overlijden plaats"], # F41
["Sterbe_Zeit", "Death_time", "Overlijden tijd"], # F42
["Sterbe_Quelle", "Death_source", "Overlijden bron"], # F43
["Sterbe_Akte", "Death_ref", "Overlijden akte"], # F44
["Sterbe_Text", "Death_text", "Overlijden brontekst"], # F45
["Sterbe_Info", "Death_info", "Overlijden info"], # F46
# F47 - F52: Cremation Date, Place, Source, Reference, Text, Info
["Einäscherung_Datum", "Cremation_date", "Crematie datum"], # F47
["Einäscherung_Ort", "Cremation_place", "Crematie plaats"], # F48
["Einäscherung_Quelle", "Cremation_source", "Crematie bron"], # F49
["Einäscherung_Akte", "Cremation_ref", "Crematie akte"], # F50
["Einäscherung_Text", "Cremation_text", "Crematie brontekst"], # F51
["Einäscherung_Info", "Cremation_info", "Crematie info"], # F52
# F53 - F58: Burial Date, Place, Source, Reference, Text, Info
["Beerdigung_Datum", "Burial_date", "Begrafenis datum"], # F53
["Beerdigung_Ort", "Burial_place", "Begrafenis plaats"], # F54
["Beerdigung_Quelle", "Burial_source", "Begrafenis bron"], # F55
["Beerdigung_Akte", "Burial_ref", "Begrafenis akte"], # F56
["Beerdigung_Text", "Burial_text", "Begrafenis brontekst"], # F57
["Beerdigung_Info", "Burial_info", "Begrafenis info"], # F58
]
# Records in the REL file using PG30-1.DEF contain the following fields:
self.family_identifier = [
# F00: None
[""], # F00
# F01 - F07: Relation ID, Change, Husband, Wife, Code, Note, Info
["Ehe_ID", "Relation_record", "Relatie record"], # F01
["Ehe_Änderung", "Relation_last_change", "Relatie gewijzigd"], # F02
["Ehemann", "Husband", "Man"], # F03
["Ehefrau", "Wife", "Vrouw"], # F04
["Ehe_Code", "Relation_code", "Relatie code"], # F05
["Ehe_Notiz", "Relation_scratch", "Relatie klad"], # F06
["Ehe_Info", "Relation_info", "Relatie info"], # F07
# F08 - F13: Civil Union Date, Place, Source, Reference, Text, Info
["Lebensgem_Datum", "Living_date", "Samenwonen datum"], # F08
["Lebensgem_Ort", "Living_place", "Samenwonen plaats"], # F09
["Lebensgem_Quelle", "Living_source", "Samenwonen bron"], # F10
["Lebensgem_Akte", "Living_ref", "Samenwonen akte"], # F11
["Lebensgem_Text", "Living_text", "Samenwonen brontekst"], # F12
["Lebensgem_Info", "Living_info", "Samenwonen info"], # F13
# F14 - F20: Marriage License Date, Place, Witness, Source, Record,
# Text, Info
["Aufgebot_Datum", "Banns_date", "Ondertrouw datum"], # F14
["Aufgebot_Ort", "Banns_place", "Ondertrouw plaats"], # F15
["Aufgebot_Zeugen", "Banns_witnesses", "Ondertrouw getuigen"], # F16
["Aufgebot_Quelle", "Banns_source", "Ondertrouw bron"], # F17
["Aufgebot_Akte", "Banns_ref", "Ondertrouw akte"], # F18
["Aufgebot_Text", "Banns_text", "Ondertrouw brontekst"], # F19
["Aufgebot_Info", "Banns_info", "Ondertrouw info"], # F20
# F14 - F20: Civil Marriage Date, Place, Witness, Source, Record,
# Text, Info
["Standesamt_Datum", "Civil_date", "Wettelijk datum"], # F21
["Standesamt_Ort", "Civil_place", "Wettelijk plaats"], # F22
["Standesamt_Zeugen", "Civil_witnesses", "Wettelijk getuigen"], # F23
["Standesamt_Quelle", "Civil_source", "Wettelijk bron"], # F24
["Standesamt_Akte", "Civil_ref", "Wettelijk akte"], # F25
["Standesamt_Text", "Civil_text", "Wettelijk brontekst"], # F26
["Standesamt_Info", "Civil_info", "Wettelijk info"], # F27
# F28 - F35: Church Wedding Date, Place, Church Name, Witness,
# Source, Reference, Text, Info
["Kirche_Datum", "Church_date", "Kerkelijk datum"], # F28
["Kirche_Ort", "Church_place", "Kerkelijk plaats"], # F29
["Kirche", "Church", "Kerk"], # F30
["Kirche_Zeugen", "Church_witnesses", "Kerkelijk getuigen"], # F31
["Kirche_Quelle", "Church_source", "Kerkelijk bron"], # F32
["Kirche_Akte", "Church_ref", "Kerkelijk akte"], # F33
["Kirche_Text", "Church_text", "Kerkelijk brontekst"], # F34
["Kirche_Info", "Church_info", "Kerkelijk info"], # F35
# F36 - F41: Divorce Date, Place, Source, Reference, Text, Info
["Scheidung_Datum", "Divorce_date", "Scheiding datum"], # F36
["Scheidung_Ort", "Divorce_place", "Scheiding plaats"], # F37
["Scheidung_Quelle", "Divorce_source", "Scheiding bron"], # F38
["Scheidung_Akte", "Divorce_ref", "Scheiding akte"], # F39
["Scheidung_Text", "Divorce_text", "Scheiding brontekst"], # F40
["Scheidung_Info", "Divorce_info", "Scheiding info"], # F41
]
# provide feedback about import progress (GUI / TXT)
if self.uistate:
self.progress = ProgressMeter(_("Import from Pro-Gen"), '',
parent=self.uistate.window)
else:
UpdateCallback.__init__(self, user.callback)
def __add_name(self, person, citationhandle, nametype,
firstname, prefix, surname, suffix):
"""
Add a new name to the object.
"""
name = Name()
name.set_type(nametype)
name.set_first_name(firstname)
sur_name = Surname()
sur_name.set_prefix(prefix)
sur_name.set_surname(surname)
name.add_surname(sur_name)
name.set_suffix(suffix)
if citationhandle:
name.add_citation(citationhandle)
person.add_alternate_name(name)
def __add_tag(self, tag, obj):
"""
Add the default tag to the object.
"""
if self.tagobject_list and (tag in self.tagobject_list):
obj.add_tag(self.tagobject_list[tag].handle)
def __find_from_handle(self, progen_id, table):
"""
Find a handle corresponding to the specified Pro-Gen ID.
"""
# The passed table contains the mapping. If the value is found, we
# return it, otherwise we create a new handle, store it, and return it.
intid = table.get(progen_id)
if not intid:
intid = create_id()
table[progen_id] = intid
return intid
def __find_person_handle(self, progen_id):
"""
Return the database handle associated with the person's Pro-Gen ID
"""
return self.__find_from_handle(progen_id, self.gid2id)
def __find_family_handle(self, progen_id):
"""
Return the database handle associated with the family's Pro-Gen ID
"""
return self.__find_from_handle(progen_id, self.fid2id)
def __find_or_create_person(self, progen_id):
"""
Finds or creates a Person based on the Pro-Gen ID.
"""
# If the ID is already used (= is in the database), return the item in
# DB. Otherwise, create a new person, assign the handle and Gramps ID.
person = Person()
intid = self.gid2id.get(progen_id)
if self.dbase.has_person_handle(intid):
person.unserialize(self.dbase.get_raw_person_data(intid))
else:
# create a new Person
gramps_id = self.dbase.id2user_format("I%06d" % progen_id)
if self.dbase.has_person_gramps_id(gramps_id):
gramps_id = self.dbase.find_next_person_gramps_id()
intid = self.__find_from_handle(progen_id, self.gid2id)
person.set_handle(intid)
person.set_gramps_id(gramps_id)
# add info for import statistic
self.info.add('new-object', PERSON_KEY, None)
return person
def __find_or_create_family(self, progen_id):
"""
Finds or creates a Family based on the Pro-Gen ID.
"""
family = Family()
intid = self.fid2id.get(progen_id)
if self.dbase.has_family_handle(intid):
family.unserialize(self.dbase.get_raw_family_data(intid))
else:
# create a new Family
gramps_id = self.dbase.fid2user_format("F%04d" % progen_id)
if self.dbase.has_family_gramps_id(gramps_id):
gramps_id = self.dbase.find_next_family_gramps_id()
intid = self.__find_from_handle(progen_id, self.fid2id)
family.set_handle(intid)
family.set_gramps_id(gramps_id)
# add info for import statistic
self.info.add('new-object', FAMILY_KEY, None)
return family
def __get_or_create_place(self, place_name):
"""
Finds or creates a Place based on the place name.
"""
if not place_name:
return None
if place_name in self.pkeys:
place = self.dbase.get_place_from_handle(self.pkeys[place_name])
else:
# create a new Place
place = Place()
place.set_name(PlaceName(value=place_name))
place.set_title(place_name)
self.__add_tag('place', place) # add tag to 'Place'
self.dbase.add_place(place, self.trans) # add & commit ...
self.pkeys[place_name] = place.get_handle()
# add info for import statistic
self.info.add('new-object', PLACE_KEY, None)
return place
def __get_or_create_citation(self, source_title, date_text,
page_text='', page_ref=''):
"""
Finds or creates Source & Citation based on:
Source, Name, Date, Page, Note, Attribute.
"""
if not source_title:
return None
# process Source
if not self.option['imp_source']: # No Source enabled
return None
if source_title in self.skeys: # source exists
source = self.dbase.get_source_from_handle(self.skeys[source_title])
else: # create a new source
source = Source()
source.set_title(source_title)
source.private = self.option['imp_source_priv']
self.__add_tag('source', source) # add tag to 'Source'
# process Attribute
if self.option['imp_source_attr']:
sattr = SrcAttribute()
sattr.set_type(_("Source"))
sattr.set_value(self.option['imp_source_attr'])
source.add_attribute(sattr)
self.dbase.add_source(source, self.trans) # add & commit ...
self.skeys[source_title] = source.get_handle()
# add info for import statistic
self.info.add('new-object', SOURCE_KEY, None)
# process Citation
if not self.option['imp_citation']: # No Citation enabled
return None
# process Volume/Page
page = source_title
if page_text or page_ref:
page = '%s %s' % (page_text, page_ref)
if page in self.ckeys: # citation exists
citation = self.dbase.get_citation_from_handle(self.ckeys[page])
else: # create a new citation
citation = Citation()
citation.set_reference_handle(source.get_handle())
citation.private = self.option['imp_citation_priv']
self.__add_tag('citation', citation) # add tag to 'Citation'
# process Date
date = self.__create_date_from_text(date_text)
if date:
citation.set_date_object(date)
# process Confidence
citation.set_confidence_level(self.option['imp_citation_conf'])
# process Page (substitute strftime directives, if any are present)
if any(fmt in page for fmt in ('%Y', '%m', '%d', '%H', '%M', '%S')):
page = time.strftime(page)
citation.set_page('%s' % page)
# process Note
imp_citation_note = '' # Not yet used
if imp_citation_note:
note = self.__create_note(imp_citation_note, NoteType.CUSTOM,
_("Pro-Gen Import"))
if note and note.handle:
citation.add_note(note.handle)
# process Attribute
if self.option['imp_citation_attr']:
sattr = SrcAttribute()
sattr.set_type(_("Citation"))
sattr.set_value(self.option['imp_citation_attr'])
citation.add_attribute(sattr)
self.dbase.add_citation(citation, self.trans) # add & commit ...
self.ckeys[page] = citation.get_handle()
# add info for import statistic
self.info.add('new-object', CITATION_KEY, None)
return citation
def __create_note(self, note_text, note_type, note_cust=''):
"""
Create a note based on Type and Text.
"""
if not note_text:
return None
if isinstance(note_text, list):
note_text = '\n'.join(note_text)
note = Note()
note.set(note_text)
ntype = NoteType() # avoid shadowing the 'note_type' parameter
ntype.set((note_type, note_cust))
note.set_type(ntype)
self.__add_tag('note', note) # add tag to 'Note'
self.dbase.add_note(note, self.trans) # add & commit ...
# add info for import statistic
self.info.add('new-object', NOTE_KEY, None)
return note
def __create_attribute(self, attr_text, attr_type, attr_cust=''):
"""
Creates an attribute based on (Custom-)Type and Text.
"""
if not attr_text:
return None
attr = Attribute()
attr.set_type((attr_type, attr_cust))
attr.set_value(attr_text)
return attr
def __create_event_and_ref(self, type_, desc=None, date=None, place=None,
citation=None, note_text=None,
attr_text=None, attr_type=None, attr_cust=None):
"""
Finds or creates an Event based on the Type, Description, Date, Place,
Citation, Note and Time.
"""
event = Event()
event.set_type(EventType(type_))
self.__add_tag('event', event) # add tag to 'Event'
if desc:
event.set_description(desc)
if date:
event.set_date_object(date)
if place:
event.set_place_handle(place.get_handle())
if citation:
event.add_citation(citation.handle)
attr = self.__create_attribute(attr_text, attr_type, attr_cust)
if attr:
event.add_attribute(attr)
note = self.__create_note(note_text, NoteType.CUSTOM, "Info")
if note and note.handle:
event.add_note(note.handle)
self.dbase.add_event(event, self.trans) # add & commit ...
# add info for import statistic
self.info.add('new-object', EVENT_KEY, None)
event_ref = EventRef()
event_ref.set_reference_handle(event.get_handle())
return event, event_ref
__date_pat1 = re.compile(r'(?P<day>\d{1,2}) (.|-|=) (?P<month>\d{1,2}) (.|-|=) (?P<year>\d{2,4})',
re.VERBOSE)
__date_pat2 = re.compile(r'(?P<month>\d{1,2}) (.|-|=) (?P<year>\d{4})',
re.VERBOSE)
__date_pat3 = re.compile(r'(?P<year>\d{3,4})', re.VERBOSE)
__date_pat4_de = re.compile(r'(v|vor|n|nach|ca|circa|etwa|in|um|±) (\.|\s)* (?P<year>\d{3,4})',
re.VERBOSE)
__date_pat4_en = re.compile(r'(b|before|a|after|ab|about|between|±) (\.|\s)* (?P<year>\d{3,4})',
re.VERBOSE)
__date_pat4_nl = re.compile(r'(v|voor|vóór|na|ca|circa|rond|±) (\.|\s)* (?P<year>\d{3,4})',
re.VERBOSE)
__date_pat5 = re.compile(r'(oo|OO) (-|=) (oo|OO) (-|=) (?P<year>\d{2,4})',
re.VERBOSE)
__date_pat6 = re.compile(r'(?P<month>(%s)) (\.|\s)* (?P<year>\d{3,4})' % \
'|'.join(list(MONTHES.keys())),
re.VERBOSE | re.IGNORECASE)
def __create_date_from_text(self, date_text, diag_msg=None):
"""
Create a Date object from (free) text; diag_msg is used in warning output.
"""
# Pro-Gen has a text field for the date.
# It can be anything (it should be dd-mm-yyyy), but we have seen:
# yyyy
# mm-yyyy
# before yyyy
# dd=mm-yyyy (typo I guess)
# 00-00-yyyy
# oo-oo-yyyy
# dd-mm-00 (does this mean we do not know about the year?)
# Function tries to parse the text and create a proper Gramps Date()
# object. If all else fails create a MOD_TEXTONLY Date() object.
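# Illustrative outcomes (hypothetical inputs): '12-3-1820' yields an exact
# date, 'mrt 1820' an ABOUT date via MONTHES, 'circa 1820' (de/nl) an ABOUT
# date, and anything unparseable falls through to set_as_text().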
if not date_text or date_text == '??' or date_text == _("Unknown"):
return None
date = Date()
# dd-mm-yyyy
dte_mtch = self.__date_pat1.match(date_text)
if dte_mtch:
day = int(dte_mtch.group('day'))
month = int(dte_mtch.group('month'))
if month > 12:
month %= 12
year = int(dte_mtch.group('year'))
if day and month and year:
date.set_yr_mon_day(year, month, day)
else:
date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
(day, month, year, 0))
return date
# mm-yyyy
dte_mtch = self.__date_pat2.match(date_text)
if dte_mtch:
month = int(dte_mtch.group('month'))
year = int(dte_mtch.group('year'))
date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
(0, month, year, 0))
return date
# yyy or yyyy
dte_mtch = self.__date_pat3.match(date_text)
if dte_mtch:
year = int(dte_mtch.group('year'))
date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
(0, 0, year, 0))
return date
# before|after|... yyyy
if self.language == 0: # 'de' language
dte_mtch = self.__date_pat4_de.match(date_text)
elif self.language == 1: # 'en' language
dte_mtch = self.__date_pat4_en.match(date_text)
elif self.language == 2: # 'nl' language
dte_mtch = self.__date_pat4_nl.match(date_text)
if dte_mtch:
year = int(dte_mtch.group('year'))
if dte_mtch.group(1) == 'v' or dte_mtch.group(1) == 'vor' or \
dte_mtch.group(1) == 'before' or \
dte_mtch.group(1) == 'voor' or dte_mtch.group(1) == 'vóór':
date.set(Date.QUAL_NONE, Date.MOD_BEFORE, Date.CAL_GREGORIAN,
(0, 0, year, 0))
elif dte_mtch.group(1) == 'n' or dte_mtch.group(1) == 'nach' or \
dte_mtch.group(1) == 'after' or \
dte_mtch.group(1) == 'na':
date.set(Date.QUAL_NONE, Date.MOD_AFTER, Date.CAL_GREGORIAN,
(0, 0, year, 0))
else:
date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
(0, 0, year, 0))
return date
# oo-oo-yyyy
dte_mtch = self.__date_pat5.match(date_text)
if dte_mtch:
year = int(dte_mtch.group('year'))
date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
(0, 0, year, 0))
return date
# mmm yyyy (textual month)
dte_mtch = self.__date_pat6.match(date_text)
if dte_mtch:
year = int(dte_mtch.group('year'))
month = MONTHES.get(dte_mtch.group('month'), 0)
date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
(0, month, year, 0))
return date
# Hmmm. Just use the plain text.
LOG.warning(_("Date did not match: '%(text)s' (%(msg)s)"), \
{'text' : date_text.encode('utf-8'), 'msg' : diag_msg or ''})
date.set_as_text(date_text)
return date
def __create_desc_from_text(self, desc_txt):
"""
Creates a variation of a description depending on language
"""
desc = None
if desc_txt:
if self.language == 0: # 'de' language
desc = desc_txt + ' Uhr'
else:
desc = _('Time: %s') % desc_txt
return desc
def __display_message(self, gui_mesg, txt_mesg=None, gui_max=None):
"""
Display a message, via the GUI progress meter or the TXT callback.
"""
if self.uistate:
if gui_max: self.progress.set_pass(gui_mesg, gui_max)
else: self.progress.set_pass(gui_mesg)
else:
if txt_mesg: self.set_text(txt_mesg)
else: self.set_text(gui_mesg)
def create_tags(self):
"""
Create tags for objects (if provided).
"""
for tagobj in TAGOBJECTS:
tagname = 'tag_%s' % tagobj
if self.option[tagname]:
# process tagname (substitute string directives)
tagname = '%s %s' % (_(tagobj).capitalize(), \
self.option[tagname])
tag = self.dbase.get_tag_from_name(tagname)
if not tag:
tag = Tag()
tag.set_name(tagname)
self.dbase.add_tag(tag, self.trans)
# add info for import statistic
self.info.add('new-object', TAG_KEY, None)
self.tagobject_list[tagobj] = tag
__rel_pat = re.compile(r'(r|w|)', re.VERBOSE)
def create_persons(self):
"""
Method to import Persons
"""
table = self.def_['Table_1']
LOG.info(table.get_field_names())
# We'll start with F02: Person last change
# Note: We like this to be computed just once.
person_ix = [0, 0]
for count in range(2, len(self.person_identifier)):
# We have seen some case insensitivity in DEF files ...
pid = self.person_identifier[count][self.language].lower()
pix = table.get_record_field_index(pid)
person_ix.append(pix)
# start feedback about import progress (GUI/TXT)
self.__display_message(_('Importing persons.'), gui_max=len(self.pers))
# Male / Female symbols
male_sym = self.def_.tables['Genealogical'].parms['male']
female_sym = self.def_.tables['Genealogical'].parms['female']
ind_id = 0
for i, rec in enumerate(self.pers):
# Update at the begin
self.progress.step() if self.uistate else self.update()
recflds = table.convert_record_to_list(rec, self.mems)
# Option: Original Individuals IDs
if self.option['opt_person-ident']:
ind_id = int(recflds[person_ix[1]]) # F01: INDI RFN
else:
ind_id += 1
# print(("Ind ID %d " % ind_id) + " ".join(("%s" % r) for r in rec))
person = self.__find_or_create_person(ind_id)
# process F03 Given Name, F07 Call Name
name = Name()
name.set_type(NameType.BIRTH)
first_name = recflds[person_ix[3]] # F03: TBD
if first_name:
# replace if necessary separators with ' '
first_name = re.sub(r'[,;]', ' ', first_name)
else:
# default first name 'Nomen nominandum'
first_name = 'N.N.'
name.set_first_name(first_name)
# process F04 Last Name
sur_prefix, sur_name = '', ''
if recflds[person_ix[4]]:
# F04: INDI NAME
sur_prefix, sur_name = _split_surname(recflds[person_ix[4]])
if not sur_name:
# default surname 'Nomen nominandum'
sur_name = 'N.N.'
surname = Surname()
surname.set_surname(sur_name)
if sur_prefix:
surname.set_prefix(sur_prefix)
name.add_surname(surname)
# process F06 Patronym
patronym = recflds[person_ix[6]] # F06: INDI _PATR
if patronym:
patronym_name = Surname()
patronym_name.set_surname(patronym)
patronym_name.set_origintype(NameOriginType.PATRONYMIC)
name.add_surname(patronym_name)
# process F10 - F12 Title(s)
title1 = recflds[person_ix[10]] # F10: INDI TITL
title2 = recflds[person_ix[11]] # F11: INDI _TITL2
title3 = recflds[person_ix[12]] # F12: INDI _TITL3
title = [_f for _f in [title1, title2, title3] if _f]
if title:
name.set_title(", ".join(title))
# General config: additional individual citation
if self.option['imp_source_title']:
# Original individual ID from source
pageref = '[ID: I%06d] %s, %s' % (i +1, sur_name, first_name)
citation = self.__get_or_create_citation \
(self.option['imp_source_title'],
recflds[person_ix[2]], # F02: INDI CHAN DATE
self.option['imp_citation_page'], pageref)
if citation and citation.handle:
person.add_citation(citation.handle)
name.add_citation(citation.handle)
# add tag to 'Person'
self.__add_tag('person', person)
# create diagnose message
diag_msg = "%s: %s %s" % (person.gramps_id,
first_name.encode('utf-8'),
sur_name.encode('utf-8'))
# process F25 Birth Date
birth_date = self.__create_date_from_text \
(recflds[person_ix[25]], diag_msg) # F25: ... DATE
# process F07 Call Name
if recflds[person_ix[7]]:
# F07: INDI NAME NICK/INDI NAME ALIA/INDI CHR NICK
name.set_call_name(recflds[person_ix[7]])
else:
nick_name = first_name.split(' ')
if birth_date and len(nick_name) > 1: # Two or more firstnames
number = 0 # Firstname number
if birth_date.dateval[2] < 1900: # 1900: Common edge date
number = 1
name.set_call_name(nick_name[number])
# set the Person in database
person.set_primary_name(name)
# process F05 Gender
gender = recflds[person_ix[5]] # F05: INDI SEX
if gender == male_sym:
gender = Person.MALE
elif gender == female_sym:
gender = Person.FEMALE
else:
gender = Person.UNKNOWN
person.set_gender(gender)
# process F08 Alias
# F08: INDI NAME _ALIA / INDI NAME COMM
alias = recflds[person_ix[8]]
if alias:
# expand separator with ' '
alias = re.sub(r'\.', '. ', alias)
alias_text = alias.split()
# two ways: Attribute-Nickname or AKA-Name
if len(alias_text) == 1:
attr = self.__create_attribute(alias,
AttributeType.NICKNAME)
if attr:
person.add_attribute(attr)
else:
self.__add_name(
person, citation.handle if citation else None,
NameType.AKA, ' '.join(alias_text[0:-1]),
'', alias_text[-1], '')
# process F09 Person Code
refn_code = recflds[person_ix[9]] # F09: INDI REFN/INDI CODE
if refn_code:
# We have seen some artefacts ...
rel_cde = self.__rel_pat.match(refn_code)
# Option: Reference code contains one/two letters
if self.option['opt_refn-code'] and rel_cde:
attr = self.__create_attribute(refn_code,
AttributeType.CUSTOM,
"REFN")
if attr:
person.add_attribute(attr)
# process F15 Occupation
occupation = recflds[person_ix[15]] # F15: INDI OCCU
if occupation:
dummy, event_ref = self.__create_event_and_ref \
(EventType.OCCUPATION, occupation)
if event_ref:
person.add_event_ref(event_ref)
# process F16 Person Comment, F17 Person Note
comm = recflds[person_ix[16]] # F16: INDI _COMM / INDI COMM
note = recflds[person_ix[17]] # F17: INDI NOTE
note_text = [_f for _f in [comm, note] if _f]
note = self.__create_note(note_text, NoteType.PERSON)
if note and note.handle:
person.add_note(note.handle)
# process F18 - F24 Address Date, Place, Street, ZIP, Country,
# Phone, Info
# GEDCOM symbols: INDI RESI ...
date = self.__create_date_from_text \
(recflds[person_ix[18]], diag_msg) # F18: ... DATE
street = recflds[person_ix[19]] # F19: ... ADDR
# F20: ... ADDR POST/INDI RESI POST
postal_code = recflds[person_ix[20]]
# F21: ... ADDR CITY/INDI RESI PLAC
place = self.__get_or_create_place(recflds[person_ix[21]])
# F22: ... ADDR CTRY/INDI RESI CTRY
country = recflds[person_ix[22]]
# F23: ... PHON/INDI PHON
phone = recflds[person_ix[23]]
# F24: I... NOTE / INDI ADDR
info = recflds[person_ix[24]]
address = None
if street or postal_code or country or phone:
# Create address
address = Address()
if date:
address.set_date_object(date)
if street:
address.set_street(street)
if recflds[person_ix[21]]:
address.set_city(recflds[person_ix[21]])
if postal_code: # Debugging!
address.set_postal_code(postal_code)
if country:
address.set_country(country)
if phone:
address.set_phone(phone)
# Option 1: add Notes to Address
note = self.__create_note(info, NoteType.ADDRESS)
if note and note.handle:
address.add_note(note.handle)
info = None
person.add_address(address)
if place:
desc = ''
if address and date:
desc = _('see address on ')
desc += displayer.display(date)
elif address:
desc = _('see also address')
dummy, resi_ref = self.__create_event_and_ref \
(EventType.RESIDENCE, desc, date, place, '', info)
if resi_ref:
person.add_event_ref(resi_ref)
# process F25 - F31 Birth Date, Place, Time, Source, Reference,
# Text, Info
# GEDCOM symbols: INDI BIRT ...
# date = self.__create_date_from_text \ # Birth Date processed above
# (recflds[person_ix[25]], diag_msg) # F25: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[26]]) # F26: ... PLAC
birth_time = recflds[person_ix[27]] # F27: ... TIME
source = recflds[person_ix[28]] # F28: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[29]] # F29: ... SOUR REFN
source_text = recflds[person_ix[30]] # F30: ... SOUR TEXT
info = recflds[person_ix[31]] # F31: INDI ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[person_ix[25]], source_refn)
if birth_date or place or info or citation:
desc = source_text
# Option: Birth time in description
if self.option['opt_birth-date']:
time_text = self.__create_desc_from_text(birth_time)
desc += '; %s' % time_text
dummy, birth_ref = self.__create_event_and_ref \
(EventType.BIRTH, desc, birth_date, place, citation, info,
birth_time, AttributeType.TIME)
if birth_ref:
person.set_birth_ref(birth_ref)
# process F32 - F37 Baptism / Christening Date, Place, Religion,
# Source, Reference, Text, Info
# GEDCOM symbols: INDI CHR ...
date = self.__create_date_from_text \
(recflds[person_ix[32]], diag_msg) # F32: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[33]]) # F33: ... PLAC
religion = recflds[person_ix[34]] # F34: ... RELI / INDI RELI
witness = recflds[person_ix[35]] # F35: ... _WITN / ... WITN
source = recflds[person_ix[36]] # F36: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[37]] # F37: ... SOUR REFN
source_text = recflds[person_ix[38]] # F38: ... SOUR TEXT
info = recflds[person_ix[39]] # F39: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[person_ix[32]], source_refn)
if date or place or info or citation:
dummy, chris_ref = self.__create_event_and_ref \
(EventType.CHRISTEN, source_text, date, place, citation,
info, witness, AttributeType.CUSTOM, _("Godfather"))
if chris_ref:
person.add_event_ref(chris_ref)
# process F34 Religion
if religion:
citation = None
if source != religion:
citation = self.__get_or_create_citation \
(religion, recflds[person_ix[32]], source_refn)
dummy, reli_ref = self.__create_event_and_ref \
(EventType.RELIGION, '', date, '', citation)
if reli_ref:
person.add_event_ref(reli_ref)
# process F40 - F46 Death Date, Place, Time, Source, Reference,
# Text, Info
# GEDCOM symbols: INDI DEAT ...
date = self.__create_date_from_text \
(recflds[person_ix[40]], diag_msg) # F40: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[41]]) # F41: ... PLAC
death_time = recflds[person_ix[42]] # F42: ... TIME
source = recflds[person_ix[43]] # F43: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[44]] # F44: ... SOUR REFN
source_text = recflds[person_ix[45]] # F45: ... SOUR TEXT
info = recflds[person_ix[46]] # F46: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[person_ix[40]], source_refn)
if date or place or info or citation:
desc = source_text
# Option: Death time in description
if self.option['opt_death-date']:
time_text = self.__create_desc_from_text(death_time)
desc += '; %s' % time_text
if not self.option['opt_death-cause']:
desc += ' (%s)' % info
dummy, death_ref = self.__create_event_and_ref \
(EventType.DEATH, desc, date, place, citation, None,
death_time, AttributeType.TIME)
if death_ref:
person.set_death_ref(death_ref)
# Option: Death info to Death cause
if source_text or (self.option['opt_death-cause'] and info):
desc = [_f for _f in [source_text, info] if _f]
desc = desc and '; '.join(desc) or None
if _('Death cause') in desc:
desc = desc[13:].strip()
dummy, event_ref = self.__create_event_and_ref \
(EventType.CAUSE_DEATH, desc)
if event_ref:
person.add_event_ref(event_ref)
# process F47 - F52 Cremation Date, Place, Source, Reference,
# Text, Info
# GEDCOM symbols: INDI CREM ...
date = self.__create_date_from_text \
(recflds[person_ix[47]], diag_msg) # F47: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[48]]) # F48: ... PLAC
source = recflds[person_ix[49]] # F49: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[50]] # F50: ... SOUR REFN
source_text = recflds[person_ix[51]] # F51: ... SOUR TEXT
info = recflds[person_ix[52]] # F52: ... INFO
citation = self.__get_or_create_citation \
(source, recflds[person_ix[47]], source_refn)
if date or place or info or citation:
dummy, cremation_ref = self.__create_event_and_ref \
(EventType.CREMATION, source_text, date, place, citation,
info)
if cremation_ref:
person.add_event_ref(cremation_ref)
# process F53 Burial Date, F54 Burial Place, F55 Burial Source,
# F56 Burial Reference, F57 Burial Text, F58 Burial Info
# GEDCOM symbols: INDI BURI ...
date = self.__create_date_from_text \
(recflds[person_ix[53]], diag_msg) # F53: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[54]]) # F54: ... PLAC
source = recflds[person_ix[55]] # F55: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[56]] # F56: ... SOUR REFN
source_text = recflds[person_ix[57]] # F57: ... SOUR TEXT
info = recflds[person_ix[58]] # F58: ... INFO
citation = self.__get_or_create_citation \
(source, recflds[person_ix[53]], source_refn)
if date or place or info or citation:
dummy, buri_ref = self.__create_event_and_ref \
(EventType.BURIAL, source_text, date, place, citation, info)
if buri_ref:
person.add_event_ref(buri_ref)
# commit the Person
self.dbase.commit_person(person, self.trans)
def create_families(self):
"""
Method to import Families
"""
table = self.def_['Table_2']
LOG.info(table.get_field_names())
# We'll start with F03: Husband
# Note: We like this to be computed just once.
family_ix = [0, 0]
for count in range(2, len(self.family_identifier)):
# We've seen some case insensitivity in DEF files ...
fid = self.family_identifier[count][self.language].lower()
fix = table.get_record_field_index(fid)
family_ix.append(fix)
# start feedback about import progress (GUI/TXT)
self.__display_message(_('Importing families.'), gui_max=len(self.rels))
fam_id = 0
for i, rec in enumerate(self.rels):
# Update at the begin
self.progress.step() if self.uistate else self.update()
husband = rec[family_ix[3]] # F03: FAM HUSB
wife = rec[family_ix[4]] # F04: FAM WIFE
if husband > 0 or wife > 0:
recflds = table.convert_record_to_list(rec, self.mems)
# Option: Original family IDs
if self.option['opt_family-ident']:
fam_id = int(recflds[family_ix[1]]) # F01: FAM RFN
else:
fam_id += 1
self.high_fam_id = fam_id
family = self.__find_or_create_family(fam_id)
# process F03 / F04 Husband / Wife
husband_handle = None
if husband > 0:
husband_handle = self.__find_person_handle(husband)
family.set_father_handle(husband_handle)
husband_person = self.dbase.get_person_from_handle(husband_handle)
husband_person.add_family_handle(family.get_handle())
self.dbase.commit_person(husband_person, self.trans)
wife_handle = None
if wife > 0:
wife_handle = self.__find_person_handle(wife)
family.set_mother_handle(wife_handle)
wife_person = self.dbase.get_person_from_handle(wife_handle)
wife_person.add_family_handle(family.get_handle())
self.dbase.commit_person(wife_person, self.trans)
# Optional: Husband changes Surname (e.g. marriage)
if (husband > 0) and self.option['opt_surname-male']:
citation_handle = wife_person.get_citation_list()[0] \
if husband_person.citation_list else None
self.__add_name(husband_person, citation_handle,
NameType.MARRIED,
husband_person.primary_name.get_first_name(),
husband_person.primary_name.surname_list[0].prefix,
wife_person.primary_name.get_surname(),
husband_person.primary_name.get_suffix())
# commit the Person
self.dbase.commit_person(husband_person, self.trans)
# Optional: Wife changes Surname (e.g. marriage)
if (wife > 0) and self.option['opt_surname-female']:
citation_handle = wife_person.get_citation_list()[0] \
if wife_person.citation_list else None
self.__add_name(wife_person, citation_handle,
NameType.MARRIED,
wife_person.primary_name.get_first_name(),
wife_person.primary_name.surname_list[0].prefix,
husband_person.primary_name.get_surname(),
wife_person.primary_name.get_suffix())
# commit the Person
self.dbase.commit_person(wife_person, self.trans)
self.fm2fam[husband_handle, wife_handle] = family
diag_msg = "%s: %s %s" % \
(family.gramps_id,
husband_person.gramps_id if husband_handle else "",
wife_person.gramps_id if wife_handle else "")
# Option: Additional family citation
if self.option['imp_source_title']:
husband_name = husband_person.get_primary_name()
husband_name = husband_name.get_surname()
wife_name = wife_person.get_primary_name()
wife_name = wife_name.get_surname()
# Original family ID from source
pageref = '[ID: F%05d] %s -- %s' % \
(i +1, husband_name, wife_name)
citation = self.__get_or_create_citation \
(self.option['imp_source_title'],
recflds[family_ix[2]], # F02: FAM CHAN DATE
self.option['imp_citation_page'], pageref)
if citation and citation.handle:
family.add_citation(citation.handle)
# add tag to 'Family'
self.__add_tag('family', family)
# process F08 - F13 Civil Union Date, Place, Source,
# Reference, Text, Info
# GEDCOM symbols: FAM _LIV ...
date = self.__create_date_from_text \
(recflds[family_ix[8]], diag_msg) # F08: ... DATE
place = self.__get_or_create_place \
(recflds[family_ix[9]]) # F09: ... PLAC
# F10: ... SOUR/FAM _LIV SOUR TITL
source = recflds[family_ix[10]]
source_refn = recflds[family_ix[11]] # F11: ... SOUR REFN
source_text = recflds[family_ix[12]] # F12: ... SOUR TEXT
info = recflds[family_ix[13]] # F13: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[family_ix[8]], source_refn)
if date or place or info or citation:
evt_type = _('Civil union')
event, civu_ref = self.__create_event_and_ref \
(EventType.UNKNOWN, source_text, date, place, citation,
info)
event.set_type((EventType.CUSTOM, evt_type))
if civu_ref:
family.add_event_ref(civu_ref)
# Type of relation
famreltype = FamilyRelType.CIVIL_UNION
family.set_relationship(FamilyRelType(famreltype))
# process F14 - F20 Marriage License Date, Place, Witness,
# Source, Reference, Text, Info
# GEDCOM symbols: FAM MARB ...
# F14: ... DATE/FAM REGS DATE
date = self.__create_date_from_text \
(recflds[family_ix[14]], diag_msg)
# F15: ... PLAC/FAM REGS PLAC
place = self.__get_or_create_place(recflds[family_ix[15]])
# F16: ... _WITN/FAM MARB WITN
witness = recflds[family_ix[16]]
# F17: ... SOUR/FAM MARB SOUR TITL/FAM REGS SOUR
source = recflds[family_ix[17]]
# F18: ... SOUR REFN/FAM REGS SOUR REFN
source_refn = recflds[family_ix[18]]
# F19: ... SOUR TEXT
source_text = recflds[family_ix[19]]
# F20: ... NOTE
info = recflds[family_ix[20]]
citation = self.__get_or_create_citation \
(source, recflds[family_ix[14]], source_refn)
if date or place or info or citation:
desc = [_f for _f in [source_text, info] if _f]
desc = desc and '; '.join(desc) or None
dummy, marl_ref = self.__create_event_and_ref \
(EventType.MARR_BANNS, desc, date, place, citation, '',
witness, AttributeType.WITNESS)
if marl_ref:
family.add_event_ref(marl_ref)
# process F21 - F27 Civil Marriage Date, Place, Witness,
# Source, Reference, Text, Info
# GEDCOM symbols: FAM MARR(Civil) ...
# F21: ... DATE/FAM MARR DATE
date = self.__create_date_from_text \
(recflds[family_ix[21]], diag_msg)
# F22: ... PLAC/FAM MARR PLAC
place = self.__get_or_create_place(recflds[family_ix[22]])
# F23: ... _WITN/FAM MARR _WITN/FAM MARR WITN/FAM WITN
witness = recflds[family_ix[23]]
# F24: ... SOUR/FAM MARR SOUR/FAM MARR SOUR TITL
source = recflds[family_ix[24]]
# F25: ... SOUR REFN/FAM MARR SOUR REFN
source_refn = recflds[family_ix[25]]
# F26: ... SOUR TEXT/FAM MARR SOUR TEXT
source_text = recflds[family_ix[26]]
info = recflds[family_ix[27]] # F27: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[family_ix[21]], source_refn)
if date or place or info or citation:
desc = source_text
if not desc:
# 'Civil' is widely accepted and language independent
desc = "Civil"
dummy, mar_ref = self.__create_event_and_ref \
(EventType.MARRIAGE, desc, date, place, citation, info,
witness, AttributeType.WITNESS)
if mar_ref:
family.add_event_ref(mar_ref)
# Type of relation
famreltype = FamilyRelType.MARRIED
family.set_relationship(FamilyRelType(famreltype))
# process F28 - F35 Church Wedding Date, Place, Church, Witness,
# Source, Reference, Text, Info
# GEDCOM symbols: FAM MARR(Church) ...
# F28: ... DATE / FAM ORDI DATE
wedding_date = self.__create_date_from_text \
(recflds[family_ix[28]], diag_msg)
# F29: ... DATE / FAM ORDI PLACE
place = self.__get_or_create_place(recflds[family_ix[29]])
# F30: ... _CHUR / FAM ORDI _CHUR / FAM ORDI RELI
church = recflds[family_ix[30]]
# F31: ... _WITN / FAM ORDI _WITN / FAM ORDI WITN
witness = recflds[family_ix[31]]
# F32: ... SOUR / FAM ORDI SOUR / FAM ORDI SOUR TITL
source = recflds[family_ix[32]]
# F33: ... SOUR REFN / FAM ORDI SOUR REFN
source_refn = recflds[family_ix[33]]
# F34: ... SOUR TEXT / FAM ORDI SOUR TEXT
source_text = recflds[family_ix[34]]
# F35 ... INFO
info = recflds[family_ix[35]]
citation = self.__get_or_create_citation \
(source, recflds[family_ix[28]], source_refn)
if wedding_date or place or info or citation:
desc = [_f for _f in [church, source_text] if _f]
desc = desc and '; '.join(desc) or None
if not desc:
desc = _('Wedding')
dummy, marc_ref = self.__create_event_and_ref \
(EventType.MARRIAGE, desc, wedding_date, place,
citation, info, witness, AttributeType.WITNESS)
if marc_ref:
family.add_event_ref(marc_ref)
# Type of relation
famreltype = FamilyRelType.MARRIED
family.set_relationship(FamilyRelType(famreltype))
# process F05 - F07 Relation Code, Note, Info
refn_code = recflds[family_ix[5]] # F05: FAM REFN / FAM CODE
if refn_code:
# We have seen some artefacts ...
rel_cde = self.__rel_pat.match(refn_code)
# Option: Reference code contains one/two letters
if self.option['opt_refn-code'] and rel_cde:
attr = self.__create_attribute(refn_code,
AttributeType.CUSTOM,
"REFN")
if attr:
family.add_attribute(attr)
comm = recflds[family_ix[6]] # F06: FAM _COMM/FAM COMM
note = recflds[family_ix[7]] # F07: FAM NOTE
note_text = [_f for _f in [comm, note] if _f]
if note_text:
cnt = None
# note_text is a non-empty list here; join it before splitting into words
note_cont = (' '.join(note_text)).split(' ')
if note_cont[0] == _('Residence'):
cnt = 1
elif note_cont[0] == _('future') and \
note_cont[1] == _('Residence'):
cnt = 2
else:
note = self.__create_note(note_text, NoteType.FAMILY)
if note and note.handle:
family.add_note(note.handle)
if cnt:
if wedding_date:
date_text = _('after') + ' ' + \
str(wedding_date.dateval[2]) # Wedding Year
# F28: ... DATE / FAM ORDI DATE
date = self.__create_date_from_text \
(date_text, diag_msg)
place_text = ''
# Add all elements of Note Content
for idx in range(cnt, len(note_cont)): # local index; do not shadow 'i' above
place_text += note_cont[idx] + ' '
place_text = place_text.rstrip() # Strip whitespace
place = self.__get_or_create_place(place_text)
dummy, place_ref = self.__create_event_and_ref \
(EventType.RESIDENCE, None, date, place, citation)
if place_ref:
family.add_event_ref(place_ref)
# process F36 - F41 Divorce Date, Place, Source, Text,
# Reference, Info
# GEDCOM symbols: FAM DIV ...
# F36: ... DATE / FAM DIVO DATE
date = self.__create_date_from_text \
(recflds[family_ix[36]], diag_msg)
# F37: ... PLAC / FAM DIVO PlAC
place = self.__get_or_create_place(recflds[family_ix[37]])
# F38: ... SOUR / FAM DIV SOUR TITL
source = recflds[family_ix[38]]
# F39: ... SOUR REFN
source_refn = recflds[family_ix[39]]
# F40: ... SOUR TEXT
source_text = recflds[family_ix[40]]
# F41: ... INFO
info = recflds[family_ix[41]]
citation = self.__get_or_create_citation \
(source, recflds[family_ix[36]], source_refn)
if date or place or info or citation:
desc = source_text
dummy, div_ref = self.__create_event_and_ref \
(EventType.DIVORCE, desc, date, place, citation, info)
if div_ref:
family.add_event_ref(div_ref)
# commit the Family
self.dbase.commit_family(family, self.trans)
# add info for import statistic
self.info.add('new-object', FAMILY_KEY, None)
def add_children(self):
"""
Method to add Children.
"""
# Once more to record the father and mother
table = self.def_['Table_1']
# We have seen some case insensitivity in DEF files ...
person_F13 = table.get_record_field_index \
(self.person_identifier[13][self.language].lower()) # F13: Father
person_F14 = table.get_record_field_index \
(self.person_identifier[14][self.language].lower()) # F14: Mother
# start feedback about import progress (GUI/TXT)
self.__display_message(_('Adding children.'),
gui_max=len(self.pers) *0.6)
ind_id = 0
for dummy, rec in enumerate(self.pers):
# Update at the begin
self.progress.step() if self.uistate else self.update()
father = rec[person_F13] # F13: Father
mother = rec[person_F14] # F14: Mother
if father > 0 or mother > 0:
recflds = table.convert_record_to_list(rec, self.mems)
# Option: Original Individuals IDs
if self.option['opt_person-ident']:
ind_id = int(recflds[0]) # F01: INDI RFN
else:
ind_id += 1
# Find the family with this Father and Mother
child_handle = self.__find_person_handle(ind_id)
father_handle = father > 0 and \
self.__find_person_handle(father) or None
mother_handle = mother > 0 and \
self.__find_person_handle(mother) or None
if father > 0 and not father_handle:
LOG.warning(_("Cannot find father for I%(person)s (Father=%(father))"), \
{'person':ind_id, 'father':father})
elif mother > 0 and not mother_handle:
LOG.warning(_("Cannot find mother for I%(person)s (Mother=%(mother))"), \
{'person':ind_id, 'mother':mother})
else:
family = self.fm2fam.get((father_handle, mother_handle), None)
if not family:
# Family not present in REL. Create a new one.
self.high_fam_id += 1
fam_id = self.high_fam_id
family = self.__find_or_create_family(fam_id)
if father_handle:
family.set_father_handle(father_handle)
try:
father_person = self.dbase.get_person_from_handle \
(father_handle)
father_person.add_family_handle(family.get_handle())
# commit the Father
self.dbase.commit_person(father_person, self.trans)
except HandleError:
LOG.warning("Failed to add father %s to child %s", \
father, ind_id)
if mother_handle:
family.set_mother_handle(mother_handle)
try:
mother_person = self.dbase.get_person_from_handle \
(mother_handle)
mother_person.add_family_handle(family.get_handle())
# commit the Mother
self.dbase.commit_person(mother_person, self.trans)
except HandleError:
LOG.warning("Failed to add mother %s to child %s", \
mother, ind_id)
if family:
childref = ChildRef()
childref.set_reference_handle(child_handle)
family.add_child_ref(childref)
# commit the Family
self.dbase.commit_family(family, self.trans)
try:
child = self.dbase.get_person_from_handle(child_handle)
if child:
child.add_parent_family_handle(family.get_handle())
# commit the Child
self.dbase.commit_person(child, self.trans)
except HandleError:
LOG.warning("Failed to add child %s to family", ind_id)
| gramps-project/gramps | gramps/plugins/lib/libprogen.py | Python | gpl-2.0 | 84,341 | [
"Brian"
] | c384ace8ffbefbde19493cf64b4c3592f84b59aef2f9e50506da4618dda2af90 |
from octopus.sequence.runtime import *
from octopus.sequence.util import Trigger
from twisted.internet import reactor
def fn ():
print ("fn called")
return sequence(
log("fn called"),
set(v, False)
)
v = variable(False, "v", "v")
v2 = variable(False, "v", "v")
o1 = Trigger(v == True, fn)
o2 = Trigger(v2 == True, log("o2 triggered"), max_calls = 1)
s = sequence(
log("Loading o"),
wait("8s"),
set(v2, True),
wait("1s")
)
s.dependents.add(o1)
s.dependents.add(o2)
reactor.callLater(2, v.set, True)
reactor.callLater(4, v.set, True)
reactor.callLater(6, v.set, True)
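# A reading of the expected behaviour (an interpretation of the code above,
# not captured output): each v.set(True) at 2/4/6 s fires o1, whose sequence
# logs "fn called" and resets v to False, re-arming the trigger; o2 has
# max_calls=1, so "o2 triggered" is logged only once, when v2 is set roughly
# 8 s into the sequence.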
run(s)
| richardingham/octopus | examples/test_on.py | Python | mit | 593 | [
"Octopus"
] | a5cf9a553da1598a03b2e79bf6a8f04525241739ac25364b8cacb637801c2f47 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from .mooseutils import colorText, str2bool, find_moose_executable, runExe, check_configuration
from .mooseutils import find_moose_executable_recursive, run_executable
from .mooseutils import touch, unique_list, gold, make_chunks, camel_to_space
from .mooseutils import text_diff, unidiff, text_unidiff, run_profile, list_files, check_output, run_time
from .mooseutils import generate_filebase, recursive_update, fuzzyEqual, fuzzyAbsoluteEqual
from .gitutils import git_is_repo, git_commit, git_commit_message, git_merge_commits, git_ls_files
from .gitutils import git_root_dir, git_init_submodule, git_submodule_info, git_version
from .gitutils import git_authors, git_lines, git_committers, git_localpath, git_repo
from .gitutils import git_is_branch, git_is_config, git_remotes, git_add_and_fetch_remote, git_fetch_remote
from .message import mooseDebug, mooseWarning, mooseMessage, mooseError
from .MooseException import MooseException
from .eval_path import eval_path
from .levenshtein import levenshtein, levenshteinDistance
from .json_load import json_load, json_parse
from .jsondiff import JSONDiffer
from .civet_results import get_civet_results, get_civet_hashes
from .template import apply_template_arguments
try:
from .yaml_load import yaml_load, yaml_write, IncludeYamlFile
except ImportError:
pass
try:
from .MooseDataFrame import MooseDataFrame
from .PostprocessorReader import PostprocessorReader
from .VectorPostprocessorReader import VectorPostprocessorReader
except ImportError:
pass
try:
from .ImageDiffer import ImageDiffer
except ImportError:
pass
try:
import clang.cindex
from .MooseSourceParser import MooseSourceParser
except ImportError:
pass
| harterj/moose | python/mooseutils/__init__.py | Python | lgpl-2.1 | 1,976 | [
"MOOSE"
] | 0c7823ee3efa897be7090b1c84d348d2c2c878860124a9a45d1cb0492edabc5d |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#This file is part of pyAlienFX.
#
# pyAlienFX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyAlienFX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyAlienFX. If not, see <http://www.gnu.org/licenses/>.
# ./COPYING
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter
# to Creative Commons, 444 Castro Street, Suite 900, Mountain View, California, 94041, USA.
#
#PyAlienFX Daemon
#The daemon will load the driver and the controller
#You can communicate with the daemon through TCP
#You can send packets so the daemon will control the AlienFX
#That way you can create GUIs, plugins, etc. to play with the AlienFX!
from AlienFX.AlienFXEngine import *
from socket import *
import sys
import os
BUFSIZ = 4096
HOST = 'localhost'
PORT = 25436 #ALIEN port as if you typed ALIEN on your phone ;)
ADDR = (HOST,PORT)
#LOGFILE = '/var/log/pydaemon.log'
#PIDFILE = '/var/run/pydaemon.pid'
#class Log:
#"""file like for writes with auto flush after each write
#to ensure that everything is logged, even during an
#unexpected exit."""
#def __init__(self, f):
#self.f = f
#def write(self, s):
#self.f.write(s)
#self.f.flush()
#def main():
##change to data directory if needed
##os.chdir("/root/data")
##redirect outputs to a logfile
##sys.stdout = sys.stderr = Log(open(LOGFILE, 'a+'))
#print "Starting Server"
##ensure the that the daemon runs a normal user
##os.setegid(1000) #set group first "pyAlienFX"
##os.seteuid(1000) #set user "pyAlienFX"
##start the user program here:
#Daemon = ServCmd()
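# Wire protocol, inferred from __procCmd below (the concrete area/colour
# values here are hypothetical, not taken from the project docs): a request
# is a '|'-separated list of calls, each ','-separated into a command name
# and its arguments, e.g.
# "Set_Color,Top,FF0000,True,False,1|Write_Conf"
# "PING" is answered with "PONG"; BYE, EXIT and RESTART drive the server loop.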
class ServCmd:
def __init__(s):
print "Initializing Driver ..."
s.driver = AlienFX_Driver()
print "Initializing Controller ..."
s.controller = AlienFX_Controller(s.driver)
s.computer = s.driver.computer
s.__serv = socket( AF_INET,SOCK_STREAM)
s.__serv.bind(ADDR)
#s.__serv.settimeout(60)
s.__cli = None
s.__imlistening = 0
s.__improcessing = 0
s.__run()
def __run(s):
s.__imlistening = 1
while s.__imlistening:
try:
s.__listen()
except KeyboardInterrupt:
s.__serv.close()
sys.exit(0)
s.__improcessing = 1
while s.__improcessing:
s.__procCmd()
s.__cli.close()
s.__serv.close()
def __listen(s):
s.__serv.listen(5)
print '...listening'
try:
cli,addr = s.__serv.accept()
except KeyboardInterrupt:
print "EXIT"
s.__serv.close()
sys.exit(0)
s.__cli = cli
print '...connected: ', addr
def __procCmd(s):
try:
cmd = s.__cli.recv(BUFSIZ)
except KeyboardInterrupt:
s.__cli.close()
s.__serv.close()
sys.exit(0)
if not cmd: return
print cmd
s.__servCmd(cmd)
if s.__improcessing:
if cmd != "PING":
for c in cmd.split('|'):
command = c.split(',')[0]
arg = c.split(',')[1:]
if command == "Set_Loop":
action = arg[0]
s.controller.Set_Loop(action)
elif command == "Set_Loop_Conf":
if arg[0] == "True":
Save = True
elif arg[0] == "False":
Save = False
else:
Save = None
if arg[1]:
block = int(arg[1])
else:
block = None
if Save and block:
s.controller.Set_Loop_Conf(Save,block)
elif Save:
s.controller.Set_Loop_Conf(Save=Save)
elif block:
s.controller.Set_Loop_Conf(block=block)
elif command == "Add_Loop_Conf":
area,mode,color1,color2 = arg[0],arg[1],arg[2],arg[3]
if not color2:
color2 = None
elif color2 == "None":
color2 = None
if area and mode and color1:
s.controller.Add_Loop_Conf(area,mode,color1,color2)
elif command == "Add_Speed_Conf":
if arg[0]:
speed = int(arg[0])
s.controller.Add_Speed_Conf(speed)
else:
s.controller.Add_Speed_Conf()
elif command == "End_Loop_Conf":
s.controller.End_Loop_Conf()
elif command == "End_Transfert_Conf":
s.controller.End_Transfert_Conf()
elif command == "Write_Conf":
s.controller.Write_Conf()
elif command == "Set_Color":
Area,Color = arg[0],arg[1]
if arg[2]:
if arg[2] == "False":
Save = False
elif arg[2] == "True":
Save = True
else:
Save = None
else:
Save = None
if arg[3]:
if arg[3] == "False":
Apply = False
elif arg[3] == "True":
Apply = True
else:
Apply = None
else:
Apply = None
if arg[4]:
block = int(arg[4])
else:
block = None
if Save and Apply and block:
s.controller.Set_Color(Area, Color, Save = Save, Apply = Apply, block = block)
elif Save and Apply:
s.controller.Set_Color(Area, Color, Save = Save, Apply = Apply)
elif Save and block:
s.controller.Set_Color(Area, Color, Save = Save, block = block)
elif Apply and block:
s.controller.Set_Color(Area, Color, Apply = Apply, block = block)
elif Save:
s.controller.Set_Color(Area, Color, Save = Save)
elif Apply:
s.controller.Set_Color(Area, Color, Apply = Apply)
elif block:
s.controller.Set_Color(Area, Color, block = block)
elif command == "Set_Color_Blink":
Area,Color = arg[0],arg[1]
if arg[2]:
if arg[2] == "False":
Save = False
elif arg[2] == "True":
Save = True
else:
Save = None
else:
Save = None
if arg[3]:
if arg[3] == "False":
Apply = False
elif arg[3] == "True":
Apply = True
else:
Apply = None
else:
Apply = None
if arg[4]:
block = int(arg[4])
else:
block = None
if Save and Apply and block:
s.controller.Set_Color_Blink(Area, Color, Save = Save, Apply = Apply, block = block)
elif Save and Apply:
s.controller.Set_Color_Blink(Area, Color, Save = Save, Apply = Apply)
elif Save and block:
s.controller.Set_Color_Blink(Area, Color, Save = Save, block = block)
elif Apply and block:
s.controller.Set_Color_Blink(Area, Color, Apply = Apply, block = block)
elif Save:
s.controller.Set_Color_Blink(Area, Color, Save = Save)
elif Apply:
s.controller.Set_Color_Blink(Area, Color, Apply = Apply)
elif block:
s.controller.Set_Color_Blink(Area, Color, block = block)
elif command == "Set_Color_Morph":
Area,Color1,Color2 = arg[0],arg[1],arg[2]
if arg[3]:
if arg[3] == "False":
Save = False
elif arg[3] == "True":
Save = True
else:
Save = None
else:
Save = None
if arg[4]:
if arg[4] == "False":
Apply = False
elif arg[4] == "True":
Apply = True
else:
Apply = None
else:
Apply = None
if arg[5]:
block = int(arg[5])
else:
block = None
if Save and Apply and block:
s.controller.Set_Color_Morph(Area,Color1,Color2, Save = Save, Apply = Apply, block = block)
elif Save and Apply:
s.controller.Set_Color_Morph(Area,Color1,Color2, Save = Save, Apply = Apply)
elif Save and block:
s.controller.Set_Color_Morph(Area,Color1,Color2, Save = Save, block = block)
elif Apply and block:
s.controller.Set_Color_Morph(Area,Color1,Color2, Apply = Apply, block = block)
elif Save:
s.controller.Set_Color_Morph(Area,Color1,Color2, Save = Save)
elif Apply:
s.controller.Set_Color_Morph(Area,Color1,Color2, Apply = Apply)
elif block:
s.controller.Set_Color_Morph(Area,Color1,Color2, block = block)
elif command == "WaitForOk":
s.controller.WaitForOk()
elif command == "Get_State":
s.controller.Get_State()
elif command == "Reset":
res_cmd = int(arg[0],16)
s.controller.Reset(res_cmd)
s.__cli.send('executed')
elif cmd == "PING":
print "Received Ping => Sending PONG"
s.__cli.send('PONG')
print "sent"
def __servCmd(s,cmd):
cmd = cmd.strip()
if cmd == 'BYE':
s.__improcessing = 0
elif cmd == 'EXIT':
s.__improcessing = 0
s.__imlistening = 0
elif cmd == 'RESTART':
s.__improcessing = 0
s.__imlistening = 0
s.__init__()
if __name__ == "__main__":
Daemon = ServCmd()
#if __name__ == "__main__":
## do the UNIX double-fork magic, see Stevens' "Advanced
## Programming in the UNIX Environment" for details (ISBN 0201563177)
#try:
#pid = os.fork()
#if pid > 0:
## exit first parent
#sys.exit(0)
#except OSError, e:
#print "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
#sys.exit(1)
## decouple from parent environment
#os.chdir("/") #don't prevent unmounting....
#os.setsid()
#os.umask(0)
## do second fork
#try:
#pid = os.fork()
#if pid > 0:
## exit from second parent, print eventual PID before
##print "Daemon PID %d" % pid
#open(PIDFILE,'w').write("%d"%pid)
#sys.exit(0)
#except OSError, e:
#print "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
#sys.exit(1)
## start the daemon main loop
#main() | Findarato/pyalienfx | pyAlienFX_daemon.py | Python | gpl-3.0 | 9,699 | [
"VisIt"
] | c17b7a0d5cde68afcd4a1a2fbeb7e1164f9f61ae87eec2ffebfb643b15b5ae18 |
#!/usr/bin/env python
from __future__ import division
from optparse import OptionParser
import roslib
import rospy
import rosparam
import copy
# import cv: open cv 1 not used
import cv2
import numpy as np
import threading
import dynamic_reconfigure.server
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from std_msgs.msg import Float32, Header, String
from multi_tracker.msg import Contourinfo, Contourlist
from multi_tracker.msg import Trackedobject, Trackedobjectlist
from multi_tracker.srv import resetBackgroundService, addImageToBackgroundService
import image_processing
from distutils.version import LooseVersion, StrictVersion
print 'Using open cv: ' + cv2.__version__
if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"):
OPENCV_VERSION = 3
print 'Open CV 3'
else:
OPENCV_VERSION = 2
print 'Open CV 2'
if 0:#OPENCV_VERSION == 3:
raise ImportError('cv bridge not compatible with opencv 3, killing live viewer')
# for basler ace cameras, use camera_aravis
# https://github.com/ssafarik/camera_aravis
# rosrun camera_aravis camnode
# default image: /camera/image_raw
# for firefly cameras, camera1394 does not provide timestamps but otherwise works
# use point grey drivers
# http://wiki.ros.org/pointgrey_camera_driver
# rosrun pointgrey_camera_driver camera_node
# default image: /camera/image_mono
# Trajectory class to aid in drawing colored tracked trajectories with opencv
class Trajectory(object):
def __init__(self, objid):
self.objid = objid
self.positions = []
self.color = None
self.covariances = []
self.popout = 0
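# Note on 'popout' (a reading of tracked_object_callback below): it is set
# on every trajectory each frame and cleared again for objects still young
# enough to draw, so stale trajectories shrink and are eventually deleted.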
def draw_trajectory(img, pts, color, thickness):
for i in range(len(pts)-3):
try:
cv2.line(img, (int(pts[i][0]), int(pts[i][1])), (int(pts[i+1][0]), int(pts[i+1][1])), color, thickness)
except:
print 'could not draw trajectory line, length pts: ', len(pts), 'i: ', i
# The main tracking class, a ROS node
class LiveViewer:
def __init__(self, nodenum):
'''
Default image_topic for:
Basler ace cameras with camera_aravis driver: camera/image_raw
Pt Grey Firefly cameras with pt grey driver : camera/image_mono
'''
# default parameters (parameter server overides them)
self.params = { 'image_topic' : '/camera/image_mono',
'min_persistence_to_draw' : 10,
'max_frames_to_draw' : 50,
'camera_encoding' : 'mono8', # fireflies are bgr8, basler gige cams are mono8
'roi_l' : 0,
'roi_r' : -1,
'roi_b' : 0,
'roi_t' : -1,
'circular_mask_x' : 'none',
'circular_mask_y' : 'none',
'circular_mask_r' : 'none',
}
for parameter, value in self.params.items():
try:
# allows image processed view to be overlaid with tracked objects
p = '/multi_tracker/' + nodenum + '/liveviewer/' + parameter
self.params[parameter] = rospy.get_param(p)
except:
try:
p = '/multi_tracker/' + nodenum + '/tracker/' + parameter
self.params[parameter] = rospy.get_param(p)
except:
print 'Using default parameter: ', parameter, ' = ', value
# initialize the node
rospy.init_node('liveviewer_' + nodenum)
self.nodename = rospy.get_name().rstrip('/')
self.nodenum = nodenum
# initialize display
self.window_name = 'liveviewer_' + nodenum
self.subTrackedObjects = rospy.Subscriber('/multi_tracker/' + nodenum + '/tracked_objects', Trackedobjectlist, self.tracked_object_callback)
self.subContours = rospy.Subscriber('/multi_tracker/' + nodenum + '/contours', Contourlist, self.contour_callback)
self.cvbridge = CvBridge()
self.tracked_trajectories = {}
self.contours = None
self.window_initiated = False
# Subscriptions - subscribe to images, and tracked objects
self.image_mask = None
sizeImage = 128+1024*1024*3 # Size of header + data.
self.subImage = rospy.Subscriber(self.params['image_topic'], Image, self.image_callback, queue_size=5, buff_size=2*sizeImage, tcp_nodelay=True)
# for adding images to background
add_image_to_background_service_name = '/multi_tracker/' + self.nodenum + '/' + 'tracker/' + "add_image_to_background"
rospy.wait_for_service(add_image_to_background_service_name)
try:
self.add_image_to_background = rospy.ServiceProxy(add_image_to_background_service_name, addImageToBackgroundService)
except:
print 'could not connect to add image to background service - is tracker running?'
def reset_background(self, service_call):
self.reset_background_flag = True
return 1
def tracked_object_callback(self, tracked_objects):
for trajec in self.tracked_trajectories.values():
trajec.popout = 1
for tracked_object in tracked_objects.tracked_objects:
if tracked_object.persistence > self.params['min_persistence_to_draw']:
if tracked_object.objid not in self.tracked_trajectories.keys(): # create new object
self.tracked_trajectories.setdefault(tracked_object.objid, Trajectory(tracked_object.objid))
self.tracked_trajectories[tracked_object.objid].color = np.random.randint(0,255,3).tolist()
# update tracked objects
self.tracked_trajectories[tracked_object.objid].covariances.append(tracked_object.covariance)
self.tracked_trajectories[tracked_object.objid].positions.append([tracked_object.position.x, tracked_object.position.y])
# if it is a young object, let it grow to length 100
if len(self.tracked_trajectories[tracked_object.objid].positions) < self.params['max_frames_to_draw']:
self.tracked_trajectories[tracked_object.objid].popout = 0
# cull old objects
for objid, trajec in self.tracked_trajectories.items():
if trajec.popout:
trajec.positions.pop(0)
trajec.covariances.pop(0)
if len(trajec.positions) <= 1:
del(self.tracked_trajectories[objid])
def contour_callback(self, contours):
self.contours = contours
def image_callback(self, rosimg):
# Convert the image.
try:
img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough') # might need to change to bgr for color cameras
except CvBridgeError, e:
rospy.logwarn ('Exception converting background image from ROS to opencv: %s' % e)
img = np.zeros((320,240))
self.imgScaled = img[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']]
self.shapeImage = self.imgScaled.shape # (height,width)
if self.params['circular_mask_x'] != 'none':
if self.image_mask is None:
self.image_mask = np.zeros_like(self.imgScaled)
cv2.circle(self.image_mask,(self.params['circular_mask_x'], self.params['circular_mask_y']),int(self.params['circular_mask_r']),[1,1,1],-1)
self.imgScaled = self.image_mask*self.imgScaled
# Image for display
if self.params['camera_encoding'] == 'mono8':
try:
self.imgOutput = cv2.cvtColor(self.imgScaled, cv2.COLOR_GRAY2RGB)
except:
self.imgOutput = self.imgScaled
print("To get rid of this error warning, set rosparam /multi_tracker/1/liveviewer/camera_encoding to something other than mono8 (e.g. color)")
elif self.params['camera_encoding'] == 'binary':
self.imgOutput = self.imgScaled
else:
self.imgOutput = self.imgScaled
# Draw ellipses from contours
if self.contours is not None:
for c, contour in enumerate(self.contours.contours):
# Recover the semi-axes from area and the axis ratio 'ecc':
# area = pi*a*b with b = ecc*a, hence a = sqrt(area / (pi*ecc))
if contour.ecc != 0: # eccentricity of ellipse < 1 but > 0
a = np.sqrt( contour.area / (np.pi*contour.ecc) )
b = contour.ecc*a
else: # eccentricity of circle is 0
a = 1
b = 1
center = (int(contour.x), int(contour.y))
angle = int(contour.angle)
axes = (int(np.min([a,b])), int(np.max([a,b])))
cv2.ellipse(self.imgOutput, center, axes, angle, 0, 360, (0,255,0), 2 )
# Draw the tracked trajectories
for objid, trajec in self.tracked_trajectories.items():
if len(trajec.positions) > 5:
draw_trajectory(self.imgOutput, trajec.positions, trajec.color, 2)
cv2.circle(self.imgOutput,(int(trajec.positions[-1][0]),int(trajec.positions[-1][1])),int(trajec.covariances[-1]),trajec.color,2)
# Display the image
cv2.imshow(self.window_name, self.imgOutput)
if not self.window_initiated: # for some reason this approach works in opencv 3 instead of previous approach
cv2.setMouseCallback(self.window_name, self.on_mouse_click)
self.window_initiated = True
ascii_key = cv2.waitKey(1)
if ascii_key != -1:
self.on_key_press(ascii_key)
def on_mouse_click(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONUP:
print 'clicked pixel: ', [x, y]
def on_key_press(self, ascii_key):
key = chr(ascii_key)
if key == 'a':
resp = self.add_image_to_background()
print 'added image to background'
def Main(self):
while (not rospy.is_shutdown()):
rospy.spin()
cv2.destroyAllWindows()
#####################################################################################################
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--nodenum", type="str", dest="nodenum", default='1',
help="node number, for example, if running multiple tracker instances on one computer")
(options, args) = parser.parse_args()
liveviewer = LiveViewer(options.nodenum)
liveviewer.Main()
| florisvb/multi_tracker | nodes/liveviewer.py | Python | mit | 10,903 | [
"Firefly"
] | bfcad77327dd06d4f7aa66de7d5c517ed5aab1d3cb79e1f64b548370b49eb9b6 |
#!/usr/bin/python2
import httplib2
import os
import sys
import json
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Cloud Console at
# https://cloud.google.com/console.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = ".client_secrets.json"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Cloud Console
https://cloud.google.com/console
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
# This OAuth 2.0 access scope allows for read-only access to the authenticated
# user's account, but not other types of account access.
YOUTUBE_READONLY_SCOPE = "https://www.googleapis.com/auth/youtube.readonly"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def get_video(youtube, video_id):
# Call the API's videos.list method to retrieve the video resource.
videos_list_response = youtube.videos().list(
id=video_id, part='id,snippet,contentDetails,fileDetails,recordingDetails'
).execute()
# If the response does not contain an array of "items" then the video was
# not found.
if not videos_list_response["items"]:
print "Video '%s' was not found." % video_id
sys.exit(1)
return videos_list_response["items"][0]
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
message=MISSING_CLIENT_SECRETS_MESSAGE,
scope=YOUTUBE_READONLY_SCOPE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
flags = argparser.parse_args()
credentials = run_flow(flow, storage, flags)
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
# Retrieve the contentDetails part of the channel resource for the
# authenticated user's channel.
channels_response = youtube.channels().list(
mine=True,
part="contentDetails"
).execute()
vid={}
vid['number_videos']=0
vid['videos']=[]
for channel in channels_response["items"]:
# From the API response, extract the playlist ID that identifies the list
# of videos uploaded to the authenticated user's channel.
uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["uploads"]
print "Videos in list %s" % uploads_list_id
# Retrieve the list of videos uploaded to the authenticated user's channel.
playlistitems_list_request = youtube.playlistItems().list(
playlistId=uploads_list_id,
part="snippet,contentDetails",
maxResults=50
)
while playlistitems_list_request:
playlistitems_list_response = playlistitems_list_request.execute()
# Print information about each video.
for playlist_item in playlistitems_list_response["items"]:
title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
details=get_video(youtube,video_id)
try:
loc = details['recordingDetails']['location']
except KeyError:
loc = None
print "%s (%s) - %s" % (title, video_id, loc)
if loc:
d={}
d['longitude']=loc['longitude']
d['latitude']=loc['latitude']
d['altitude']=loc['altitude']
d['title']=title
d['url']="http://www.youtube.com/embed/%s"%(video_id)
vid['number_videos']+=1
vid['videos'].append(d)
playlistitems_list_request = youtube.playlistItems().list_next(
playlistitems_list_request, playlistitems_list_response)
print
json.dump(vid,open('/home/esterhui/pythion.com/files/gps/data/video.json','w'))
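# Shape of the written file (field names taken from the dicts built above):
# {"number_videos": N, "videos": [{"longitude": ..., "latitude": ...,
# "altitude": ..., "title": "...", "url": "http://www.youtube.com/embed/<id>"}]}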
| SimplifyData/pypu | scripts/build_json_from_youtube.py | Python | gpl-2.0 | 4,612 | [
"VisIt"
] | 87a9c6481f51f7bcc9fcd5b8bcf502aa86490e4198e36b5669912b327c03b7d6 |
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""ElmSubmit configuration parameters."""
__revision__ = "$Id$"
import pkg_resources
from invenio.config import CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_URL, CFG_SITE_NAME
# elmsubmit configuration file:
CFG_ELMSUBMIT_FILES = {
'mailprefix': 'mail',
'test_case_1': pkg_resources.resource_filename('invenio.testsuite', 'data/elmsubmit_tests_1.mbox'),
'test_case_2': pkg_resources.resource_filename('invenio.testsuite', 'data/elmsubmit_tests_2.mbox'),
}
# Messages we need to send to the user, before we've identified the
# correct language to talk to them in (so we assume English!):
# pylint: disable=C0301
CFG_ELMSUBMIT_NOLANGMSGS = {'bad_email': 'Your email could not be parsed correctly to discover a submission. Please check your email client is functioning correctly.',
'bad_submission': 'The submission data that you have provided could not be parsed correctly. Please visit <%s> for a description of the correct format.' % CFG_SITE_URL,
'missing_type': 'The submission data that you have provided does not contain a TYPE field. This is mandatory for all submissions.',
'unsupported_type': 'The TYPE field of your submission does not contain a recognized value.',
'missing_fields_1': 'Your submission of type',
'missing_fields_2': 'does not contain all the required fields:',
'bad_field': 'This field does not validate correctly:',
'correct_format': 'It must be formatted as follows:',
'missing_attachment': 'We could not find the following file attached to your submission email:',
'temp_problem': 'There is a temporary problem with %s\'s email submission interface. Please retry your submission again shortly.' % CFG_SITE_NAME}
CFG_ELMSUBMIT_SERVERS = {'smtp': 'localhost'}
CFG_ELMSUBMIT_PEOPLE = {'admin': CFG_SITE_ADMIN_EMAIL}
# fields required in the submission mail
CFG_ELMSUBMIT_REQUIRED_FIELDS = ['title',
'author',
'date',
'files']
# defines the mapping of metadata fields to their MARC codes;
# when the mapped code is a list, the first value of a repeatable field goes
# to the first code in the list and any further values go to the second
CFG_ELMSUBMIT_MARC_MAPPING = {'author': ['100__a', '700__a'],
'title': '245__a',
'subtitle': '245__b',
'photocaption': '246__b',
'subject': '65017a',
'secondary_subject': '65027a',
'email': '8560_f',
'files': ['FFT__a', 'FFT__a'],
'affiliation': ['100__u', '700__u'],
'language': '041__a',
'abstract': '520__a',
'keywords': '6531_a',
'OAIid': '909COo',
'PrimaryReportNumber': '037__a',
'AdditionalReportNumber': '088__a',
'series': ['490__a','490__v'],
'year': '260__a',
'note': '500__a',
#test tags used in test cases
'test1': '111__a',
'test2': '111__b',
'test3': '111__c',
'test4': '111__d',
'test5': '111__e'
}
# the list of the fields determines which subfields should be joined into a
# single datafield
CFG_ELMSUBMIT_MARC_FIELDS_JOINED = {'700__': [['a', 'u']],
'100__': [['a', 'u']],
#test tags
'111__': [['a','c'],['b','d']]
}
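# For example, with the mapping above an author and an affiliation taken from
# the same submission entry land in one '700__' datafield as subfields 'a'
# and 'u'.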
| MSusik/invenio | invenio/legacy/elmsubmit/config.py | Python | gpl-2.0 | 4,355 | [
"VisIt"
] | 43dce4850a02ec614084847d613195cda9c278a3e9898ad8d56a7c14ec79b8a5 |
'''
libChEBIpy (c) University of Manchester 2015
libChEBIpy is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
from ._base_object import BaseObject
class Name(BaseObject):
'''Class representing a ChEBI name.'''
def __init__(self, name, typ, source, adapted, language):
self.__name = name
self.__typ = typ
self.__source = source
self.__adapted = adapted
self.__language = language
def get_name(self):
'''Returns name'''
return self.__name
def get_type(self):
'''Returns type'''
return self.__typ
def get_adapted(self):
'''Returns adapted'''
return self.__adapted
def get_language(self):
'''Returns language'''
return self.__language
def get_source(self):
'''Returns source'''
return self.__source
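# Minimal usage sketch (the argument values here are hypothetical, not taken
# from ChEBI data; the constructor signature is as defined above):
# n = Name('D-glucose', 'SYNONYM', 'KEGG COMPOUND', False, 'en')
# n.get_name() # -> 'D-glucose'
# n.get_source() # -> 'KEGG COMPOUND'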
| libChEBI/libChEBIpy | libchebipy/_name.py | Python | mit | 981 | [
"VisIt"
] | ffd446e1d0d19b16001380b2b5c5e04117fb662c0dcc6af0a3c835b1e380a5ea |
import pycparser.c_ast as c_ast
from pycparserext.ext_c_generator import GnuCGenerator
class CodeGenerator(GnuCGenerator):
def __init__(self, emit_line_numbers=True):
super(CodeGenerator, self).__init__()
self.cur_filename = None
self.cur_line_number = 0
self.emit_line_numbers = emit_line_numbers
def visit_FileAST(self, n):
s = ''
for ext in n.ext:
s += self.emit_line_number(ext)
if isinstance(ext, c_ast.FuncDef):
s += self.visit(ext)
elif isinstance(ext, c_ast.Pragma):
s += self.visit(ext) + '\n'
self.cur_line_number += 1
else:
s += self.visit(ext) + ';\n'
self.cur_line_number += 1
return s
def visit_FuncDef(self, n):
s = self.visit(n.decl)
self.indent_level = 0
s += '\n'
self.cur_line_number += 1
param_decls = list()
if n.param_decls:
for p in n.param_decls:
param_decls.append(self.visit(p))
self.cur_line_number += 1
s += ';\n'.join(param_decls) + ';\n'
s += self.visit(n.body)
s += '\n'
self.cur_line_number += 1
return s
def visit_Compound(self, n):
s = self._make_indent() + '{\n'
self.cur_line_number += 1
self.indent_level += 2
if n.block_items:
s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items)
self.indent_level -= 2
s += self._make_indent() + '}\n'
self.cur_line_number += 1
return s
def visit_If(self, n):
s = 'if ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
self.cur_line_number += 1
s += self._generate_stmt(n.iftrue, add_indent=True)
if n.iffalse:
s += self._make_indent() + 'else\n'
self.cur_line_number += 1
s += self._generate_stmt(n.iffalse, add_indent=True)
return s
def visit_For(self, n):
s = 'for ('
if n.init: s += self.visit(n.init)
s += ';'
if n.cond: s += ' ' + self.visit(n.cond)
s += ';'
if n.next: s += ' ' + self.visit(n.next)
s += ')\n'
self.cur_line_number += 1
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_While(self, n):
s = 'while ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
self.cur_line_number += 1
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_DoWhile(self, n):
s = 'do\n'
self.cur_line_number += 1
s += self._generate_stmt(n.stmt, add_indent=True)
s += self._make_indent() + 'while ('
if n.cond: s += self.visit(n.cond)
s += ');'
return s
def visit_Switch(self, n):
s = 'switch (' + self.visit(n.cond) + ')\n'
self.cur_line_number += 1
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_Case(self, n):
s = 'case ' + self.visit(n.expr) + ':\n'
self.cur_line_number += 1
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Default(self, n):
s = 'default:\n'
self.cur_line_number += 1
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Label(self, n):
if n.stmt is None:
s = n.name + ': ;\n'
else:
s = n.name + ':\n' + self._generate_stmt(n.stmt)
self.cur_line_number += 1
return s
def _generate_struct_union(self, n, name):
s = name + ' ' + (n.name or '')
if n.decls:
s += '\n'
self.cur_line_number += 1
s += self._make_indent()
self.indent_level += 2
s += '{\n'
self.cur_line_number += 1
for decl in n.decls:
s += self._generate_stmt(decl)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def _generate_stmt(self, n, add_indent=False):
s = self.emit_line_number(n)
s += super(CodeGenerator, self)._generate_stmt(n, add_indent)
if (len(s) > 0) and (s[-1] == '\n'):
self.cur_line_number += 1
return s
def emit_line_number(self, n):
if not self.emit_line_numbers:
return ''
if n is None:
return ''
if (n.coord.line != self.cur_line_number) or (n.coord.file != self.cur_filename):
self.cur_filename = n.coord.file
self.cur_line_number = n.coord.line
return '#line %s "%s"\n' % (self.cur_line_number, self.cur_filename)
return ''
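# Minimal usage sketch (assumes pycparserext's GnuCParser; the file name is
# hypothetical): parse a translation unit, then regenerate it with '#line'
# directives so downstream tools can map the output back to the source.
# from pycparserext.ext_c_parser import GnuCParser
# ast = GnuCParser().parse(source_text, filename='foo.c')
# print(CodeGenerator().visit(ast))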
| KivApple/c_ext | c_ext/codegen.py | Python | mit | 4,860 | [
"VisIt"
] | 635a1b5929e9272fa9325511a37c7815f09900f90b0da45f6b7486457565008e |
"""
nflgame is an API to retrieve and read NFL Game Center JSON data.
It can work with real-time data, which can be used for fantasy football.
nflgame works by parsing the same JSON data that powers NFL.com's live
GameCenter. Therefore, nflgame can be used to report game statistics while
a game is being played.
The package comes pre-loaded with game data from every pre- and regular
season game from 2009 up until the present (I try to update it every week).
Therefore, querying such data does not actually ping NFL.com.
However, if you try to search for data in a game that is being currently
played, the JSON data will be downloaded from NFL.com at each request (so be
careful not to inspect for data too many times while a game is being played).
If you ask for data for a particular game that hasn't been cached to disk
but is no longer being played, it will be automatically cached to disk
so that no further downloads are required.
Here's a quick teaser to find the top 5 running backs by rushing yards in the
first week of the 2013 season:
#!python
import nflgame
games = nflgame.games(2013, week=1)
players = nflgame.combine_game_stats(games)
for p in players.rushing().sort('rushing_yds').limit(5):
msg = '%s %d carries for %d yards and %d TDs'
print msg % (p, p.rushing_att, p.rushing_yds, p.rushing_tds)
And the output is:
L.McCoy 31 carries for 184 yards and 1 TDs
T.Pryor 13 carries for 112 yards and 0 TDs
S.Vereen 14 carries for 101 yards and 0 TDs
A.Peterson 18 carries for 93 yards and 2 TDs
R.Bush 21 carries for 90 yards and 0 TDs
Or you could find the top 5 passing plays in the same time period:
#!python
import nflgame
games = nflgame.games(2013, week=1)
plays = nflgame.combine_plays(games)
for p in plays.sort('passing_yds').limit(5):
print p
And the output is:
(DEN, DEN 22, Q4, 3 and 8) (4:42) (Shotgun) P.Manning pass
short left to D.Thomas for 78 yards, TOUCHDOWN. Penalty on
BAL-E.Dumervil, Defensive Offside, declined.
(DET, DET 23, Q3, 3 and 7) (5:58) (Shotgun) M.Stafford pass short
middle to R.Bush for 77 yards, TOUCHDOWN.
(NYG, NYG 30, Q2, 1 and 10) (2:01) (No Huddle, Shotgun) E.Manning
pass deep left to V.Cruz for 70 yards, TOUCHDOWN. Pass complete on
a fly pattern.
(NO, NO 24, Q2, 2 and 6) (5:11) (Shotgun) D.Brees pass deep left to
K.Stills to ATL 9 for 67 yards (R.McClain; R.Alford). Pass 24, YAC
43
(NYG, NYG 20, Q1, 1 and 10) (13:04) E.Manning pass short middle
to H.Nicks pushed ob at DAL 23 for 57 yards (M.Claiborne). Pass
complete on a slant pattern.
If you aren't a programmer, then the
[tutorial for non programmers](http://goo.gl/y05fVj) is for you.
If you need help, please come visit us at IRC/FreeNode on channel `#nflgame`.
If you've never used IRC before, then you can
[use a web client](http://webchat.freenode.net/?channels=%23nflgame).
(Enter any nickname you like, make sure the channel is `#nflgame`, fill in
the captcha and hit connect.)
Failing IRC, the second fastest way to get help is to
[open a new issue on the
tracker](https://github.com/BurntSushi/nflgame/issues/new).
There are several active contributors to nflgame that watch the issue tracker.
We tend to respond fairly quickly!
"""
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict # from PyPI
import itertools
import sys
if sys.version_info[0] != 2:
print("nflgame requires Python 2.6+ and does not yet work with Python 3")
print("You are running Python version {}.{}".format(
sys.version_info.major, sys.version_info.minor))
sys.exit(1)
import nflgame.game
import nflgame.live
import nflgame.player
import nflgame.sched
import nflgame.seq
from nflgame.version import __version__
assert OrderedDict # Asserting the import for static analysis.
VERSION = __version__ # Deprecated. Backwards compatibility.
NoPlayers = nflgame.seq.GenPlayerStats(None)
"""
NoPlayers corresponds to the identity element of a Players sequences.
Namely, adding it to any other Players sequence has no effect.
"""
players = nflgame.player._create_players()
"""
A dict of all players and meta information about each player keyed
by GSIS ID. (The identifiers used by NFL.com GameCenter.)
"""
teams = [
['ARI', 'Arizona', 'Cardinals', 'Arizona Cardinals'],
['ATL', 'Atlanta', 'Falcons', 'Atlanta Falcons'],
['BAL', 'Baltimore', 'Ravens', 'Baltimore Ravens'],
['BUF', 'Buffalo', 'Bills', 'Buffalo Bills'],
['CAR', 'Carolina', 'Panthers', 'Carolina Panthers'],
['CHI', 'Chicago', 'Bears', 'Chicago Bears'],
['CIN', 'Cincinnati', 'Bengals', 'Cincinnati Bengals'],
['CLE', 'Cleveland', 'Browns', 'Cleveland Browns'],
['DAL', 'Dallas', 'Cowboys', 'Dallas Cowboys'],
['DEN', 'Denver', 'Broncos', 'Denver Broncos'],
['DET', 'Detroit', 'Lions', 'Detroit Lions'],
['GB', 'Green Bay', 'Packers', 'Green Bay Packers', 'G.B.', 'GNB'],
['HOU', 'Houston', 'Texans', 'Houston Texans'],
['IND', 'Indianapolis', 'Colts', 'Indianapolis Colts'],
['JAC', 'Jacksonville', 'Jaguars', 'Jacksonville Jaguars', 'JAX'],
['KC', 'Kansas City', 'Chiefs', 'Kansas City Chiefs', 'K.C.', 'KAN'],
['MIA', 'Miami', 'Dolphins', 'Miami Dolphins'],
['MIN', 'Minnesota', 'Vikings', 'Minnesota Vikings'],
['NE', 'New England', 'Patriots', 'New England Patriots', 'N.E.', 'NWE'],
['NO', 'New Orleans', 'Saints', 'New Orleans Saints', 'N.O.', 'NOR'],
['NYG', 'Giants', 'New York Giants', 'N.Y.G.'],
['NYJ', 'Jets', 'New York Jets', 'N.Y.J.'],
['OAK', 'Oakland', 'Raiders', 'Oakland Raiders'],
['PHI', 'Philadelphia', 'Eagles', 'Philadelphia Eagles'],
['PIT', 'Pittsburgh', 'Steelers', 'Pittsburgh Steelers'],
['SD', 'San Diego', 'Chargers', 'San Diego Chargers', 'S.D.', 'SDG'],
['SEA', 'Seattle', 'Seahawks', 'Seattle Seahawks'],
['SF', 'San Francisco', '49ers', 'San Francisco 49ers', 'S.F.', 'SFO'],
['STL', 'St. Louis', 'Rams', 'St. Louis Rams', 'S.T.L.'],
['TB', 'Tampa Bay', 'Buccaneers', 'Tampa Bay Buccaneers', 'T.B.', 'TAM'],
['TEN', 'Tennessee', 'Titans', 'Tennessee Titans'],
['WAS', 'Washington', 'Redskins', 'Washington Redskins', 'WSH'],
]
"""
A list of all teams. Each item is a list of different ways to
describe a team. (i.e., JAC, JAX, Jacksonville, Jaguars, etc.).
The first item in each list is always the standard NFL.com
team abbreviation (two or three letters).
"""
def find(name, team=None):
"""
Finds a player (or players) with a name matching (case insensitive)
name and returns them as a list.
If team is not None, it is used as an additional search constraint.
"""
hits = []
for player in players.itervalues():
if player.name.lower() == name.lower():
if team is None or team.lower() == player.team.lower():
hits.append(player)
return hits
def standard_team(team):
"""
Returns a standard abbreviation when team corresponds to a team in
nflgame.teams (case insensitive). All known variants of a team name are
searched. If no team is found, None is returned.
"""
team = team.lower()
for variants in teams:
for variant in variants:
if team == variant.lower():
return variants[0]
return None
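# Examples (values from the teams table above; matching is case insensitive):
# standard_team('Jets') -> 'NYJ'; standard_team('s.d.') -> 'SD'
# standard_team('Nowhere') -> None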
def games(year, week=None, home=None, away=None, kind='REG', started=False):
"""
games returns a list of all games matching the given criteria. Each
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
As a special case, if the home and away teams are set to the same team,
then all games where that team played are returned.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
return list(games_gen(year, week, home, away, kind, started))
def games_gen(year, week=None, home=None, away=None,
kind='REG', started=False):
"""
games returns a generator of all games matching the given criteria. Each
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
As a special case, if the home and away teams are set to the same team,
then all games where that team played are returned.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = _search_schedule(year, week, home, away, kind, started)
if not infos:
return None
def gen():
for info in infos:
g = nflgame.game.Game(info['eid'])
if g is None:
continue
yield g
return gen()
def one(year, week, home, away, kind='REG', started=False):
"""
one returns a single game matching the given criteria. The
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
one returns either a single game or no games. If there are multiple games
matching the given criteria, an assertion is raised.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = _search_schedule(year, week, home, away, kind, started)
if not infos:
return None
assert len(infos) == 1, 'More than one game matches the given criteria.'
return nflgame.game.Game(infos[0]['eid'])
def combine(games, plays=False):
"""
DEPRECATED. Please use one of nflgame.combine_{game,play,max}_stats
instead.
Combines a list of games into one big player sequence containing game
level statistics.
This can be used, for example, to get PlayerStat objects corresponding to
statistics across an entire week, some number of weeks or an entire season.
If the plays parameter is True, then statistics will be derived from
play by play data. This mechanism is slower but will contain more detailed
statistics like receiver targets, yards after the catch, punt and field
goal blocks, etc.
"""
if plays:
return combine_play_stats(games)
else:
return combine_game_stats(games)
def combine_game_stats(games):
"""
Combines a list of games into one big player sequence containing game
level statistics.
This can be used, for example, to get GamePlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
"""
return reduce(lambda ps1, ps2: ps1 + ps2,
[g.players for g in games if g is not None])
def combine_play_stats(games):
"""
Combines a list of games into one big player sequence containing play
level statistics.
This can be used, for example, to get PlayPlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
This function should be used in lieu of combine_game_stats when more
detailed statistics such as receiver targets, yards after the catch and
punt/FG blocks are needed.
N.B. Since this combines *all* play data, this function may take a while
to complete depending on the number of games passed in.
"""
return reduce(lambda p1, p2: p1 + p2,
[g.drives.players() for g in games if g is not None])
def combine_max_stats(games):
"""
Combines a list of games into one big player sequence containing maximum
statistics based on game and play level statistics.
This can be used, for example, to get GamePlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
This function should be used in lieu of combine_game_stats or
combine_play_stats when the best possible accuracy is desired.
"""
return reduce(lambda a, b: a + b,
[g.max_player_stats() for g in games if g is not None])
def combine_plays(games):
"""
Combines a list of games into one big play generator that can be searched
as if it were a single game.
"""
chain = itertools.chain(*[g.drives.plays() for g in games])
return nflgame.seq.GenPlays(chain)
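# The combined sequence supports the same querying as a single game's plays,
# e.g. (mirroring the module docstring above):
# plays = combine_plays(games(2013, week=1))
# for p in plays.sort('passing_yds').limit(5): print p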
def _search_schedule(year, week=None, home=None, away=None, kind='REG',
started=False):
"""
Searches the schedule to find the game identifiers matching the criteria
given.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = []
for info in nflgame.sched.games.itervalues():
y, t, w = info['year'], info['season_type'], info['week']
h, a = info['home'], info['away']
if year is not None:
if isinstance(year, list) and y not in year:
continue
if not isinstance(year, list) and y != year:
continue
if week is not None:
if isinstance(week, list) and w not in week:
continue
if not isinstance(week, list) and w != week:
continue
if home is not None and away is not None and home == away:
if h != home and a != home:
continue
else:
if home is not None and h != home:
continue
if away is not None and a != away:
continue
if t != kind:
continue
if started:
gametime = nflgame.live._game_datetime(info)
now = nflgame.live._now()
if gametime > now and (gametime - now).total_seconds() > 300:
continue
infos.append(info)
return infos
| icebluesun/nflgame | nflgame/__init__.py | Python | unlicense | 19,483 | [
"VisIt"
] | c09f626c711c6a6039a96968c9f50469c812a35e274422d35eff869182ac79f5 |
from __future__ import absolute_import, division, print_function
from jaspyx.context.block import BlockContext
from jaspyx.visitor import BaseVisitor
class While(BaseVisitor):
def visit_While(self, node):
self.indent()
self.output('while(')
self.visit(node.test)
self.output(') ')
self.block(node.body, context=BlockContext(self.stack[-1]))
self.output('\n')
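# Sketch of the emitted JavaScript (shape follows the output() calls above):
# while(<test>) { <body> }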
| ztane/jaspyx | jaspyx/visitor/while_.py | Python | mit | 413 | [
"VisIt"
] | 6a859037328dd5be92b0b5edee0ef0ed3394611da0f255643cacf8f9177c1f40 |
import ast
import optparse
import sys
import typing
from core.cfg import *
from core.expressions import *
from core.statements import *
from visualization.graph_renderer import CfgRenderer
def main(args):
optparser = optparse.OptionParser(usage="python3.6 -m frontend.cfg_generator [options] [string]")
optparser.add_option("-f", "--file",
help="Read a code snippet from the specified file")
optparser.add_option("-l", "--label",
help="The label for the visualization")
options, args = optparser.parse_args(args)
if options.file:
with open(options.file) as instream:
code = instream.read()
label = options.file
elif len(args) == 2:
code = args[1] + "\n"
label = "<code read from command line parameter>"
else:
print("Expecting Python code on stdin...")
code = sys.stdin.read()
label = "<code read from stdin>"
if options.label:
label = options.label
cfg = source_to_cfg(code)
CfgRenderer().render(cfg, label=label)
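# CLI sketch, per the optparse usage string above (the file name here is
# hypothetical):
# python3.6 -m frontend.cfg_generator -f example.py -l "example"
# With no -f option and no positional argument, the code is read from stdin.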
class LooseControlFlowGraph:
class SpecialEdgeType(Enum):
BREAK = 1
CONTINUE = 2
def __init__(self, nodes: Set[Node] = None, in_node: Node = None, out_node: Node = None, edges: Set[Edge] = None,
loose_in_edges=None,
loose_out_edges=None, both_loose_edges=None):
"""Loose control flow graph representation.
This representation uses a complete (non-loose) control flow graph via aggregation and adds loose edges and
some transformations methods to combine, prepend and append loose control flow graphs. This class
intentionally does not provide access to the linked CFG. The completed CFG can be retrieved finally with
`eject()`.
:param nodes: optional set of nodes of the control flow graph
:param in_node: optional entry node of the control flow graph
:param out_node: optional exit node of the control flow graph
:param edges: optional set of edges of the control flow graph
:param loose_in_edges: optional set of loose edges that have no start yet and end inside this CFG
:param loose_out_edges: optional set of loose edges that start inside this CFG and have no end yet
:param both_loose_edges: optional set of loose edges, loose on both ends
"""
assert not in_node or not (loose_in_edges or both_loose_edges)
assert not out_node or not (loose_out_edges or both_loose_edges)
assert all([e.source is None for e in loose_in_edges or []])
assert all([e.target is None for e in loose_out_edges or []])
assert all([e.source is None and e.target is None for e in both_loose_edges or []])
self._cfg = ControlFlowGraph(nodes or set(), in_node, out_node, edges or set())
self._loose_in_edges = loose_in_edges or set()
self._loose_out_edges = loose_out_edges or set()
self._both_loose_edges = both_loose_edges or set()
self._special_edges = []
@property
def nodes(self) -> Dict[int, Node]:
return self._cfg.nodes
@property
def in_node(self) -> Node:
return self._cfg.in_node
@in_node.setter
def in_node(self, node):
self._cfg._in_node = node
@property
def out_node(self) -> Node:
return self._cfg.out_node
@out_node.setter
def out_node(self, node):
self._cfg._out_node = node
@property
def edges(self) -> Dict[Tuple[Node, Node], Edge]:
return self._cfg.edges
@property
def loose_in_edges(self) -> Set[Edge]:
return self._loose_in_edges
@property
def loose_out_edges(self) -> Set[Edge]:
return self._loose_out_edges
@property
def both_loose_edges(self) -> Set[Edge]:
return self._both_loose_edges
@property
def special_edges(self) -> List[Tuple[Edge, SpecialEdgeType]]:
return self._special_edges
def loose(self):
return len(self.loose_in_edges) or len(self.loose_out_edges) or len(self.both_loose_edges) or len(
self.special_edges)
def add_node(self, node):
self.nodes[node.identifier] = node
def add_edge(self, edge):
"""Add a (loose/normal) edge to this loose CFG.
"""
if not edge.source and not edge.target:
self.both_loose_edges.add(edge)
self._cfg._in_node = None
self._cfg._out_node = None
elif not edge.source:
self.loose_in_edges.add(edge)
self._cfg._in_node = None
elif not edge.target:
self.loose_out_edges.add(edge)
self._cfg._out_node = None
else:
self.edges[edge.source, edge.target] = edge
def combine(self, other):
assert not (self.in_node and other.in_node)
assert not (self.out_node and other.out_node)
self.nodes.update(other.nodes)
self.edges.update(other.edges)
self.loose_in_edges.update(other.loose_in_edges)
self.loose_out_edges.update(other.loose_out_edges)
self.both_loose_edges.update(other.both_loose_edges)
self.special_edges.extend(other.special_edges)
self._cfg._in_node = other.in_node or self.in_node # agree on in_node
self._cfg._out_node = other.out_node or self.out_node # agree on out_node
return self
def prepend(self, other):
other.append(self)
self.replace(other)
def append(self, other):
assert not (self.loose_out_edges and other.loose_in_edges)
assert not self.both_loose_edges or (not other.loose_in_edges and not other.both_loose_edges)
self.nodes.update(other.nodes)
self.edges.update(other.edges)
edge_added = False
if self.loose_out_edges:
edge_added = True
for e in self.loose_out_edges:
e._target = other.in_node
self.edges[(e.source, e.target)] = e # updated/created edge is not yet in edge dict -> add
# clear loose edge sets
self._loose_out_edges = set()
elif other.loose_in_edges:
edge_added = True
for e in other.loose_in_edges:
e._source = self.out_node
self.edges[(e.source, e.target)] = e # updated/created edge is not yet in edge dict -> add
# clear loose edge set
other._loose_in_edges = set()
if self.both_loose_edges:
edge_added = True
for e in self.both_loose_edges:
e._target = other.in_node
self.add_edge(e) # updated/created edge is not yet in edge dict -> add
# clear loose edge set
self._both_loose_edges = set()
elif other.both_loose_edges:
edge_added = True
for e in other.both_loose_edges:
e._source = self.out_node
self.add_edge(e) # updated/created edge is not yet in edge dict -> add
# clear loose edge set
other._both_loose_edges = set()
if not edge_added:
# neither of the CFGs has loose ends -> add unconditional edge
e = Unconditional(self.out_node, other.in_node)
self.edges[(e.source, e.target)] = e # updated/created edge is not yet in edge dict -> add
# in any case, transfer loose_out_edges of other to self
self.loose_out_edges.update(other.loose_out_edges)
self.special_edges.extend(other.special_edges)
self._cfg._out_node = other.out_node
return self
def eject(self) -> ControlFlowGraph:
if self.loose():
raise TypeError('This control flow graph is still loose and can not eject a complete control flow graph!')
return self._cfg
def replace(self, other):
self.__dict__.update(other.__dict__)
def _dummy(id_gen):
return Basic(id_gen.next)
def _dummy_cfg(id_gen):
dummy = _dummy(id_gen)
return LooseControlFlowGraph({dummy}, dummy, dummy, set())
class NodeIdentifierGenerator:
"""
    A helper class to generate an increasing sequence of node identifiers.
"""
def __init__(self):
"""
Creates a sequencer which will return 1 as the first id.
"""
self._next = 0
@property
def next(self):
self._next += 1
return self._next
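# Usage sketch (illustrative): successive reads of `next` yield 1, 2, 3, ...
#
#     id_gen = NodeIdentifierGenerator()
#     id_gen.next  # -> 1
#     id_gen.next  # -> 2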
class CfgFactory:
"""
    A helper class that encapsulates a partial CFG and possibly some statements not yet attached to it.
    Whenever the method `complete_basic_block()` is called, all unattached statements are flushed into a new
    basic block and attached to the partial CFG. The partial CFG can be retrieved at any time via the
    property `cfg`.
"""
def __init__(self, id_gen):
self._stmts = []
self._cfg = None
self._id_gen = id_gen
@property
def cfg(self):
return self._cfg
def prepend_cfg(self, other):
if self._cfg is not None:
self._cfg.prepend(other)
else:
self._cfg = other
return self._cfg
def append_cfg(self, other):
if self._cfg is not None:
if self._cfg.loose_out_edges and other.loose_in_edges:
self._cfg.append(_dummy_cfg(self._id_gen))
self._cfg.append(other)
else:
self._cfg = other
return self._cfg
def add_stmts(self, stmts):
"""
Adds statements to the currently open block.
:param stmts: a single statement or an iterable of statements
:return:
"""
if isinstance(stmts, (List, Tuple)):
self._stmts.extend(list(stmts))
else:
self._stmts.append(stmts)
def complete_basic_block(self):
if self._stmts:
block = Basic(self._id_gen.next, self._stmts)
self.append_cfg(LooseControlFlowGraph({block}, block, block, set()))
self._stmts = []
def incomplete_block(self):
return len(self._stmts) > 0
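# Typical call pattern (illustrative sketch using only names defined above):
#
#     factory = CfgFactory(NodeIdentifierGenerator())
#     factory.add_stmts(stmt)            # buffer a single statement
#     factory.add_stmts([stmt2, stmt3])  # or several at once
#     factory.complete_basic_block()     # flush the buffer into a Basic block
#     partial_cfg = factory.cfg          # possibly still loose at this point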
# noinspection PyPep8Naming
class CfgVisitor(ast.NodeVisitor):
"""
This AST visitor generates a CFG recursively.
Overwritten methods return either a partial CFG or a statement/expression, depending on the type of node.
"""
def __init__(self):
super().__init__()
self._id_gen = NodeIdentifierGenerator()
def visit_Num(self, node):
        pp = ProgramPoint(node.lineno, node.col_offset)
        return self._ensure_stmt(pp, Literal(int, node.n))
def visit_Str(self, node):
        pp = ProgramPoint(node.lineno, node.col_offset)
        return self._ensure_stmt(pp, Literal(str, node.s))
def visit_Name(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
# TODO remove this name hack when type inferences work
typ = list if node.id.startswith("list") else int
        return self._ensure_stmt(pp, VariableIdentifier(typ, node.id))
def visit_Assign(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
value = self._ensure_stmt_visit(node.value)
stmts = [Assignment(pp, self._ensure_stmt_visit(target), value) for
target in node.targets]
return stmts
def visit_AugAssign(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
value = self._ensure_stmt_visit(node.value)
target = self._ensure_stmt_visit(node.target)
operand_call = Call(pp, type(node.op).__name__.lower(), [target, value], int)
stmt = Assignment(pp, target, operand_call)
return [stmt]
def visit_Module(self, node):
start_cfg = _dummy_cfg(self._id_gen)
body_cfg = self._translate_body(node.body, allow_loose_in_edges=True, allow_loose_out_edges=True)
end_cfg = _dummy_cfg(self._id_gen)
return start_cfg.append(body_cfg).append(end_cfg)
def visit_If(self, node):
def extend_special_edges(cfg):
"""extend special edges with IF_OUT edges and additional necessary dummy nodes"""
for special_edge, edge_type in cfg.special_edges:
dummy = _dummy(self._id_gen)
cfg.add_node(dummy)
# add a new IF_OUT edge where the special edge is at the moment, ending in new dummy node
cfg.add_edge(Unconditional(special_edge.source, dummy, Edge.Kind.IF_OUT))
# change position of special edge to be AFTER the new dummy
special_edge._source = dummy
body_cfg = self._translate_body(node.body)
pp_test = ProgramPoint(node.test.lineno, node.test.col_offset)
test = self.visit(node.test)
neg_test = Call(pp_test, "not", [test], bool)
body_cfg.add_edge(Conditional(None, test, body_cfg.in_node, Edge.Kind.IF_IN))
if body_cfg.out_node: # if control flow can exit the body at all, add an unconditional IF_OUT edge
body_cfg.add_edge(Unconditional(body_cfg.out_node, None, Edge.Kind.IF_OUT))
if node.orelse: # if there is else branch
orelse_cfg = self._translate_body(node.orelse)
orelse_cfg.add_edge(Conditional(None, neg_test, orelse_cfg.in_node, Edge.Kind.IF_IN))
if orelse_cfg.out_node: # if control flow can exit the else at all, add an unconditional IF_OUT edge
orelse_cfg.add_edge(Unconditional(orelse_cfg.out_node, None, Edge.Kind.IF_OUT))
extend_special_edges(orelse_cfg)
else:
orelse_cfg = LooseControlFlowGraph()
orelse_cfg.add_edge(Conditional(None, neg_test, None, Edge.Kind.DEFAULT))
extend_special_edges(body_cfg)
cfg = body_cfg.combine(orelse_cfg)
return cfg
def visit_While(self, node):
header_node = Loop(self._id_gen.next)
cfg = self._translate_body(node.body)
body_in_node = cfg.in_node
body_out_node = cfg.out_node
pp_test = ProgramPoint(node.test.lineno, node.test.col_offset)
test = self.visit(node.test)
neg_test = Call(pp_test, "not", [test], bool)
cfg.add_node(header_node)
cfg.in_node = header_node
cfg.add_edge(Conditional(header_node, test, body_in_node, Edge.Kind.LOOP_IN))
cfg.add_edge(Conditional(header_node, neg_test, None))
if body_out_node: # if control flow can exit the body at all, add an unconditional LOOP_OUT edge
cfg.add_edge(Unconditional(body_out_node, header_node, Edge.Kind.LOOP_OUT))
if node.orelse: # if there is else branch
orelse_cfg = self._translate_body(node.orelse)
if orelse_cfg.out_node: # if control flow can exit the else at all, add an unconditional DEFAULT edge
orelse_cfg.add_edge(Unconditional(orelse_cfg.out_node, None, Edge.Kind.DEFAULT))
cfg.append(orelse_cfg)
for special_edge, edge_type in cfg.special_edges:
if edge_type == LooseControlFlowGraph.SpecialEdgeType.CONTINUE:
cfg.add_edge(Unconditional(special_edge.source, header_node, Edge.Kind.LOOP_OUT))
elif edge_type == LooseControlFlowGraph.SpecialEdgeType.BREAK:
cfg.add_edge(Unconditional(special_edge.source, None, Edge.Kind.LOOP_OUT))
cfg.special_edges.clear()
return cfg
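    # Resulting shape for "while test: body" (editorial sketch):
    #
    #     header --test (LOOP_IN)--> body ... --(LOOP_OUT)--> header
    #     header --not test--> (loose edge, attached later by the caller)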
def visit_Break(self, _):
dummy = _dummy(self._id_gen)
cfg = LooseControlFlowGraph({dummy}, dummy, None)
        # the final kind of this special edge is not yet known: it may first be an
        # IF_OUT before becoming a LOOP_OUT, so set the kind to DEFAULT for now and
        # remember the special type of this edge separately
cfg.special_edges.append(
(Unconditional(dummy, None, Edge.Kind.DEFAULT), LooseControlFlowGraph.SpecialEdgeType.BREAK)
)
return cfg
def visit_Continue(self, _):
dummy = _dummy(self._id_gen)
cfg = LooseControlFlowGraph({dummy}, dummy, None)
        # the final kind of this special edge is not yet known: it may first be an
        # IF_OUT before becoming a LOOP_OUT, so set the kind to DEFAULT for now and
        # remember the special type of this edge separately
cfg.special_edges.append(
(Unconditional(dummy, None, Edge.Kind.DEFAULT), LooseControlFlowGraph.SpecialEdgeType.CONTINUE)
)
return cfg
def visit_UnaryOp(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
operand = self.visit(node.operand)
return Call(pp, type(node.op).__name__.lower(), [operand], int)
def visit_BinOp(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
return Call(pp, type(node.op).__name__.lower(),
[self._ensure_stmt_visit(node.left), self._ensure_stmt_visit(node.right)], int)
def visit_BoolOp(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
return Call(pp, type(node.op).__name__.lower(),
[self._ensure_stmt_visit(val) for val in node.values], bool)
def visit_Compare(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
last_comp = self._ensure_stmt_visit(node.comparators[0])
result = Call(pp, type(node.ops[0]).__name__.lower(),
[self._ensure_stmt_visit(node.left),
last_comp],
bool)
        for op, comp in list(zip(node.ops, node.comparators))[1:]:
            cur_comp = self._ensure_stmt_visit(comp)
            cur_call = Call(pp, type(op).__name__.lower(),
                            [last_comp, cur_comp],
                            bool)
            # advance the chain: the next comparison's left operand is the
            # current comparator (a < b < c means a < b and b < c)
            last_comp = cur_comp
            result = Call(pp, 'and',
                          [result, cur_call],
                          bool)
return result
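    # Illustrative decomposition (comment only, not executed): for the source
    # "a < b <= c", ast.parse(...).body[0].value is an ast.Compare with
    #     left        = Name('a')
    #     ops         = [Lt(), LtE()]
    #     comparators = [Name('b'), Name('c')]
    # and visit_Compare folds it into  and(lt(a, b), lte(b, c)),
    # mirroring Python's chained-comparison semantics.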
# noinspection PyMethodMayBeStatic
def visit_NameConstant(self, node):
return Literal(bool, str(node.value))
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Call(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(node.func, ast.Name):
if node.func.id == 'int':
typ = int
elif node.func.id == 'bool':
typ = bool
else:
typ = typing.Any
return Call(pp, node.func.id, [self.visit(arg) for arg in node.args], typ)
elif isinstance(node.func, ast.Attribute):
# visit the attribute access
attribute_access = self.visit(node.func)
# pass the receiver as the first argument
# NOTE: since this is a call, we do not use the attribute_access
# object since a method call in Python comes as an attribute access in the AST
return Call(pp, attribute_access.name, [attribute_access.receiver] + [self.visit(arg) for arg in node.args],
typing.Any)
else:
raise NotImplementedError(
f"The call statement receiver of type {str(type(node.func))} is not yet translatable to CFG!")
def visit_Attribute(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
receiver = self.visit(node.value)
return AttributeAccess(pp, receiver, node.attr, typing.Any)
def visit_List(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
return ListDisplayStmt(pp, [self.visit(e) for e in node.elts])
def visit_Subscript(self, node):
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(node.slice, ast.Index):
return IndexStmt(pp, self._ensure_stmt_visit(node.value, pp), self._ensure_stmt_visit(node.slice.value, pp))
elif isinstance(node.slice, ast.Slice):
return SliceStmt(pp, self._ensure_stmt_visit(node.value, pp),
self._ensure_stmt_visit(node.slice.lower, pp),
self._ensure_stmt_visit(node.slice.step, pp) if node.slice.step else None,
self._ensure_stmt_visit(node.slice.upper, pp))
else:
raise NotImplementedError(f"The statement {str(type(node.slice))} is not yet translatable to CFG!")
def generic_visit(self, node):
print(type(node).__name__)
super().generic_visit(node)
def _translate_body(self, body, allow_loose_in_edges=False, allow_loose_out_edges=False):
cfg_factory = CfgFactory(self._id_gen)
for child in body:
if isinstance(child, (ast.Assign, ast.AugAssign, ast.Expr)):
cfg_factory.add_stmts(self.visit(child))
elif isinstance(child, ast.If):
cfg_factory.complete_basic_block()
if_cfg = self.visit(child)
cfg_factory.append_cfg(if_cfg)
elif isinstance(child, ast.While):
cfg_factory.complete_basic_block()
while_cfg = self.visit(child)
cfg_factory.append_cfg(while_cfg)
elif isinstance(child, ast.Break):
cfg_factory.complete_basic_block()
break_cfg = self.visit(child)
cfg_factory.append_cfg(break_cfg)
elif isinstance(child, ast.Continue):
cfg_factory.complete_basic_block()
cont_cfg = self.visit(child)
cfg_factory.append_cfg(cont_cfg)
elif isinstance(child, ast.Pass):
if cfg_factory.incomplete_block():
pass
else:
cfg_factory.append_cfg(_dummy_cfg(self._id_gen))
else:
raise NotImplementedError(f"The statement {str(type(child))} is not yet translatable to CFG!")
cfg_factory.complete_basic_block()
if not allow_loose_in_edges and cfg_factory.cfg and cfg_factory.cfg.loose_in_edges:
cfg_factory.prepend_cfg(_dummy_cfg(self._id_gen))
if not allow_loose_out_edges and cfg_factory.cfg and cfg_factory.cfg.loose_out_edges:
cfg_factory.append_cfg(_dummy_cfg(self._id_gen))
return cfg_factory.cfg
@staticmethod
def _ensure_stmt(pp, expr):
if isinstance(expr, Statement):
return expr
elif isinstance(expr, Literal):
return LiteralEvaluation(pp, expr)
elif isinstance(expr, VariableIdentifier):
return VariableAccess(pp, expr)
else:
raise NotImplementedError(f"The expression {str(type(expr))} is not yet translatable to CFG!")
def _ensure_stmt_visit(self, node, pp=None):
result = self.visit(node)
pp = pp if pp else ProgramPoint(node.lineno, node.col_offset)
return CfgVisitor._ensure_stmt(pp, result)
def ast_to_cfg(root_node):
"""
    Create the control flow graph from an AST node.
:param root_node: the root node of the AST to be translated to CFG
:return: the CFG of the passed AST.
"""
loose_cfg = CfgVisitor().visit(root_node)
return loose_cfg.eject()
def source_to_cfg(code):
"""
Parses the given code and creates its control flow graph.
:param code: the code as a string
:return: the CFG of code
"""
root_node = ast.parse(code)
return ast_to_cfg(root_node)
if __name__ == '__main__':
main(sys.argv)
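# End-to-end usage sketch (illustrative; assumes this module is importable
# as frontend.cfg_generator):
#
#     from frontend.cfg_generator import source_to_cfg
#     cfg = source_to_cfg("x = 0\nwhile x < 3:\n    x = x + 1\n")
#     print(len(cfg.nodes))
#
# source_to_cfg() parses the code with ast.parse(), walks the tree with
# CfgVisitor, and eject()s the completed (non-loose) control flow graph.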
| gitsimon/spadup-lyra | frontend/cfg_generator.py | Python | mpl-2.0 | 23,329 | [
"VisIt"
] | 7de862e1fd265a7f3b83d8f5aafb923a438083bee4cc7802673740c5896e6486 |
from enum import Enum
import arrow
class Region(Enum):
brazil = "BR"
europe_north_east = "EUNE"
europe_west = "EUW"
japan = "JP"
korea = "KR"
latin_america_north = "LAN"
latin_america_south = "LAS"
north_america = "NA"
oceania = "OCE"
turkey = "TR"
russia = "RU"
@property
def platform(self) -> "Platform":
return getattr(Platform, self.name)
@property
def default_locale(self) -> str:
return DEFAULT_LOCALE[self]
@staticmethod
def from_platform(platform):
try:
return platform.region
except AttributeError:
return Platform(platform).region
@property
def timezone(self) -> str:
tzs = {
"NA": "GMT-8",
"LAN": "GMT-7",
"LAS": "GMT-5",
"BR": "GMT-4",
"EUW": "GMT-2",
"TR": "GMT-0",
"EUNE": "GMT+1",
"RU": "GMT+3",
"KR": "GMT+6",
"JP": "GMT+7",
"OCE": "GMT+8",
}
return tzs[self.value]
@property
def continent(self) -> "Continent":
if self is Region.brazil:
return Continent.americas
if self is Region.europe_north_east:
return Continent.europe
if self is Region.europe_west:
return Continent.europe
if self is Region.japan:
return Continent.asia
if self is Region.korea:
return Continent.asia
if self is Region.latin_america_north:
return Continent.americas
if self is Region.latin_america_south:
return Continent.americas
if self is Region.north_america:
return Continent.americas
if self is Region.oceania:
return (
Continent.americas
) # OCE content is managed by Americas server (as per https://i.imgur.com/FUyf5kv.png), this breaks OCE queries if set to Asia
if self is Region.turkey:
return Continent.europe
if self is Region.russia:
return Continent.europe
class Platform(Enum):
brazil = "BR1"
europe_north_east = "EUN1"
europe_west = "EUW1"
japan = "JP1"
korea = "KR"
latin_america_north = "LA1"
latin_america_south = "LA2"
north_america = "NA1"
oceania = "OC1"
turkey = "TR1"
russia = "RU"
@property
def region(self) -> "Region":
return getattr(Region, self.name)
@property
def default_locale(self) -> str:
return DEFAULT_LOCALE[self]
@staticmethod
def from_region(region):
try:
return region.platform
except AttributeError:
return Region(region).platform
@property
def continent(self):
return self.region.continent
DEFAULT_LOCALE = {
Region.brazil: "pt_BR",
Platform.brazil: "pt_BR",
Region.europe_north_east: "en_GB",
Platform.europe_north_east: "en_GB",
Region.europe_west: "en_GB",
Platform.europe_west: "en_GB",
Region.japan: "ja_JP",
Platform.japan: "ja_JP",
Region.korea: "ko_KR",
Platform.korea: "ko_KR",
Region.latin_america_north: "es_MX",
Platform.latin_america_north: "es_MX",
Region.latin_america_south: "es_AR",
Platform.latin_america_south: "es_AR",
Region.north_america: "en_US",
Platform.north_america: "en_US",
Region.oceania: "en_AU",
Platform.oceania: "en_AU",
Region.turkey: "tr_TR",
Platform.turkey: "tr_TR",
Region.russia: "ru_RU",
Platform.russia: "ru_RU",
}
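# Example lookups (illustrative): both Region and Platform members resolve
# through this table via their `default_locale` property, e.g.
#
#     Region.japan.default_locale    # -> "ja_JP"
#     Platform.korea.default_locale  # -> "ko_KR"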
class Continent(Enum):
americas = "AMERICAS"
asia = "ASIA"
europe = "EUROPE"
class Key(Enum):
Q = "Q"
W = "W"
E = "E"
R = "R"
class Resource(Enum):
mana = "Mana"
courage = "Courage"
energy = "Energy"
fury = "Fury"
rage = "Rage"
flow = "Flow"
ferocity = "Ferocity"
heat = "Heat"
shield = "Shield"
blood_well = "Blood Well"
crimson_rush = "Crimson Rush"
none = "None"
no_cost = "No Cost"
class Side(Enum):
blue = 100
red = 200
class MatchType(Enum): # TODO: Can we combine with GameType somehow?
ranked = "ranked"
normal = "normal"
tourney = "tourney"
tutorial = "tutorial"
class GameMode(Enum):
aram = "ARAM"
ascension = "ASCENSION"
classic = "CLASSIC"
showdown = "FIRSTBLOOD"
poro_king = "KINGPORO"
dominion = "ODIN"
one_for_all = "ONEFORALL"
tutorial = "TUTORIAL"
tutorial_1 = "TUTORIAL_MODULE_1"
tutorial_2 = "TUTORIAL_MODULE_2"
tutorial_3 = "TUTORIAL_MODULE_3"
nexus_siege = "SIEGE"
assassinate = "ASSASSINATE"
dark_star = "DARKSTAR"
all_random_summoners_rift = "ARSR"
urf = "URF"
doom_bots = "DOOMBOTSTEEMO"
star_guardian = "STARGUARDIAN"
project = "PROJECT"
overcharge = "OVERCHARGE"
all_random_urf_snow = "SNOWURF"
practice_tool = "PRACTICETOOL"
nexus_blitz = "NEXUSBLITZ"
odyssey = "ODYSSEY"
    ultbook = "ULTBOOK"
class MasteryTree(Enum):
cunning = "Cunning"
ferocity = "Ferocity"
resolve = "Resolve"
class Tier(Enum):
challenger = "CHALLENGER"
grandmaster = "GRANDMASTER"
master = "MASTER"
diamond = "DIAMOND"
platinum = "PLATINUM"
gold = "GOLD"
silver = "SILVER"
bronze = "BRONZE"
iron = "IRON"
unranked = "UNRANKED"
def __str__(self):
return self.name.title()
@staticmethod
def _order():
return {
Tier.challenger: 9,
Tier.grandmaster: 8,
Tier.master: 7,
Tier.diamond: 6,
Tier.platinum: 5,
Tier.gold: 4,
Tier.silver: 3,
Tier.bronze: 2,
Tier.iron: 1,
}
def __lt__(self, other):
return self._order()[self] < other._order()[other]
def __gt__(self, other):
return self._order()[self] > other._order()[other]
def __le__(self, other):
return self._order()[self] <= other._order()[other]
def __ge__(self, other):
return self._order()[self] >= other._order()[other]
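# Ordering examples (illustrative):
#
#     Tier.gold < Tier.diamond        # -> True
#     Tier.challenger >= Tier.iron    # -> True
#
# Note that Tier.unranked has no entry in _order(), so comparing it with
# another tier raises a KeyError.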
class Division(Enum):
one = "I"
two = "II"
three = "III"
four = "IV"
def __str__(self):
return self.value
@staticmethod
def _order():
return {Division.one: 4, Division.two: 3, Division.three: 2, Division.four: 1}
def __lt__(self, other):
return self._order()[self] < other._order()[other]
def __gt__(self, other):
return self._order()[self] > other._order()[other]
def __le__(self, other):
return self._order()[self] <= other._order()[other]
def __ge__(self, other):
return self._order()[self] >= other._order()[other]
class Rank:
def __init__(self, tier: Tier, division: Division):
self.tuple = (tier, division)
self.tier = tier
self.division = division
def __str__(self):
return "<{} {}>".format(self.tuple[0], self.tuple[1])
def __eq__(self, other):
return self.tuple == other.tuple
def __ne__(self, other):
return self.tuple != other.tuple
def __lt__(self, other):
return self.tuple < other.tuple
def __gt__(self, other):
return self.tuple > other.tuple
def __le__(self, other):
return self.tuple <= other.tuple
def __ge__(self, other):
return self.tuple >= other.tuple
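# Illustrative: Rank compares its (tier, division) tuples, which in turn
# delegate to the Tier and Division orderings defined above, e.g.
#
#     Rank(Tier.gold, Division.one) < Rank(Tier.diamond, Division.four)  # -> True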
class Season(Enum):
preseason_3 = "PRESEASON3"
season_3 = "SEASON3"
preseason_4 = "PRESEASON2014"
season_4 = "SEASON2014"
preseason_5 = "PRESEASON2015"
season_5 = "SEASON2015"
preseason_6 = "PRESEASON2016"
season_6 = "SEASON2016"
preseason_7 = "PRESEASON2017"
season_7 = "SEASON2017"
preseason_8 = "PRESEASON2018"
season_8 = "SEASON2018"
preseason_9 = "PRESEASON2019"
season_9 = "SEASON2019"
@property
def id(self):
return SEASON_IDS[self]
def from_id(id: int):
return {i: season for season, i in SEASON_IDS.items()}[id]
def start(self, region: Region) -> arrow.Arrow:
from .core import Patch
if Patch._Patch__patches is None:
Patch.__load__()
for patch in Patch._Patch__patches[region]:
if patch.season == self:
return patch.start
    def end(self, region: Region) -> arrow.Arrow:
        from .core import Patch
        if Patch._Patch__patches is None:  # lazy-load the patches, as in start()
            Patch.__load__()
        for patch in reversed(Patch._Patch__patches[region]):
if patch.season == self:
return patch.end
SEASON_IDS = {
Season.preseason_3: 0,
Season.season_3: 1,
Season.preseason_4: 2,
Season.season_4: 3,
Season.preseason_5: 4,
Season.season_5: 5,
Season.preseason_6: 6,
Season.season_6: 7,
Season.preseason_7: 8,
Season.season_7: 9,
Season.preseason_8: 10,
Season.season_8: 11,
Season.preseason_9: 12,
Season.season_9: 13,
}
class GameType(Enum):
custom = "CUSTOM_GAME"
tutorial = "TUTORIAL_GAME"
matched = "MATCHED_GAME"
class Lane(Enum):
top_lane = "TOP_LANE"
mid_lane = "MID_LANE"
bot_lane = "BOT_LANE"
jungle = "JUNGLE"
utility = "UTILITY"
def from_match_naming_scheme(string: str):
return {
"BOTTOM": Lane.bot_lane,
"MIDDLE": Lane.mid_lane,
"MID": Lane.mid_lane,
"TOP": Lane.top_lane,
"JUNGLE": Lane.jungle,
"UTILITY": Lane.utility,
"NONE": None,
}[string]
class Role(Enum):
duo = "DUO"
duo_carry = "DUO_CARRY"
duo_support = "DUO_SUPPORT"
none = "NONE"
solo = "SOLO"
def from_match_naming_scheme(string: str):
return {
"DUO": Role.duo,
"DUO_CARRY": Role.duo_carry,
"DUO_SUPPORT": Role.duo_support,
"NONE": Role.none,
"SOLO": Role.solo,
}[string]
class Position(Enum):
top = "TOP"
middle = "MIDDLE"
jungle = "JUNGLE"
bottom = "BOTTOM"
utility = "UTILITY"
apex = "APEX"
none = "NONE"
    def from_league_naming_scheme(string: str):
        return {
            "TOP": Position.top,
            "MIDDLE": Position.middle,
            "JUNGLE": Position.jungle,
            "BOTTOM": Position.bottom,
            # Position has no `support` member; UTILITY maps to utility
            "UTILITY": Position.utility,
            "NONE": Position.none,
        }[string]
class SummonersRiftArea(Enum):
none = "NONE"
nexus_blue = "NEXUS_BLUE"
nexus_red = "NEXUS_RED"
top_lane_blue = "TOP_LANE_BLUE"
top_lane_purple = "TOP_LANE_PURPLE"
top_lane_red = "TOP_LANE_RED"
mid_lane_blue = "MID_LANE_BLUE"
mid_lane_purple = "MID_LANE_PURPLE"
mid_lane_red = "MID_LANE_RED"
bot_lane_blue = "BOT_LANE_BLUE"
bot_lane_purple = "BOT_LANE_PURPLE"
bot_lane_red = "BOT_LANE_RED"
jungle_top_blue = "JUNGLE_TOP_BLUE"
jungle_top_red = "JUNGLE_TOP_RED"
jungle_bot_blue = "JUNGLE_BOT_BLUE"
jungle_bot_red = "JUNGLE_BOT_RED"
river_top = "RIVER_TOP"
river_bot = "RIVER_BOT"
def get_side(self) -> Side:
if "BLUE" in self.value:
return Side.blue
elif "RED" in self.value:
return Side.red
else:
return None
def get_lane(self) -> Lane:
if "TOP" in self.value:
return Lane.top_lane
elif "MID" in self.value:
return Lane.mid_lane
elif "BOT" in self.value:
return Lane.bot_lane
elif "JUNGLE" in self.value:
return Lane.jungle
else:
return None
@staticmethod
def from_position(position: "Position") -> "SummonersRiftArea":
from .core.match import Position
x, y = position.x, position.y
# Load the map if it isn't already loaded
try:
map = SummonersRiftArea.__map
except AttributeError:
import os
from PIL import Image
script_dir = os.path.dirname(__file__)
rel_path = "./resources/summonersRiftAreas.png"
map = Image.open(os.path.join(script_dir, rel_path))
SummonersRiftArea.__map_size = map.size
map = map.load()
SummonersRiftArea.__map = map
image_width, image_height = SummonersRiftArea.__map_size
min_x = -120
min_y = -120
max_x = 14870
max_y = 14980
width = max_x - min_x
height = max_y - min_y
x = round((x - min_x) / width * (image_width - 1))
y = round(abs(y - min_y - height) / height * (image_height - 1))
rgb = map[x, y][0]
color_mapping = {
0: SummonersRiftArea.none,
10: SummonersRiftArea.nexus_blue,
20: SummonersRiftArea.nexus_red,
30: SummonersRiftArea.top_lane_blue,
40: SummonersRiftArea.top_lane_purple,
50: SummonersRiftArea.top_lane_red,
60: SummonersRiftArea.mid_lane_blue,
70: SummonersRiftArea.mid_lane_purple,
80: SummonersRiftArea.mid_lane_red,
90: SummonersRiftArea.bot_lane_blue,
100: SummonersRiftArea.bot_lane_purple,
110: SummonersRiftArea.bot_lane_red,
120: SummonersRiftArea.jungle_top_blue,
130: SummonersRiftArea.jungle_top_red,
140: SummonersRiftArea.jungle_bot_blue,
150: SummonersRiftArea.jungle_bot_red,
160: SummonersRiftArea.river_top,
170: SummonersRiftArea.river_bot,
}
return color_mapping.get(rgb, SummonersRiftArea.none)
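# Editorial sketch of the coordinate normalization used in from_position()
# above, extracted as a standalone helper for illustration. The bounds are
# the same in-game map bounds assumed there; `game_to_pixel` is not part of
# the cassiopeia API.
def game_to_pixel(x, y, image_width, image_height,
                  min_x=-120, min_y=-120, max_x=14870, max_y=14980):
    width = max_x - min_x
    height = max_y - min_y
    px = round((x - min_x) / width * (image_width - 1))
    # the y axis is flipped: in-game y grows upward, image rows grow downward
    py = round(abs(y - min_y - height) / height * (image_height - 1))
    return px, py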
class Tower(Enum):
OUTER = "OUTER_TURRET"
INNER = "INNER_TURRET"
BASE = "BASE_TURRET"
NEXUS = "NEXUS_TURRET"
UNDEFINED = "UNDEFINED_TURRET"
# References for Queues:
# https://developer.riotgames.com/game-constants.html
# https://discussion.developer.riotgames.com/articles/3482/multiple-queueids-are-being-updated-with-patch-719.html
# https://github.com/stelar7/L4J8/blob/master/src/main/java/no/stelar7/api/l4j8/basic/constants/types/GameQueueType.java
class Queue(Enum):
custom = "CUSTOM" # 0
deprecated_blind_fives = "NORMAL_5x5_BLIND" # 2
deprecated_ranked_solo_fives = "CLASSIC" # 4
deprecated_ranked_premade_fives = "RANKED_PREMADE_5x5" # 6
deprecated_coop_ai_fives = "BOT_5x5" # 7
deprecated_blind_threes = "NORMAL_3x3" # 8
deprecated_ranked_premade_threes = "RANKED_PREMADE_3x3" # 9
deprecated_ranked_flex_threes = "RANKED_FLEX_TT_DEPRECATED" # 9 # There are two different queue names with ID 9... This one was replaced with queue 470. There is therefore no corresponding queue with ID 9 for this Queue, and instead the Queue with ID 470 will be used when this name is requested, even for very old games. In addition, there are two queues with the name "RANKED_FLEX_TT"; in order to avoid a name conflict, we renamed this one.
deprecated_draft_fives = "NORMAL_5x5_DRAFT" # 14
deprecated_blind_dominion = "ODIN_5x5_BLIND" # 16
deprecated_draft_dominion = "ODIN_5x5_DRAFT" # 17
deprecated_coop_ai_dominion = "BOT_ODIN_5x5" # 25
deprecated_coop_ai_intro_fives = "BOT_5x5_INTRO_DEPRECATED" # 31 # There are two queues with the name "BOT_5x5_INTRO" so this one has been renamed in order to avoid a conflict.
deprecated_coop_ai_beginner_fives = "BOT_5x5_BEGINNER_DEPRECATED" # 32 # There are two queues with the name "BOT_5x5_BEGINNER" so this one has been renamed in order to avoid a conflict.
deprecated_coop_ai_intermediate_fives = "BOT_5x5_INTERMEDIATE_DEPRECATED" # 33 # There are two queues with the name "BOT_5x5_INTERMEDIATE" so this one has been renamed in order to avoid a conflict.
deprecated_ranked_team_threes = "RANKED_TEAM_3x3" # 41
deprecated_ranked_team_fives = "RANKED_TEAM_5x5" # 42
deprecated_coop_ai_threes = "BOT_TT_3x3" # 52
deprecated_team_builder_fives = "GROUP_FINDER_5x5" # 61
deprecated_aram = "ARAM_5x5" # 65
one_for_all = "ONEFORALL_5x5" # 70
showdown_1v1 = "FIRSTBLOOD_1x1" # 72
showdown_2v2 = "FIRSTBLOOD_2x2" # 73
hexakill_summoners_rift = "SR_6x6" # 75
urf = "URF_5x5" # 76
mirror_mode_fives = "ONEFORALL_MIRRORMODE_5x5" # 78
urf_coop_ai = "BOT_URF_5x5" # 83
deprecated_doom_bots_rank_1 = "NIGHTMARE_BOT_5x5_RANK1" # 91
deprecated_doom_bots_rank_2 = "NIGHTMARE_BOT_5x5_RANK2" # 92
deprecated_doom_bots_rank_5 = "NIGHTMARE_BOT_5x5_RANK5" # 93
ascension = "ASCENSION_5x5" # 96
hexakill_twisted_treeline = "HEXAKILL" # 98
aram_butchers_bridge = "BILGEWATER_ARAM_5x5" # 100
deprecated_poro_king = "KING_PORO_5x5" # 300
nemesis_draft = "COUNTER_PICK" # 310
black_market_brawlers = "BILGEWATER_5x5" # 313
deprecated_nexus_siege = "SIEGE" # 315
definitely_not_dominion = "DEFINITELY_NOT_DOMINION_5x5" # 317
deprecated_all_random_urf = "ARURF_5X5" # 318
all_random_summoners_rift = "ARSR_5x5" # 325
normal_draft_fives = "TEAM_BUILDER_DRAFT_UNRANKED_5x5" # 400
deprecated_ranked_fives = "TEAM_BUILDER_DRAFT_RANKED_5x5" # 410
# TODO Evidently we originally had 420 as the commented out queue name below, but it may have changed?
# TODO But the queue name sent to the Leagues endpoint needs to be RANKED_SOLO_5x5 for ranked solo games.
ranked_solo_fives = "RANKED_SOLO_5x5" # 420
blind_fives = "NORMAL_5V5_BLIND_PICK" # 430
ranked_flex_fives = "RANKED_FLEX_SR" # 440
aram = "ARAM" # 450
blind_threes = "NORMAL_3X3_BLIND_PICK" # 460
blood_hunt_assassin = "ASSASSINATE_5x5" # 600
dark_star = "DARKSTAR_3x3" # 610
ranked_flex_threes = "RANKED_FLEX_TT" # 470
clash = "CLASH" # 700
coop_ai_intermediate_threes = "BOT_3X3_INTERMEDIATE" # 800
coop_ai_intro_threes = "BOT_3X3_INTRO" # 810
coop_ai_beginner_threes = "BOT_3X3_BEGINNER" # 820
coop_ai_intro_fives = "BOT_5X5_INTRO" # 830
coop_ai_beginner_fives = "BOT_5X5_BEGINNER" # 840
coop_ai_intermediate_fives = "BOT_5X5_INTERMEDIATE" # 850
all_random_urf = "ARURF_5X5" # 900
project = "PROJECT" # 910
poro_king = "KINGPORO" # 920
nexus_siege = "NEXUS_SIEGE" # 940
doom_bots_difficult = "NIGHTMARE_BOT_5X5_VOTE" # 950
doom_bots = "NIGHTMARE_BOT_5X5" # 960
guardian_invasion_normal = "INVASION_NORMAL" # 980
guardian_invasion_onslaught = "INVASION_ONSLAUGHT" # 990
overcharge = "OVERCHARGE" # 1000
all_random_urf_snow = "SNOWURF" # 1010
one_for_all_rapid = "ONEFORALL_RAPID_5x5" # 1020
odyssey_intro = "ODYSSEY_INTRO" # 1030
odyssey_cadet = "ODYSSEY_CADET" # 1040
odyssey_crewmember = "ODYSSEY_CREWMEMBER" # 1050
odyssey_captain = "ODYSSEY_CAPTAIN" # 1060
odyssey_onslaught = "ODYSSEY_ONSLAUGHT" # 1070
ranked_tft = "RANKED_TFT" # 1100
normal_tft = "NORMAL_TFT" # 1090
deprecated_nexus_blitz = "NEXUS_BLITZ" # 1200
nexus_blitz = "NEXUS_BLITZ" # 1300
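    # NOTE (editorial): "NEXUS_BLITZ" here duplicates deprecated_nexus_blitz
    # above (as "ARURF_5X5" duplicates deprecated_all_random_urf); with a
    # plain Enum, the second definition becomes an alias of the first member,
    # so the later QUEUE_IDS entry wins and from_id() cannot return the
    # deprecated id.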
ultimate_spellbook = "ULTIMATE_SPELLBOOK" # 1400
tutorial1 = "TUTORIAL_1" # Summoner's Rift Tutorial 1
tutorial2 = "TUTORIAL_2" # Summoner's Rift Tutorial 2
tutorial3 = "TUTORIAL_3" # Summoner's Rift Tutorial 3
    def from_id(id: int):
        return {i: queue for queue, i in QUEUE_IDS.items()}[id]
@property
def id(self):
return QUEUE_IDS[self]
QUEUE_IDS = {
Queue.custom: 0, # Custom games
Queue.deprecated_blind_fives: 2, # Summoner's Rift 5v5 Blind Pick games Deprecated in patch 7.19 in favor of queueId 430
Queue.deprecated_ranked_solo_fives: 4, # Summoner's Rift 5v5 Ranked Solo games Deprecated in favor of queueId 420
Queue.deprecated_ranked_premade_fives: 6, # Summoner's Rift 5v5 Ranked Premade games Game mode deprecated
Queue.deprecated_coop_ai_fives: 7, # Summoner's Rift Co-op vs AI games Deprecated in favor of queueId 32 and 33
Queue.deprecated_blind_threes: 8, # Twisted Treeline 3v3 Normal games Deprecated in patch 7.19 in favor of queueId 460
Queue.deprecated_ranked_premade_threes: 9, # Twisted Treeline 3v3 Ranked Flex games Deprecated in patch 7.19 in favor of queueId 470
Queue.deprecated_draft_fives: 14, # Summoner's Rift 5v5 Draft Pick games Deprecated in favor of queueId 400
Queue.deprecated_blind_dominion: 16, # Crystal Scar 5v5 Dominion Blind Pick games Game mode deprecated
Queue.deprecated_draft_dominion: 17, # Crystal Scar 5v5 Dominion Draft Pick games Game mode deprecated
Queue.deprecated_coop_ai_dominion: 25, # Crystal Scar Dominion Co-op vs AI games Game mode deprecated
Queue.deprecated_coop_ai_intro_fives: 31, # Summoner's Rift Co-op vs AI Intro Bot games Deprecated in patch 7.19 in favor of queueId 830
Queue.deprecated_coop_ai_beginner_fives: 32, # Summoner's Rift Co-op vs AI Beginner Bot games Deprecated in patch 7.19 in favor of queueId 840
Queue.deprecated_coop_ai_intermediate_fives: 33, # Summoner's Rift Co-op vs AI Intermediate Bot games Deprecated in patch 7.19 in favor of queueId 850
Queue.deprecated_ranked_team_threes: 41, # Twisted Treeline 3v3 Ranked Team games Game mode deprecated
Queue.deprecated_ranked_team_fives: 42, # Summoner's Rift 5v5 Ranked Team games Game mode deprecated
Queue.deprecated_coop_ai_threes: 52, # Twisted Treeline Co-op vs AI games Deprecated in patch 7.19 in favor of queueId 800
Queue.deprecated_team_builder_fives: 61, # Summoner's Rift 5v5 Team Builder games Game mode deprecated
Queue.deprecated_aram: 65, # Howling Abyss 5v5 ARAM games Deprecated in patch 7.19 in favor of queueId 450
Queue.one_for_all: 70, # Summoner's Rift One for All games
Queue.showdown_1v1: 72, # Howling Abyss 1v1 Snowdown Showdown games
Queue.showdown_2v2: 73, # Howling Abyss 2v2 Snowdown Showdown games
Queue.hexakill_summoners_rift: 75, # Summoner's Rift 6v6 Hexakill games
Queue.urf: 76, # Summoner's Rift Ultra Rapid Fire games
Queue.mirror_mode_fives: 78, # Summoner's Rift Mirrored One for All
Queue.urf_coop_ai: 83, # Summoner's Rift Co-op vs AI Ultra Rapid Fire games
Queue.deprecated_doom_bots_rank_1: 91, # Summoner's Rift Doom Bots Rank 1 games Deprecated in patch 7.21 in favor of queueId 950
Queue.deprecated_doom_bots_rank_2: 92, # Summoner's Rift Doom Bots Rank 2 games Deprecated in patch 7.21 in favor of queueId 950
Queue.deprecated_doom_bots_rank_5: 93, # Summoner's Rift Doom Bots Rank 5 games Deprecated in patch 7.21 in favor of queueId 950
Queue.ascension: 96, # Crystal Scar Ascension games
Queue.hexakill_twisted_treeline: 98, # Twisted Treeline 6v6 Hexakill games
Queue.aram_butchers_bridge: 100, # Butcher's Bridge 5v5 ARAM games
Queue.deprecated_poro_king: 300, # Howling Abyss King Poro games Deprecated in patch 7.19 in favor of queueId 920
Queue.nemesis_draft: 310, # Summoner's Rift Nemesis games
Queue.black_market_brawlers: 313, # Summoner's Rift Black Market Brawlers games
Queue.deprecated_nexus_siege: 315, # Summoner's Rift Nexus Siege games Deprecated in patch 7.19 in favor of queueId 940
Queue.definitely_not_dominion: 317, # Crystal Scar Definitely Not Dominion games
Queue.deprecated_all_random_urf: 318, # Summoner's Rift All Random URF games Game mode deprecated in patch 8.10 in favor is queueId 900
Queue.all_random_summoners_rift: 325, # Summoner's Rift All Random games
Queue.normal_draft_fives: 400, # Summoner's Rift 5v5 Draft Pick games
Queue.deprecated_ranked_fives: 410, # Summoner's Rift 5v5 Ranked Dynamic games Game mode deprecated in patch 6.22
Queue.ranked_solo_fives: 420, # Summoner's Rift 5v5 Ranked Solo games
Queue.blind_fives: 430, # Summoner's Rift 5v5 Blind Pick games
Queue.ranked_flex_fives: 440, # Summoner's Rift 5v5 Ranked Flex games
Queue.aram: 450, # Howling Abyss 5v5 ARAM games
Queue.blind_threes: 460, # Twisted Treeline 3v3 Blind Pick games
Queue.ranked_flex_threes: 470, # Twisted Treeline 3v3 Ranked Flex games
Queue.blood_hunt_assassin: 600, # Summoner's Rift Blood Hunt Assassin games
Queue.dark_star: 610, # Cosmic Ruins Dark Star games
Queue.clash: 700, # Summoner's Rift Clash games
Queue.coop_ai_intermediate_threes: 800, # Twisted Treeline Co-op vs. AI Intermediate Bot games
Queue.coop_ai_intro_threes: 810, # Twisted Treeline Co-op vs. AI Intro Bot games
Queue.coop_ai_beginner_threes: 820, # Twisted Treeline Co-op vs. AI Beginner Bot games
Queue.coop_ai_intro_fives: 830, # Summoner's Rift Co-op vs. AI Intro Bot games
Queue.coop_ai_beginner_fives: 840, # Summoner's Rift Co-op vs. AI Beginner Bot games
Queue.coop_ai_intermediate_fives: 850, # Summoner's Rift Co-op vs. AI Intermediate Bot games
Queue.all_random_urf: 900, # Summoner's Rift All Random URF games
Queue.project: 910,
Queue.poro_king: 920, # Howling Abyss Legend of the Poro King
Queue.nexus_siege: 940, # Summoner's Rift Nexus Siege games
Queue.doom_bots_difficult: 950, # Summoner's Rift Doom Bots games /w difficulty voting
Queue.doom_bots: 960, # Summoner's Rift Doom Bots games
Queue.guardian_invasion_normal: 980, # Valoran City Park Star Guardian Invasion: Normal games
Queue.guardian_invasion_onslaught: 990, # Valoran City Park Star Guardian Invasion: Onslaught games
Queue.overcharge: 1000, # Overcharge, PROJECT: Hunters games
Queue.all_random_urf_snow: 1010, # Summoner's Rift, Snow ARURF games
Queue.one_for_all_rapid: 1020, # Summoner's Rift One for All games (increased gold and exp gain)
Queue.odyssey_intro: 1030, # Odyssey: Extraction
Queue.odyssey_cadet: 1040, # Odyssey: Extraction
Queue.odyssey_crewmember: 1050, # Odyssey: Extraction
Queue.odyssey_captain: 1060, # Odyssey: Extraction
Queue.odyssey_onslaught: 1070, # Odyssey: Extraction
Queue.ranked_tft: 1100, # Convergence, Ranked Teamfight Tactics games
Queue.normal_tft: 1090, # Convergence, Normal Teamfight Tactics games
Queue.deprecated_nexus_blitz: 1200, # Nexus Blitz map Nexus Blitz Deprecated in patch 9.2 in favor of queueId 1300
Queue.nexus_blitz: 1300, # Nexus Blitz map Nexus Blitz
Queue.ultimate_spellbook: 1400, # Summoner's Rift Ultimate Spellbook
Queue.tutorial1: 2000, # Summoner's Rift Tutorial 1
Queue.tutorial2: 2010, # Summoner's Rift Tutorial 2
Queue.tutorial3: 2020, # Summoner's Rift Tutorial 3
}
RANKED_QUEUES = {
Queue.deprecated_ranked_solo_fives, # Summoner's Rift 5v5 Ranked Solo games Deprecated in favor of queueId 420
Queue.deprecated_ranked_premade_fives, # Summoner's Rift 5v5 Ranked Premade games Game mode deprecated
Queue.deprecated_ranked_premade_threes, # Twisted Treeline 3v3 Ranked Flex games Deprecated in patch 7.19 in favor of queueId 470
Queue.deprecated_ranked_team_threes, # Twisted Treeline 3v3 Ranked Team games Game mode deprecated
Queue.deprecated_ranked_team_fives, # Summoner's Rift 5v5 Ranked Team games Game mode deprecated
Queue.deprecated_ranked_fives, # Summoner's Rift 5v5 Ranked Dynamic games Game mode deprecated in patch 6.22
Queue.ranked_solo_fives, # Summoner's Rift 5v5 Ranked Solo games
Queue.ranked_flex_fives, # Summoner's Rift 5v5 Ranked Flex games
Queue.ranked_flex_threes, # Twisted Treeline 3v3 Ranked Flex games
Queue.ranked_tft, # Convergence Ranked Teamfight Tactics games
}
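# Round-trip examples (illustrative):
#
#     Queue.ranked_solo_fives.id                # -> 420
#     Queue.from_id(450)                        # -> Queue.aram
#     Queue.ranked_solo_fives in RANKED_QUEUES  # -> True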
| meraki-analytics/cassiopeia | cassiopeia/data.py | Python | mit | 27,636 | [
"CRYSTAL"
] | 0f79cb9190cebe06ed7c1f660181dc1fe4ee753dba35211dc9a0c4eef23903dc |
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from mox3 import mox
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
from oslo_utils import netutils
import six
import testtools
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import network as network_obj
from nova.objects import virtual_interface as vif_obj
from nova import quota
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_ldap
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_floating_ip
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_service
from nova.tests.unit import utils as test_utils
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_get_instance_nw_info_fake(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
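    # (Editorial note: the mox tests in this class follow the record/replay
    # pattern -- StubOutWithMock() swaps in a mock, each call on it records
    # an expectation, ReplayAll() switches to replay mode, and the test body
    # must then issue exactly the recorded calls or the test fails.)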
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(4, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_end(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, dhcp_server='192.168.0.11',
allowed_start='192.168.0.10',
allowed_end='192.168.0.245')
self.assertEqual(1, len(nets))
network = nets[0]
# gateway defaults to beginning of allowed_start
self.assertEqual('192.168.0.10', network['gateway'])
# vpn_server doesn't conflict with dhcp_start
self.assertEqual('192.168.0.12', network['vpn_private_address'])
# dhcp_start doesn't conflict with dhcp_server
self.assertEqual('192.168.0.13', network['dhcp_start'])
# NOTE(vish): 10 from the beginning, 10 from the end, and
# 1 for the gateway, 1 for the dhcp server,
# 1 for the vpn server
self.assertEqual(23, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_out_of_range(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AddressOutOfRange,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_start='192.168.1.10')
def test_validate_reserved_end_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidAddress,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_end='invalid')
def test_validate_cidr_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidCidr,
self.network.create_networks,
context_admin, 'fake', 'invalid', False,
1, 256)
def test_validate_non_int_size(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidIntValue,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 'invalid')
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'')]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None)]
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def test_get_instance_nw_info(self, get):
def make_ip(index):
vif = objects.VirtualInterface(uuid=index, address=index)
network = objects.Network(uuid=index,
bridge=index,
label=index,
project_id=index,
injected=False,
netmask='255.255.255.0',
dns1=None,
dns2=None,
cidr_v6=None,
gateway_v6=None,
broadcast_v6=None,
netmask_v6=None,
rxtx_base=None,
gateway='192.168.%s.1' % index,
dhcp_server='192.168.%s.1' % index,
broadcast='192.168.%s.255' % index,
cidr='192.168.%s.0/24' % index)
return objects.FixedIP(virtual_interface=vif,
network=network,
floating_ips=objects.FloatingIPList(),
address='192.168.%s.2' % index)
objs = [make_ip(index) for index in ('3', '1', '2')]
get.return_value = objects.FixedIPList(objects=objs)
nw_info = self.network.get_instance_nw_info(self.context, None,
None, None)
for i, vif in enumerate(nw_info):
self.assertEqual(objs[i].network.bridge, vif['network']['bridge'])
@mock.patch.object(objects.Network, 'get_by_id')
def test_add_fixed_ip_instance_using_id_without_vpn(self, get_by_id):
# Allocate a fixed ip from a network and assign it to an instance.
# Network is given by network id.
network_id = networks[0]['id']
with mock.patch.object(self.network,
'allocate_fixed_ip') as allocate_fixed_ip:
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
network_id)
# Assert that we fetched the network by id, not uuid
get_by_id.assert_called_once_with(self.context,
network_id, project_only='allow_none')
# Assert that we called allocate_fixed_ip for the given network and
# instance. We should not have requested a specific address from the
# network.
allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID,
get_by_id.return_value,
address=None)
@mock.patch.object(objects.Network, 'get_by_uuid')
def test_add_fixed_ip_instance_using_uuid_without_vpn(self, get_by_uuid):
# Allocate a fixed ip from a network and assign it to an instance.
# Network is given by network uuid.
network_uuid = networks[0]['uuid']
with mock.patch.object(self.network,
'allocate_fixed_ip') as allocate_fixed_ip,\
mock.patch.object(self.context, 'elevated',
return_value=mock.sentinel.elevated):
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
network_uuid)
# Assert that we fetched the network by uuid, not id, and with elevated
# context
get_by_uuid.assert_called_once_with(mock.sentinel.elevated,
network_uuid)
# Assert that we called allocate_fixed_ip for the given network and
# instance. We should not have requested a specific address from the
# network.
allocate_fixed_ip.assert_called_once_with(self.context,
FAKEUUID,
get_by_uuid.return_value,
address=None)
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(2, len(names))
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(1, len(names))
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(1, len(addresses))
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(1, len(addresses))
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(0, len(addresses))
def test_allocate_fixed_ip_instance_dns(self):
# Test DNS entries are created when allocating a fixed IP.
# Allocate a fixed IP to an instance. Ensure that dns entries have been
# created for the instance's name and uuid.
network = network_obj.Network._from_db_object(
self.context, network_obj.Network(), test_network.fake_network)
network.save = mock.MagicMock()
# Create a minimal instance object
instance_params = {
'display_name': HOST,
'security_groups': []
}
instance = fake_instance.fake_instance_obj(
context.RequestContext('ignore', 'ignore'),
expected_attrs=instance_params.keys(), **instance_params)
instance.save = mock.MagicMock()
# We don't specify a specific address, so we should get a FixedIP
# automatically allocated from the pool. Fix its value here.
fip = objects.FixedIP(address='192.168.0.101')
fip.save = mock.MagicMock()
with mock.patch.object(objects.Instance, 'get_by_uuid',
return_value=instance),\
mock.patch.object(objects.FixedIP, 'associate_pool',
return_value=fip):
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
instance_manager = self.network.instance_dns_manager
expected_addresses = ['192.168.0.101']
# Assert that we have a correct entry by instance display name
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(expected_addresses, addresses)
# Assert that we have a correct entry by instance uuid
addresses = instance_manager.get_entries_by_name(FAKEUUID,
self.network.instance_dns_domain)
self.assertEqual(expected_addresses, addresses)
def test_allocate_floating_ip(self):
self.assertIsNone(self.network.allocate_floating_ip(self.context,
1, None))
def test_deallocate_floating_ip(self):
self.assertIsNone(self.network.deallocate_floating_ip(self.context,
1, None))
def test_associate_floating_ip(self):
self.assertIsNone(self.network.associate_floating_ip(self.context,
None, None))
def test_disassociate_floating_ip(self):
self.assertIsNone(self.network.disassociate_floating_ip(self.context,
None, None))
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(1, res[0]['id'])
self.assertEqual(0, res[1]['id'])
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_allocate_calculates_quota_auth(self, util_method, reserve,
get_by_uuid):
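        # If Quotas.reserve raises OverQuota, allocate_fixed_ip is expected
        # to surface it as FixedIpLimitExceeded; the usages dict mimics the
        # payload the quota engine attaches to the exception.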
inst = objects.Instance()
inst['uuid'] = 'nosuch'
get_by_uuid.return_value = inst
usages = {'fixed_ips': {'in_use': 10, 'reserved': 1}}
reserve.side_effect = exception.OverQuota(overs='testing',
quotas={'fixed_ips': 10},
usages=usages)
util_method.return_value = ('foo', 'bar')
self.assertRaises(exception.FixedIpLimitExceeded,
self.network.allocate_fixed_ip,
self.context, 123, {'uuid': 'nosuch'})
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_deallocate_calculates_quota_auth(self, util_method, reserve,
get_by_address):
inst = objects.Instance(uuid='fake-uuid')
fip = objects.FixedIP(instance_uuid='fake-uuid',
virtual_interface_id=1)
get_by_address.return_value = fip
util_method.return_value = ('foo', 'bar')
# This will fail right after the reserve call when it tries
# to look up the fake instance we created above
self.assertRaises(exception.InstanceNotFound,
self.network.deallocate_fixed_ip,
self.context, '1.2.3.4', instance=inst)
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1,
vif_id=1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
def test_allocate_fixed_ip_cleanup(self,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
mock_fixedip_associate.return_value = fip
instance = objects.Instance(context=self.context)
instance.create()
mock_instance_get.return_value = instance
mock_vif_get.return_value = vif_obj.VirtualInterface(
instance_uuid='fake-uuid', id=1)
with test.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
mock_setup_network.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=address)
mock_dns_manager.delete_entry.assert_has_calls([
mock.call(instance.display_name, ''),
mock.call(instance.uuid, '')
])
mock_fixedip_disassociate.assert_called_once_with(self.context)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate_pool')
@mock.patch('nova.network.manager.NetworkManager._add_virtual_interface')
def test_allocate_fixed_ip_create_new_vifs(self,
mock_add,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1000)
net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}
instance = objects.Instance(context=self.context)
instance.create()
        vif = objects.VirtualInterface(self.context,
id=1000,
address='00:00:00:00:00:00',
instance_uuid=instance.uuid,
network_id=net['id'],
uuid='nosuch')
mock_fixedip_associate.return_value = fip
mock_add.return_value = vif
mock_instance_get.return_value = instance
mock_vif_get.return_value = None
with test.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
self.network.allocate_fixed_ip(self.context, instance['uuid'],
net)
mock_add.assert_called_once_with(self.context, instance['uuid'],
net['id'])
self.assertEqual(fip.virtual_interface_id, vif.id)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch.object(db, 'virtual_interface_get_by_instance_and_network',
return_value=None)
@mock.patch('nova.objects.fixed_ip.FixedIP')
def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip,
mock_get_vif, mock_instance_get):
# Tests that we don't try to do anything with fixed IPs if
# _add_virtual_interface fails.
instance = fake_instance.fake_instance_obj(self.context)
mock_instance_get.return_value = instance
network = {'cidr': '24', 'id': 1,
'uuid': '398399b3-f696-4859-8695-a6560e14cb02'}
vif_error = exception.VirtualInterfaceMacAddressException()
# mock out quotas because we don't care in this test
with mock.patch.object(self.network, 'quotas_cls', objects.QuotasNoOp):
with mock.patch.object(self.network, '_add_virtual_interface',
side_effect=vif_error):
self.assertRaises(
exception.VirtualInterfaceMacAddressException,
self.network.allocate_fixed_ip, self.context,
'9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', network)
self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls))
class FlatDHCPNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(FlatDHCPNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
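        # With two floating IPs to restore, a deferred iptables apply should
        # collapse everything into a single _apply call; fake_apply counts
        # the calls and the test asserts exactly one.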
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
class VlanNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
def test_vpn_allocate_fixed_ip(self):
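        # Allocating with vpn=True should claim the network's reserved VPN
        # address: note the reserved=True expectation on fixed_ip_associate
        # below, which (judging by test_allocate_fixed_ip further down) the
        # ordinary pool path does not use.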
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
network_id=mox.IgnoreArg(),
reserved=True,
virtual_interface_id=vifs[0]['id']
).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_allocate_fixed_ip(self):
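        # A plain allocation (no vpn flag) goes through
        # fixed_ip_associate_pool instead of reserving the VPN address, even
        # though the network has a vpn_private_address set.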
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None,
virtual_interface_id=vifs[0]['id']
).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.network.manager.VlanManager._setup_network_on_host')
@mock.patch('nova.network.manager.VlanManager.'
'_validate_instance_zone_for_dns_domain')
@mock.patch('nova.network.manager.VlanManager.'
'_do_trigger_security_group_members_refresh_for_instance')
@mock.patch('nova.network.manager.VlanManager._add_virtual_interface')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.VirtualInterface.get_by_instance_and_network')
def test_allocate_fixed_ip_return_none(self, mock_get,
mock_associate, mock_get_uuid, mock_add, mock_trigger,
mock_validate, mock_setup):
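        # get_by_instance_and_network returns None here, simulating an
        # instance with no VIF on this network yet; the manager should then
        # create one via _add_virtual_interface (asserted below).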
net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=netaddr.IPAddress('1.2.3.4'),
virtual_interface_id=1)
instance = objects.Instance(context=self.context)
instance.create()
vif = objects.VirtualInterface(self.context,
id=1000,
address='00:00:00:00:00:00',
instance_uuid=instance.uuid,
network_id=net['id'],
uuid='nosuch')
mock_associate.return_value = fip
mock_add.return_value = vif
mock_get.return_value = None
mock_get_uuid.return_value = instance
mock_validate.return_value = False
self.network.allocate_fixed_ip(self.context_admin, instance.uuid, net)
mock_add.assert_called_once_with(self.context_admin, instance.uuid,
net['id'])
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1,
vif_id=1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True,
vif_id=1)
@mock.patch.object(db, 'virtual_interface_get_by_instance_and_network',
return_value=None)
@mock.patch('nova.objects.fixed_ip.FixedIP')
def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip,
mock_get_vif):
# Tests that we don't try to do anything with fixed IPs if
# _add_virtual_interface fails.
vif_error = exception.VirtualInterfaceMacAddressException()
with mock.patch.object(self.network, '_add_virtual_interface',
side_effect=vif_error):
self.assertRaises(exception.VirtualInterfaceMacAddressException,
self.network.allocate_fixed_ip, self.context,
'9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0',
networks[0])
self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls))
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
        # VLAN 100 is already in use and we force the network to be created
        # in that VLAN (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
        # VLAN 100 and 101 are used, so this network should be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(102, networks[0]["vlan"])
def test_vlan_start_multiple(self):
        # VLAN 100 and 101 are used, so these networks should be created in
        # 102 and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(102, networks[0]["vlan"])
self.assertEqual(103, networks[1]["vlan"])
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(102, networks[0]["vlan"])
def test_vlan_parameter(self):
        # The vlan parameter cannot be greater than 4094.
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=4095, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be greater than 4094'
self.assertIn(error_msg, six.text_type(exc))
        # The vlan parameter cannot be less than 1.
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=0, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be less than 1'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_be_integer(self):
# vlan must be an integer
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan='fake', cidr='192.168.0.1/24')
error_msg = 'vlan must be an integer'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_multiple_without_dhcp_server(self):
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual("192.168.3.1", networks[0]["dhcp_server"])
self.assertEqual("192.168.3.129", networks[1]["dhcp_server"])
def test_vlan_multiple_with_dhcp_server(self):
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100, dhcp_server='192.168.3.1')
self.assertEqual("192.168.3.1", networks[0]["dhcp_server"])
self.assertEqual("192.168.3.1", networks[1]["dhcp_server"])
def test_validate_networks(self):
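        # Both requested (network uuid, fixed ip) pairs resolve to fixed IPs
        # with instance_uuid=None, i.e. unassociated; validation completing
        # without an exception is the whole assertion.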
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[1]['id'],
network=dict(test_network.fake_network,
**networks[1]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed1)
db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[0]['id'],
network=dict(test_network.fake_network,
**networks[0]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed2)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id + '1')
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id='testproject')
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
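        # Stub out the pool-existence check and the db allocation so only
        # the manager-level bookkeeping runs; success here simply means no
        # exception is raised.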
self.stubs.Set(self.network, '_floating_ip_pool_exists',
lambda _x, _y: True)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=1)
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # this time it should raise because the floating IP is associated
        # with a fixed IP
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
mock_commit.assert_called_once_with(ctxt, 'reserve',
project_id='testproject')
@mock.patch('nova.db.fixed_ip_get')
def test_associate_floating_ip(self, fixed_get):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
network=test_network.fake_network)
# floating ip that's already associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1)
# floating ip that isn't associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise processutils.ProcessExecutionError('',
'Cannot find device "em0"\n')
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
'1.2.3.4',
'1.2.3.5',
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is already associated with a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
instance_uuid='fake_uuid',
network=test_network.fake_network)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
# Tried to verify order with documented mox record/verify
# functionality, but it doesn't seem to work since I can't make it
# fail. I'm using stubs and a flag for now, but if this mox feature
# can be made to work, it would be a better way to test this.
#
# self.mox.StubOutWithMock(self.network.driver,
# 'ensure_floating_forward')
# self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
#
# self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg())
# self.network.driver.bind_floating_ip(mox.IgnoreArg(),
# mox.IgnoreArg())
# self.mox.ReplayAll()
nat_called = [False]
def fake_nat(*args, **kwargs):
nat_called[0] = True
def fake_bind(*args, **kwargs):
self.assertTrue(nat_called[0])
self.stubs.Set(self.network.driver,
'ensure_floating_forward',
fake_nat)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
@mock.patch('nova.db.floating_ip_get_all_by_host')
@mock.patch('nova.db.fixed_ip_get')
def _test_floating_ip_init_host(self, fixed_get, floating_get,
public_interface, expected_arg):
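        # Three floating IPs on the host: one with no fixed_ip_id (ignored),
        # one whose fixed IP resolves (must be re-plumbed on init), and one
        # whose fixed IP lookup raises FixedIpNotFound (also ignored). Only
        # the middle one should reach l3driver.add_floating_ip.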
floating_get.return_value = [
dict(test_floating_ip.fake_floating_ip,
interface='foo',
address='1.2.3.4'),
dict(test_floating_ip.fake_floating_ip,
interface='fakeiface',
address='1.2.3.5',
fixed_ip_id=1),
dict(test_floating_ip.fake_floating_ip,
interface='bar',
address='1.2.3.6',
fixed_ip_id=2),
]
def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
return dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network=test_network.fake_network)
raise exception.FixedIpNotFound(id=fixed_ip_id)
fixed_get.side_effect = fixed_ip_get
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=public_interface)
self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
netaddr.IPAddress('1.2.3.4'),
expected_arg,
mox.IsA(objects.Network))
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# floating ip that is associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
project_id=ctxt.project_id)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False,
host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
auto_assigned=True,
project_id=ctxt.project_id)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is not associated with a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
        # raises because an auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None,
virtual_interface_id=vifs[0]['id']
).AndReturn(fixed)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
fixed_get):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
float_ip = db.floating_ip_create(context1.elevated(),
{'address': '1.2.3.4',
'project_id': context1.project_id})
float_addr = float_ip['address']
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid']).address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
        # Associating from another project's non-admin context must be
        # forbidden
self.assertRaises(exception.Forbidden,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
        # Deallocating from the other project must also be forbidden
self.assertRaises(exception.Forbidden,
self.network.deallocate_floating_ip,
context2,
float_addr)
        # Now associate the address from the owning project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
        # Now try disassociating it from the other project
self.assertRaises(exception.Forbidden,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
"""Verify that release is called properly.
Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
'DE:AD:BE:EF:00:00')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def _deallocate_fixed_with_dhcp(self, mock_dev_exists, fixed_update,
net_get, fixed_get):
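        # Shared body for the two dhcp_release tests below; the callers patch
        # linux_net.device_exists, and dhcp_release must only be executed
        # when the bridge device actually exists.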
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
with test.nested(
mock.patch.object(db, 'virtual_interface_get', vif_get),
mock.patch.object(
utils, 'execute',
side_effect=processutils.ProcessExecutionError()),
) as (_vif_get, _execute):
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1,
instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(
test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.network.deallocate_fixed_ip(context1, fix_addr.address,
'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
mock_dev_exists.assert_called_once_with(networks[1]['bridge'])
if mock_dev_exists.return_value:
_execute.assert_called_once_with('dhcp_release',
networks[1]['bridge'],
fix_addr.address,
'DE:AD:BE:EF:00:00',
run_as_root=True)
@mock.patch('nova.network.linux_net.device_exists', return_value=True)
def test_deallocate_fixed_with_dhcp(self, mock_dev_exists):
self._deallocate_fixed_with_dhcp(mock_dev_exists)
@mock.patch('nova.network.linux_net.device_exists', return_value=False)
def test_deallocate_fixed_without_dhcp(self, mock_dev_exists):
self._deallocate_fixed_with_dhcp(mock_dev_exists)
def test_deallocate_fixed_deleted(self):
        # Verify that we don't deallocate a deleted fixed_ip from a deleted
        # network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
        elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
"""Verify that deallocate doesn't raise when no vif is returned.
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
fixed_update.return_value = fixed_get.return_value
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
# Verify IP is not deallocated if the security group refresh fails.
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = objects.FixedIP.associate_pool(elevated, 1,
instance['uuid'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, str(fix_addr.address), 'fake')
self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(1, res[0]['id'])
self.assertEqual(0, res[1]['id'])
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
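# Minimal attribute bag standing in for a db dnsdomain row in the tests
# below.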
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in six.iteritems(kwargs):
self.__setattr__(k, v)
class CommonNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
domain = 'example.com'
az = 'test_az'
domains = {
domain: _TestDomainObject(
domain=domain,
availability_zone=az)}
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
manager = network_manager.NetworkManager()
res = manager._validate_instance_zone_for_dns_domain(self.context,
fake_instance)
self.assertTrue(res)
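    # No-op stand-in for NetworkManager._create_fixed_ips, stubbed in by the
    # create_networks tests below so no fixed IP rows are actually created.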
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
return None
def test_get_instance_nw_info_client_exceptions(self):
manager = network_manager.NetworkManager()
self.mox.StubOutWithMock(manager.db,
'fixed_ip_get_by_instance')
manager.db.fixed_ip_get_by_instance(
self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
manager.get_instance_nw_info,
self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info(self, fixed_get,
instance_get):
manager = fake_network.FakeNetworkManager()
db = manager.db
instance_get.return_value = fake_inst(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
manager.deallocate_for_instance(
ctx, instance=objects.Instance._from_db_object(self.context,
objects.Instance(), instance_get.return_value))
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info_with_update_dns_entries(
self, fixed_get):
self.flags(update_dns_entries=True)
manager = fake_network.FakeNetworkManager()
db = manager.db
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
with mock.patch.object(manager.network_rpcapi,
'update_dns') as mock_update_dns:
manager.deallocate_for_instance(
ctx, instance=fake_instance.fake_instance_obj(ctx))
mock_update_dns.assert_called_once_with(ctx, ['123'])
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
def test_deallocate_for_instance_with_requested_networks(self):
manager = fake_network.FakeNetworkManager()
db = manager.db
db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('ignore', 'ignore')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in [('123', '1.2.3.4'), ('123', '4.3.2.1'),
('123', None)]])
manager.deallocate_for_instance(
ctx,
instance=fake_instance.fake_instance_obj(ctx),
requested_networks=requested_networks)
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
], manager.deallocate_fixed_ip_calls)
def test_deallocate_for_instance_with_update_dns_entries(self):
self.flags(update_dns_entries=True)
manager = fake_network.FakeNetworkManager()
db = manager.db
db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('ignore', 'ignore')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
with mock.patch.object(manager.network_rpcapi,
'update_dns') as mock_update_dns:
manager.deallocate_for_instance(
ctx,
instance=fake_instance.fake_instance_obj(ctx),
requested_networks=requested_networks)
mock_update_dns.assert_called_once_with(ctx, ['123'])
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
@mock.patch('nova.db.fixed_ip_disassociate')
def test_remove_fixed_ip_from_instance(self, disassociate, get):
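        # FakeNetworkManager records the last address passed to
        # deallocate_fixed_ip in deallocate_called; removing the fixed IP
        # must both deallocate it and disassociate it at the db layer.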
manager = fake_network.FakeNetworkManager()
get.return_value = [
dict(test_fixed_ip.fake_fixed_ip, **x)
for x in manager.db.fixed_ip_get_by_instance(None,
FAKEUUID)]
manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
HOST,
'10.0.0.1')
self.assertEqual('10.0.0.1', manager.deallocate_called)
disassociate.assert_called_once_with(self.context, '10.0.0.1')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_remove_fixed_ip_from_instance_bad_input(self, get):
manager = fake_network.FakeNetworkManager()
get.return_value = []
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
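        # create_networks is driven positionally in these tests; judging by
        # the values used, the leading arguments are (context, label, cidr,
        # multi_host, num_networks, network_size, ...). That reading is an
        # inference from the calls here, not from the manager's signature.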
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/24')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.9/25')]
# CidrConflict: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/25')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
in_use = [dict(test_network.fake_network, **values) for values in
[{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]]
get_all.return_value = in_use
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
3, 64, None, None, None, None, None)
        # CidrConflict: not enough subnets available to satisfy the
        # requested num_networks - some subnets in the requested range are
        # already in use
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
cidr='192.168.0.0/24')]
# CidrConflict: cidr already in use
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
        # ValueError: not enough subnets available to satisfy the requested
        # num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/8')]
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
# CidrConflict: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_with_uuid(self):
cidr = '192.168.0.0/24'
uuid = FAKEUUID
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
kwargs = {'uuid': uuid}
nets = manager.create_networks(*args, **kwargs)
self.assertEqual(1, len(nets))
net = nets[0]
self.assertEqual(uuid, net['uuid'])
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/24')]
args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
        # Greedily match everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(_vifs), len(res))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(1, len(res))
self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(1, len(res))
self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(2, len(res))
self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid'])
self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(2, len(res))
self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
def _network_get(context, network_id, **args):
return dict(test_network.fake_network,
**manager.db.network_get(context, network_id))
network_get.side_effect = _network_get
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(_vifs), len(res))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(1, len(res))
self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(1, len(res))
self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                      {'ip6': '.*ef0[12]'})
self.assertTrue(res)
self.assertEqual(2, len(res))
self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid'])
self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(2, len(res))
self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(1, len(res))
self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(1, len(res))
self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(uuid, network['uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
        self.assertEqual(2, len(output))
self.assertEqual('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
output[0]['uuid'])
self.assertEqual('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
output[1]['uuid'])
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
fake_networks = [dict(test_network.fake_network, **n)
for n in networks]
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()
).MultipleTimes().AndReturn(fake_networks)
self.mox.ReplayAll()
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
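        # For illustration only (binary_name and the CIDR are assumptions,
        # not values from the fixtures): with binary_name "nova-network" and
        # networks[0]['cidr'] == '192.168.0.0/24', the first SNAT rule above
        # expands to:
        #   [0:0] -A nova-network-snat -s 192.168.0.0/24 -d 0.0.0.0/0
        #     -j SNAT --to-source 172.16.0.1 -o eth1   (one rule, wrapped)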
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
                       'uuid': 'cccccccc-cccc-cccc-cccc-cccccccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
new_network_obj = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **new_network))
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network_obj)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
@mock.patch('nova.objects.quotas.Quotas.rollback')
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.network.manager.NetworkManager.'
'_do_trigger_security_group_members_refresh_for_instance')
def test_fixed_ip_cleanup_rollback(self, fake_trig,
fixed_get, rollback):
manager = network_manager.NetworkManager()
fake_trig.side_effect = test.TestingException
self.assertRaises(test.TestingException,
manager.deallocate_fixed_ip,
self.context, 'fake', 'fake',
instance=fake_inst(uuid='ignoreduuid'))
rollback.assert_called_once_with()
def test_fixed_cidr_out_of_range(self):
manager = network_manager.NetworkManager()
ctxt = context.get_admin_context()
self.assertRaises(exception.AddressOutOfRange,
manager.create_networks, ctxt, label="fake",
cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.NoDBTestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
        Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes.
"""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return test_network.fake_network
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(address, rval)
class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(AllocateTestCase, self).setUp()
dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
self.flags(instance_dns_manager=dns)
self.useFixture(test.SampleNetworks())
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.user_context = context.RequestContext('testuser',
'testproject')
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = objects.Instance(context=self.context)
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create()
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.user_context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
self.assertEqual(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(netutils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance=inst)
def test_allocate_for_instance_illegal_network(self):
networks = db.network_get_all(self.context)
requested_networks = []
for network in networks:
# set all networks to other projects
db.network_update(self.context, network['id'],
{'host': self.network.host,
'project_id': 'otherid'})
requested_networks.append((network['uuid'], None))
# set the first network to our project
db.network_update(self.context, networks[0]['id'],
{'project_id': self.user_context.project_id})
inst = objects.Instance(context=self.context)
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create()
self.assertRaises(exception.NetworkNotFoundForProject,
self.network.allocate_for_instance, self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=self.context.project_id, macs=None,
requested_networks=requested_networks)
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
assigned_macs = [vif['address'] for vif in nw_info]
self.assertEqual(1, len(assigned_macs))
self.assertEqual(available_macs.pop(), assigned_macs[0])
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_not_enough_macs(self):
available_macs = set()
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
self.network.allocate_for_instance,
self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
REQUIRES_LOCKING = True
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.service_get_by_host_and_binary')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
service_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=12)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
service_get.return_value = test_service.fake_service
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_associate_floating_ip_multi_host_calls(self, floating_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=None)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.instance_create(self.context,
{"project_id": self.project_id})
        # Run it twice to make it fault if it does not handle
        # instances without fixed networks.
        # If either call fails, deallocation does not handle instances
        # that have no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocate_floating_ip_quota_rollback(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
self.mox.StubOutWithMock(self.network,
'_floating_ip_owned_by_project')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.reserve(self.context,
floating_ips=-1,
project_id='testproject').AndReturn('fake-rsv')
self.network._floating_ip_owned_by_project(self.context,
mox.IgnoreArg())
db.floating_ip_deallocate(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
quota.QUOTAS.rollback(self.context, 'fake-rsv',
project_id='testproject')
self.mox.ReplayAll()
self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.deleted = True
instance.create()
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.create()
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_get_by_address')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_start(self, floating_update, floating_get,
fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
floating_get.side_effect = fake_floating_ip_get_by_address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_clean_conntrack(fixed_ip):
if not str(fixed_ip) == "10.0.0.2":
raise exception.FixedIpInvalid(address=fixed_ip)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.stubs.Set(self.network.driver, 'clean_conntrack',
fake_clean_conntrack)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
self.assertEqual(2, called['count'])
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_finish(self, floating_update, fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_add_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(2, called['count'])
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(2, len(entries))
self.assertEqual(name1, entries[0])
self.assertEqual(name2, entries[1])
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEqual(1, len(entries))
self.assertEqual(address1, entries[0])
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(1, len(entries))
self.assertEqual(name2, entries[0])
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
self.network.create_public_dns_domain(self.context, domain1,
'testproject')
self.network.create_public_dns_domain(self.context, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEqual(2, len(domains))
self.assertEqual(domain1, domains[0]['domain'])
self.assertEqual(domain2, domains[1]['domain'])
self.assertEqual('testproject', domains[0]['project'])
self.assertEqual('fakeproject', domains[1]['project'])
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEqual(1, len(entries))
self.assertEqual(address1, entries[0])
self.network.delete_dns_domain(self.context, domain1)
self.network.delete_dns_domain(self.context, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(2, len(entries))
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 123,
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
# NOTE(russellb) The VirtualInterface object requires an ID to be
# set, and we expect it to get set automatically when we do the
# save.
vif.id = 1
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
self.assertEqual([], macs)
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.get_floating_ip,
self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
def _fake_catchall(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
network=test_network.fake_network)
def _fake_add_floating_ip(*args, **kwargs):
raise processutils.ProcessExecutionError(stdout)
self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
_fake_catchall)
self.stubs.Set(self.network.db, 'floating_ip_disassociate',
_fake_catchall)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
_fake_add_floating_ip)
self.assertRaises(expected_exception,
self.network._associate_floating_ip, self.context,
'1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
mock_get.return_value = mock.sentinel.floating
self.assertEqual(mock.sentinel.floating,
self.network.get_floating_ip_by_address(
self.context,
mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
mock_get.return_value = mock.sentinel.floatings
self.assertEqual(mock.sentinel.floatings,
self.network.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context, self.context.project_id)
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
def test_get_floating_ips_by_fixed_address(self, mock_get):
mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
objects.FloatingIP(address='5.6.7.8')]
self.assertEqual(['1.2.3.4', '5.6.7.8'],
self.network.get_floating_ips_by_fixed_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.db.floating_ip_get_pools')
def test_floating_ip_pool_exists(self, floating_ip_get_pools):
floating_ip_get_pools.return_value = [{'name': 'public'}]
self.assertTrue(self.network._floating_ip_pool_exists(self.context,
'public'))
@mock.patch('nova.db.floating_ip_get_pools')
def test_floating_ip_pool_does_not_exist(self, floating_ip_get_pools):
floating_ip_get_pools.return_value = []
self.assertFalse(self.network._floating_ip_pool_exists(self.context,
'public'))
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
self.network.create_private_dns_domain(self.context, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEqual(1, len(domains))
self.assertEqual(domain1, domains[0]['domain'])
self.assertEqual(zone1, domains[0]['availability_zone'])
self.network.delete_dns_domain(self.context, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.NoDBTestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.network.ldapdns.ldap',
fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(2, len(domains))
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(2, len(entries))
self.assertEqual(name1, entries[0])
self.assertEqual(name2, entries[1])
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEqual(1, len(entries))
self.assertEqual(address1, entries[0])
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(2, len(entries))
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEqual(1, len(entries))
self.assertEqual(name2, entries[0])
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
class NetworkManagerNoDBTestCase(test.NoDBTestCase):
"""Tests nova.network.manager.NetworkManager without a database."""
def setUp(self):
super(NetworkManagerNoDBTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.manager = network_manager.NetworkManager()
@mock.patch.object(objects.FixedIP, 'get_by_address')
def test_release_fixed_ip_not_associated(self, mock_fip_get_by_addr):
# Tests that the method is a no-op when the fixed IP is not associated
# to an instance.
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fake_network.next_fixed_ip(1))
fip.instance_uuid = None
with mock.patch.object(fip, 'disassociate') as mock_disassociate:
self.manager.release_fixed_ip(self.context, fip.address)
self.assertFalse(mock_disassociate.called,
str(mock_disassociate.mock_calls))
@mock.patch.object(objects.FixedIP, 'get_by_address')
def test_release_fixed_ip_allocated(self, mock_fip_get_by_addr):
# Tests that the fixed IP is not disassociated if it's allocated.
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fake_network.next_fixed_ip(1))
fip.leased = False
fip.allocated = True
with mock.patch.object(fip, 'disassociate') as mock_disassociate:
self.manager.release_fixed_ip(self.context, fip.address)
self.assertFalse(mock_disassociate.called,
str(mock_disassociate.mock_calls))
@mock.patch.object(objects.FixedIP, 'get_by_address')
@mock.patch.object(objects.VirtualInterface, 'get_by_address')
def test_release_fixed_ip_mac_matches_associated_instance(self,
mock_vif_get_by_addr,
mock_fip_get_by_addr):
# Tests that the fixed IP is disassociated when the mac passed to
# release_fixed_ip matches the VIF which has the same instance_uuid
# as the instance associated to the FixedIP object. Also tests
# that the fixed IP is marked as not leased in the database if it was
# currently leased.
instance = fake_instance.fake_instance_obj(self.context)
fip = fake_network.next_fixed_ip(1)
fip['instance_uuid'] = instance.uuid
fip['leased'] = True
vif = fip['virtual_interface']
vif['instance_uuid'] = instance.uuid
vif = objects.VirtualInterface._from_db_object(
self.context, objects.VirtualInterface(), vif)
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fip)
mock_fip_get_by_addr.return_value = fip
mock_vif_get_by_addr.return_value = vif
with mock.patch.object(fip, 'save') as mock_fip_save:
with mock.patch.object(fip, 'disassociate') as mock_disassociate:
self.manager.release_fixed_ip(
self.context, fip.address, vif.address)
mock_fip_save.assert_called_once_with()
self.assertFalse(fip.leased)
mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address)
mock_disassociate.assert_called_once_with()
@mock.patch.object(objects.FixedIP, 'get_by_address')
@mock.patch.object(objects.VirtualInterface, 'get_by_address',
return_value=None)
def test_release_fixed_ip_vif_not_found_for_mac(self, mock_vif_get_by_addr,
mock_fip_get_by_addr):
# Tests that the fixed IP is disassociated when the fixed IP is marked
# as deallocated and there is no VIF found in the database for the mac
# passed in.
fip = fake_network.next_fixed_ip(1)
fip['leased'] = False
mac = fip['virtual_interface']['address']
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fip)
mock_fip_get_by_addr.return_value = fip
with mock.patch.object(fip, 'disassociate') as mock_disassociate:
self.manager.release_fixed_ip(self.context, fip.address, mac)
mock_vif_get_by_addr.assert_called_once_with(self.context, mac)
mock_disassociate.assert_called_once_with()
@mock.patch.object(objects.FixedIP, 'get_by_address')
def test_release_fixed_ip_no_mac(self, mock_fip_get_by_addr):
# Tests that the fixed IP is disassociated when the fixed IP is
# deallocated and there is no mac address passed in (like before
# the network rpc api version bump to pass it in).
fip = fake_network.next_fixed_ip(1)
fip['leased'] = False
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fip)
mock_fip_get_by_addr.return_value = fip
with mock.patch.object(fip, 'disassociate') as mock_disassociate:
self.manager.release_fixed_ip(self.context, fip.address)
mock_disassociate.assert_called_once_with()
@mock.patch.object(objects.FixedIP, 'get_by_address')
@mock.patch.object(objects.VirtualInterface, 'get_by_address')
def test_release_fixed_ip_mac_mismatch_associated_instance(self,
mock_vif_get_by_addr,
mock_fip_get_by_addr):
# Tests that the fixed IP is not disassociated when the VIF for the mac
# passed to release_fixed_ip does not have an instance_uuid that
# matches fixed_ip.instance_uuid.
old_instance = fake_instance.fake_instance_obj(self.context)
new_instance = fake_instance.fake_instance_obj(self.context)
fip = fake_network.next_fixed_ip(1)
fip['instance_uuid'] = new_instance.uuid
fip['leased'] = False
vif = fip['virtual_interface']
vif['instance_uuid'] = old_instance.uuid
vif = objects.VirtualInterface._from_db_object(
self.context, objects.VirtualInterface(), vif)
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fip)
mock_fip_get_by_addr.return_value = fip
mock_vif_get_by_addr.return_value = vif
with mock.patch.object(fip, 'disassociate') as mock_disassociate:
self.manager.release_fixed_ip(
self.context, fip.address, vif.address)
mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address)
self.assertFalse(mock_disassociate.called,
str(mock_disassociate.mock_calls))
@mock.patch.object(objects.FixedIP, 'get_by_address')
@mock.patch.object(objects.VirtualInterface, 'get_by_id')
@mock.patch.object(objects.Quotas, 'reserve')
def test_deallocate_fixed_ip_explicit_disassociate(self,
mock_quota_reserve,
mock_vif_get_by_id,
mock_fip_get_by_addr):
# Tests that we explicitly call FixedIP.disassociate when the fixed IP
# is not leased and has an associated instance (race with dnsmasq).
self.flags(force_dhcp_release=True)
fake_inst = fake_instance.fake_instance_obj(self.context)
fip = fake_network.next_fixed_ip(1)
fip['instance_uuid'] = fake_inst.uuid
fip['leased'] = False
vif = fip['virtual_interface']
vif['instance_uuid'] = fake_inst.uuid
vif = objects.VirtualInterface._from_db_object(
self.context, objects.VirtualInterface(), vif)
fip = objects.FixedIP._from_db_object(
self.context, objects.FixedIP(), fip)
fip.network = fake_network.fake_network_obj(self.context,
fip.network_id)
mock_fip_get_by_addr.return_value = fip
mock_vif_get_by_id.return_value = vif
@mock.patch.object(self.manager,
'_do_trigger_security_group_members_refresh_for_instance')
@mock.patch.object(self.manager,
'_validate_instance_zone_for_dns_domain',
return_value=False)
@mock.patch.object(self.manager, '_teardown_network_on_host')
@mock.patch.object(fip, 'save')
@mock.patch.object(fip, 'disassociate')
def do_test(mock_disassociate, mock_fip_save,
mock_teardown_network_on_host, mock_validate_zone,
mock_trigger_secgroup_refresh):
self.assertEqual(fake_inst.uuid, fip.instance_uuid)
self.assertFalse(fip.leased)
self.manager.deallocate_fixed_ip(
self.context, fip['address'], instance=fake_inst)
mock_trigger_secgroup_refresh.assert_called_once_with(
fake_inst.uuid)
mock_teardown_network_on_host.assert_called_once_with(self.context,
fip.network)
mock_disassociate.assert_called_once_with()
do_test()
| devendermishrajio/nova | nova/tests/unit/network/test_manager.py | Python | apache-2.0 | 167,357 | [
"FEFF"
] | 07f4e358306ed2f770cc70f91b626e8708f4fcee3513f7faca754a02a5cdb394 |
import importlib as il
import os.path
import inspect
import re
import logging
import json
from .ParserData import Struct
from .ParserData import ParseContainer
from .ParserData import StructEncoder
from .QCBase import GenFormatter, VarNames as V
class Parser(object):
FIND_MAX_LINES = 100
def __init__(self, output, *, software=None, to_console=True,
to_file=False, log_file="CCParser.log", to_json=False,
json_file="CCParser.json", overwrite_file=True,
                 overwrite_vals=True, use_numpy=True):  # cf. PEP 3102
""" Parser constructor.
Parameters
----------
output : string
Output filename.
software : string
Name of quantum chemistry software suite (default: None).
to_console : bool
Whether to print log output to screen (default: True).
to_file : bool
Whether to write log output to file (default: False).
log_file : string
Name of output log file (default: ``CCParser.log``).
        to_json : bool
            Whether to dump CCParser.results to a JSON file (default: False).
        json_file : string
            Name of JSON output file (default: ``CCParser.json``).
        overwrite_file : bool
            Whether to overwrite an existing JSON file instead of merging
            into it (default: True).
        overwrite_vals : bool
            When merging into an existing JSON file, whether new values
            replace existing keys (default: True).
        use_numpy : bool
            Passed through to the parsing methods (default: True).
"""
self.f_output = output
self.logger = logging.getLogger("CCParser")
self.config = dict(to_console=to_console, to_file=to_file,
log_file=log_file, to_json=to_json,
json_file=json_file, overwrite_file=overwrite_file,
overwrite_vals=overwrite_vals, use_numpy=use_numpy)
self.setupLogger()
self.logger.warning("CCParser starts...")
# determine software
        if software is not None:
self.software = software
else:
self.find_software()
self.output_basename = os.path.basename(output)
        self.read_output()  # read output to memory
        self.load_methods()  # software-dependent import
        self.results = Struct()  # set up container
for i, line in enumerate(self.rawData):
for mthd in self.methods:
# match, key = self.canParse(line, mthd)
match, keys = self.canParse(line, mthd)
if match:
                    for key in keys:  # if not 1-to-1 mapping
q = self.get_quantity(i, key, mthd)
if hasattr(self.results, mthd.map[key]):
obj = getattr(self.results, mthd.map[key])
obj.add(i, q)
else:
obj = ParseContainer()
obj.add(i, q)
setattr(self.results, mthd.map[key], obj)
if not hasattr(self.results, V.has_finished):
container = ParseContainer()
container.add(0, False)
setattr(self.results, V.has_finished, container)
self.logger.warning("Output indicates abnormal exit. Added "+
"[results.has_finished] = False")
"""
if json_file is path, json_filepath = json_file
if output is filename, json_filepath = json_file
if output is path (path/output.out), json_path is filename, saves in folder (path/jsfile.json)
"""
json_folder = os.path.split(self.config['json_file'])[0]
out_folder = os.path.split(output)[0]
        json_filepath = (self.config['json_file'] if json_folder
                         else os.path.join(out_folder,
                                           self.config['json_file']))
if self.config['to_json'] and self.config['overwrite_file']:
self.dump_json(fname=json_filepath)
elif self.config['to_json'] and not self.config['overwrite_file']:
if os.path.isfile(json_filepath):
with open(json_filepath,"r") as f:
old = json.load(f)
else:
old = {}
new = StructEncoder().default(self.results)
if self.config['overwrite_vals']:
old.update(new)
else:
for k in new.keys():
if k not in old.keys():
old[k] = new[k]
with open(json_filepath,"w") as f:
json.dump(old, f)
self.logger.warning("CCParser has finished.")
self.loggerCleanUp()
def read_output(self):
""" Read in output file """
with open(self.f_output, "r") as f:
self.rawData = f.readlines()
def read_input(self, f_input):
""" (Optional) Read input file """
with open(f_input) as n:
            # insert(0, list) would nest the whole list as a single element;
            # splice the lines in instead so rawData stays a flat list
            self.rawData[:0] = n.readlines()
def canParse(self, line, mthd):
""" Check if line is parsable """
found = False
        keys = []  # for cases where there's no 1-to-1 mapping
for key, value in mthd.hooks.items():
if value in line:
found = True
keys.append(key)
# return found, key
else:
match = re.search(value, line)
if match:
found = True
keys.append(key)
# return found, key
if not found:
return found, None
else:
return found, keys
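    # Illustrative (hypothetical) hooks mapping for a method class. Values
    # may be plain substrings or regular expressions, and several keys may
    # hook onto the same line, which is why canParse returns a list of keys:
    #
    #   hooks = {"scf_energy": "SCF   energy in the final basis set",
    #            "exc_energy": r"Excitation energy\s*=\s*[-+]?\d*\.\d+"}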
def get_quantity(self, i, key, mthd):
""" Call function of method class. This is the actual parsing. """
        method_func = getattr(mthd, key)  # needs to be a method, not a list of methods
result = method_func(i, self.rawData)
return result
def load_methods(self):
""" Load correct module which contains parsing information
based on which software was specified. """
tmp = re.sub('[^A-Za-z]+', '', self.software.lower())
if tmp == "qchem":
m_package = ".QChem"
elif tmp == "gaussian":
m_package = ".Gaussian"
elif tmp == "molcas":
raise NotImplementedError("Molcas parsing not implemented yet!")
m_package = ".Molcas"
elif tmp == "turbomole":
raise NotImplementedError("Turbomole parsing not implemented yet!")
m_package = ".Turbomole"
elif tmp == "psi":
m_package = ".Psi4"
else:
raise ValueError("The specified software is misspelled or not implemented yet!")
#global m
m = il.import_module(m_package, package="CCParser")
        self.method_names = [k[0] for k in inspect.getmembers(m, inspect.isclass)
                             if k[1].__module__ == "CCParser" + m_package]
# this also instantiates!!
self.methods = [getattr(m, mname)(self.config) for mname in self.method_names]
def setupLogger(self):
"""Initiate logger for CCParser.Parser"""
# Set main logger's minimum output level
self.logger.setLevel(logging.INFO)
# Set up Formatter
# p_fmt = logging.Formatter("[results.%(Parsed)s] Parsed %(message)s")
#
# This is abusing the Formatter class a bit, but I wanted to avoid
# one Logger for every format, maybe I'll change this in the future.
p_fmt = GenFormatter(
{logging.INFO: "[results.%(Parsed)s] Parsed %(message)s",
logging.WARNING: "==[%(asctime)s]== %(message)s",
logging.ERROR: "%(message)s"})
# Set up Handlers
if self.config['to_file']:
fh = logging.FileHandler(self.config['log_file'])
fh.setLevel(logging.INFO)
fh.setFormatter(p_fmt)
self.logger.addHandler(fh)
if self.config['to_console']:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(p_fmt)
self.logger.addHandler(ch)
# No output in case both booleans are False
if not any([self.config['to_console'], self.config['to_file']]):
self.logger.setLevel(logging.CRITICAL)
def loggerCleanUp(self):
"""In order to avoid multiplying handlers. """
for i in range(len(self.logger.handlers)):
self.logger.handlers.pop()
def set_missing_keys(self):
"""Set default values for keywords that have not been found."""
        # use V.fde_expansion as an indicator of whether or not an FDE
        # calculation was requested
# if hasattr(self.results, V.fde_expansion):
# if not hasattr(self.results, V.fde_isA_imported):
# container = ParseContainer(0, False)
# setattr(self.results, V.fde_isA_imported, container)
# self.logger.info("whether FDET program imports rhoA_ref",
# extra={"Parsed":V.fde_isA_imported})
# if not hasattr(self.results, V.fde_isB_imported):
# container = ParseContainer(0, False)
# setattr(self.results, V.fde_isB_imported, container)
# self.logger.info("whether FDET program imports rhoB",
# extra={"Parsed":V.fde_isB_imported})
if not hasattr(self.results, V.has_finished):
container = ParseContainer(0, False)
setattr(self.results, V.has_finished, container)
self.logger.warning("Output indicates abnormal exit.")
def dump_json(self, fname="CCParser.json"):
"""Dumps contens of the CCParser.results container to a JSON file.
Parameters
----------
fname : str
Filename to dump to.
"""
with open(fname, "w") as pdump:
json.dump(self.results, pdump, cls=StructEncoder)
self.logger.warning("Dumped CCParser.results to JSON file.")
def find_software(self):
with open(self.f_output) as f:
for n, line in enumerate(f):
if n > Parser.FIND_MAX_LINES:
err_str = "Could not determine software within {0} lines!".format(
Parser.FIND_MAX_LINES)
raise IndexError(err_str)
if is_qchem(line):
self.software = "qchem"
break
elif is_gaussian(line):
self.software = "gaussian"
break
elif is_molcas(line):
self.software = "molcas"
break
elif is_turbomole(line):
self.software = "turbomole"
break
elif is_psi4(line):
self.software = "psi"
break
self.logger.warning("Automatically determined software is {0}".format(self.software))
def is_qchem(line):
hooks = ["Welcome to Q-Chem",
"A Quantum Leap Into The Future Of Chemistry"
]
# for now only use first hook to save time
return hooks[0] in line
def is_gaussian(line):
hooks = ["Gaussian 88(TM) system (copyright 1988, Gaussian, Inc.)"]
return hooks[0] in line
def is_molcas(line):
return False
def is_turbomole(line):
return False
def is_psi4(line):
return False
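# Minimal usage sketch (assumes the Parser class is importable from the
# CCParser package and that "calc.out" is an existing Q-Chem output file;
# the attribute names on `results` depend on the mappings defined in the
# software-specific module and are not shown here):
#
#   from CCParser import Parser
#   p = Parser("calc.out", software="qchem",
#              to_console=False, to_json=True, json_file="calc.json")
#   # parsed quantities live as ParseContainer objects on p.results,
#   # e.g. p.results.has_finished is set for every run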
| spectre007/CCParser | Parser.py | Python | mit | 11,057 | [
"Gaussian",
"MOLCAS",
"Psi4",
"Q-Chem",
"TURBOMOLE"
] | da74ca20de29cc28dbfb82d708cbb073785e57f02b5a4bddb4267f57e2812331 |
"""Example to solve 3D aqueous foam pipe flow using rheological
Herschel-Bulkley power law for bulk and wall shear stress dependent
slip velocity law for wall layer
"""
import numpy as np
import foam_controlwrapper
from simphony.core.cuba import CUBA
from simphony.api import CUDS, Simulation
from simphony.cuds.meta import api
from simphony.engine import EngineInterface
from mayavi.scripts import mayavi2
import slit_mesh
import tempfile
import time
start = time.time()
case_name = 'aqueous_foam'
mesh_name = 'aqueous_foam_mesh'
cuds = CUDS(name=case_name)
# physics model
cfd = api.Cfd(name='cfd model')
# these are already set by default in CFD
cfd.thermal_model = api.IsothermalModel(name='isothermal')
cfd.turbulence_model = api.LaminarFlowModel(name='laminar')
cfd.compressibility_model = api.IncompressibleFluidModel(name='incompressible')
# material
foam = api.Material(name='foam')
foam.data[CUBA.DENSITY] = 250.0
foam.data[CUBA.DYNAMIC_VISCOSITY] = 4.37 # initial_viscosity of HB model
cuds.add([foam])
# use Herschel Bulkley viscosity model for aqueous foam
hb = api.HerschelBulkleyModel(name='foam_rheology')
hb.initial_viscosity = 0.01748 * foam.data[CUBA.DENSITY]
hb.relaxation_time = 0.0148 * foam.data[CUBA.DENSITY]
hb.linear_constant = 0.00268 * foam.data[CUBA.DENSITY]
hb.power_law_index = 0.5
hb.material = cuds.get_by_name('foam').uid
cfd.rheology_model = hb
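# Note on the coefficients above (interpretation, not stated in the source):
# the Herschel-Bulkley law reads tau = tau_0 + K*gamma_dot**n; the raw values
# appear to be kinematic and are multiplied by the density to obtain dynamic
# units (0.01748 * 250 = 4.37, matching CUBA.DYNAMIC_VISCOSITY above).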
cuds.add([cfd])
sol_par = api.SolverParameter(name='steady_state')
sol_par.data[CUBA.STEADY_STATE] = True
cuds.add([sol_par])
end = time.time()
print "Time spend in initialization: ", end-start
start = time.time()
# create computational mesh
mesh = foam_controlwrapper.create_block_mesh(tempfile.mkdtemp(), mesh_name,
slit_mesh.blockMeshDict)
end = time.time()
print "Time spend in blockmesh: ", end-start
start = time.time()
cuds.add([mesh])
end = time.time()
print "Time spend in add mesh to cuds: ", end-start
start = time.time()
# boundary conditions
vel_inlet = api.Dirichlet(foam, name='vel_inlet')
vel_inlet.data[CUBA.VARIABLE] = CUBA.VELOCITY
vel_inlet.data[CUBA.VELOCITY] = (0.53, 0, 0)
pres_inlet = api.Neumann(foam, name='pres_inlet')
pres_inlet.data[CUBA.VARIABLE] = CUBA.PRESSURE
vel_outlet = api.Neumann(foam, name='vel_outlet')
vel_outlet.data[CUBA.VARIABLE] = CUBA.VELOCITY
pres_outlet = api.Dirichlet(foam, name='pres_outlet')
pres_outlet.data[CUBA.VARIABLE] = CUBA.PRESSURE
pres_outlet.data[CUBA.PRESSURE] = 0.0
vel_walls = api.ShearStressPowerLawSlipVelocity(foam,
density=250.0,
linear_constant=3.1e-3,
power_law_index=1.16,
name='vel_walls')
vel_walls.data[CUBA.VARIABLE] = CUBA.VELOCITY
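# The wall slip law is assumed, from the class name, to have the power-law
# form u_slip = C * tau_w**n with C = linear_constant = 3.1e-3 and
# n = power_law_index = 1.16; the exact form is not stated in this file.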
pres_walls = api.Neumann(name='pres_walls')
pres_walls.data[CUBA.VARIABLE] = CUBA.PRESSURE
vel_frontAndBack = api.Empty(name='vel_frontAndBack')
vel_frontAndBack.data[CUBA.VARIABLE] = CUBA.VELOCITY
pres_frontAndBack = api.Empty(name='pres_frontAndBack')
pres_frontAndBack.data[CUBA.VARIABLE] = CUBA.PRESSURE
inlet = api.Boundary(name='inlet', condition=[vel_inlet, pres_inlet])
walls = api.Boundary(name='walls', condition=[vel_walls, pres_walls])
outlet = api.Boundary(name='outlet', condition=[vel_outlet, pres_outlet])
frontAndBack = api.Boundary(name='frontAndBack',
condition=[vel_frontAndBack, pres_frontAndBack])
cuds.add([inlet, walls, outlet, frontAndBack])
end = time.time()
print "Time spend in boundary settings: ", end-start
start = time.time()
sim = Simulation(cuds, 'OpenFOAM', engine_interface=EngineInterface.Internal)
end = time.time()
print "Time spend in Simulation initialization: ", end-start
# time setting
sim_time = api.IntegrationTime(name='simulation_time',
current=0.0,
final=67,
size=1)
cuds.add([sim_time])
sim.run()
mesh_in_engine = cuds.get_by_name(mesh_name)
sm = api.MesoscopicStressModel(name='meso_stress_model')
cuds.add([sm])
cuds.remove('simulation_time')
sim_time = api.IntegrationTime(name='simulation_time',
current=0.0,
final=1,
size=1)
cuds.add([sim_time])
start = time.time()
number_of_outer_timesteps = 20
for time_i in range(number_of_outer_timesteps):
updated_cells = []
for cell in mesh_in_engine._iter_cells():
strain = cell.data[CUBA.STRAIN_TENSOR]
strain_rate = np.linalg.norm(strain)
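        # Effective viscosity from the regularized Herschel-Bulkley law:
        #   nu = min(nu_0, (tau_0 + K * rate**n) / rate)
        # with the strain rate floored at 1.0e-6 to avoid division by zero
        # and nu capped at the initial (zero-shear) viscosity nu_0.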
nu = min(hb.initial_viscosity,
(hb.relaxation_time +
hb.linear_constant*pow(strain_rate, hb.power_law_index))
/ (max(strain_rate, 1.0e-6))
)
hb_stress = [nu*sri for sri in strain]
cell.data[CUBA.HOMOGENIZED_STRESS_TENSOR] = hb_stress
updated_cells.append(cell)
mesh_in_engine._update_cells(updated_cells)
# solve macroscopic scale
print "Solve cfd"
sim.run()
print "Time: ", mesh_in_engine._time
print "Mesoscale as analytic coupling"
print " Update stress"
end = time.time()
print "Time spend in run: ", end-start
start = time.time()
print "Working directory ", mesh_in_engine.path
average_pressure = 0.0
for cell in mesh_in_engine.get_boundary_cells(inlet.name):
average_pressure += cell.data[CUBA.PRESSURE]
average_pressure /= len(mesh_in_engine._boundaries[inlet.name])
end = time.time()
print "Time spend in post processing: ", end-start
print "Average pressure on inlet: ", average_pressure
@mayavi2.standalone
def view():
from mayavi.modules.surface import Surface
from simphony_mayavi.sources.api import CUDSSource
mayavi.new_scene() # noqa
src = CUDSSource(cuds=mesh_in_engine)
mayavi.add_source(src) # noqa
s = Surface()
mayavi.add_module(s) # noqa
if __name__ == '__main__':
view()
| simphony/simphony-openfoam | foam_internalwrapper/examples/aqueous_foam_slit_flow_steady_state_meso_coupling.py | Python | gpl-2.0 | 6,010 | [
"Mayavi"
] | 5af93565b20ee8f3e498415a1b28dc42ff95bbece44f174c11097e6caab53f86 |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XSF file utilities.
"""
import numpy as np
import re
import ase.io
def read_xsf(xsfile):
"""Return an atoms with energy and forces for the aenet xsfile."""
atoms = ase.io.read(xsfile)
calc = atoms.get_calculator()
with open(xsfile, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if line.startswith('# total energy'):
m = re.findall(r'[-+]?\d*\.\d+|\d+', line)
energy = float(m[0])
break
calc.results['energy'] = energy
forces = []
with open(xsfile, 'r') as f:
while True:
            line = f.readline()
            if not line:
                break
            if line.startswith('PRIMCOORD'):
                break
count = int(f.readline().split()[0])
for i in range(count):
fields = f.readline().split()
forces += [[float(x) for x in fields[4:]]]
calc.results['forces'] = np.array(forces)
return atoms
def write_xsf(xsfile, atoms):
"""Create an aenet compatible xsf file in FNAME for ATOMS.
fname: a string for the filename.
atoms: an ase atoms object with an attached calculator containing energy and
forces.
returns the string that is written to the file.
"""
energy = atoms.get_potential_energy()
forces = atoms.get_forces()
xsf = ['# total energy = {} eV'.format(energy), '']
if True in atoms.pbc:
xsf += ['CRYSTAL', 'PRIMVEC']
for v in atoms.get_cell():
xsf += ['{} {} {}'.format(*v)]
xsf += ['PRIMCOORD', '{} 1'.format(len(atoms))]
else:
xsf += ['ATOMS']
S = ('{atom.symbol:<3s} {atom.x: .12f} {atom.y: .12f} {atom.z: .12f} {f[0]: '
'.12f} {f[1]: .12f} {f[2]: .12f}')
xsf += [S.format(atom=atom, f=forces[i]) for i, atom in enumerate(atoms)]
output = '\n'.join(xsf)
with open(xsfile, 'w') as f:
f.write(output)
return output
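# A minimal round-trip sketch (not part of the original module; it assumes
# an ASE calculator such as EMT is installed and that ase.io.read attaches
# the forces found in the file to a calculator):
#
#   from ase.build import bulk
#   from ase.calculators.emt import EMT
#
#   atoms = bulk('Cu')
#   atoms.set_calculator(EMT())
#   atoms.get_potential_energy()   # populate the calculator results
#   write_xsf('Cu.xsf', atoms)     # write the aenet-compatible file
#   atoms2 = read_xsf('Cu.xsf')    # read it back with energy and forces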
| google/differentiable-atomistic-potentials | dap/py/xsf.py | Python | apache-2.0 | 2,354 | [
"ASE",
"CRYSTAL"
] | 461c785ced069e7d1858fbe69dc6e280c821e77ec76dcf9cf55949d4e74171e2 |
#!/usr/bin/env python
import unittest
from decimal import Decimal
from traversal import Node, visit, avg
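# Decimal(str(x)) rounds x to its shortest decimal representation, whereas
# Decimal(x) would expand the exact binary fraction; going through str keeps
# the equality assertions below exact.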
def d(float_value):
assert float_value is not None
return Decimal(str(float_value))
class NodeTest(unittest.TestCase):
def test_init_node(self):
node = Node(43)
self.assertEqual(node.value, 43)
def test_connect(self):
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n1.connect(n2, n3)
self.assertEqual(n1.connections, {n2, n3})
self.assertEqual(n2.connections, {n1})
self.assertEqual(n3.connections, {n1})
class TestTraverse(unittest.TestCase):
def test_visit(self):
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n1.connect(n2, n3)
n2.connect(n3, n4, n5)
n3.connect(n1, n5)
unique_nodes = list(visit(n1))
self.assertEqual(len(unique_nodes), 5)
def test_avg(self):
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n6 = Node(6)
n1.connect(n2, n3)
n3.connect(n4)
n4.connect(n5)
self.assertEqual(
d(avg(n2)),
d(n1.value + n2.value + n3.value + n4.value + n5.value) / 5)
self.assertEqual(d(avg(n6)), d(n6.value))
if __name__ == '__main__':
unittest.main()
| satyrius/traversal | test.py | Python | mit | 1,389 | [
"VisIt"
] | 33f64c750937cddff0c7859568997b6541dfd13e9f3eab12fe42846982fb0228 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_pkiprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PKIProfile Avi RESTful Object
description:
- This module is used to configure PKIProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- List of certificate authorities (root and intermediate) trusted that is used for certificate validation.
created_by:
description:
- Creator name.
crl_check:
description:
- When enabled, avi will verify via crl checks that certificates in the trust chain have not been revoked.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
crls:
description:
- Certificate revocation lists.
ignore_peer_chain:
description:
- When enabled, avi will not trust intermediate and root certs presented by a client.
- Instead, only the chain certs configured in the certificate authority section will be used to verify trust of the client's cert.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
name:
description:
- Name of the pki profile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
validate_only_leaf_crl:
description:
- When enabled, avi will only validate the revocation status of the leaf certificate using crl.
- To enable validation for the entire chain, disable this option and provide all the relevant crls.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PKIProfile object
avi_pkiprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_pkiprofile
"""
RETURN = '''
obj:
description: PKIProfile (api/pkiprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
ca_certs=dict(type='list',),
created_by=dict(type='str',),
crl_check=dict(type='bool',),
crls=dict(type='list',),
ignore_peer_chain=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
validate_only_leaf_crl=dict(type='bool',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pkiprofile',
set([]))
if __name__ == '__main__':
main()
| adityacs/ansible | lib/ansible/modules/network/avi/avi_pkiprofile.py | Python | gpl-3.0 | 4,715 | [
"VisIt"
] | c9e454da626d90b7d4ae84c69e66bf9b41a8a7830b69dfb233da608c89c58c9a |
#!/usr/bin/env python3
"""
Start the atram preprocessor.
This wrapper module parses the input arguments and passes them to the module
that does the actual preprocessing (core_preprocessor.py).
"""
import argparse
import os
import textwrap
from datetime import date
from glob import glob
from itertools import chain
from os.path import join
import lib.blast as blast
import lib.db as db
import lib.util as util
from lib.core_preprocessor import preprocess
def parse_command_line():
"""Process command-line arguments."""
description = """
This script prepares data for use by the atram.py
script. It takes fasta or fastq files of paired-end (or
    single-end) sequence reads and creates a set of aTRAM
databases.
You need to prepare the sequence read archive files so that the
header lines contain only a sequence ID with the optional
paired-end suffix at the end of the header line. The separator
for the optional trailing paired-end suffix may be a space,
a slash "/", a dot ".", or an underscore "_".
For example:
>DBRHHJN1:427:H9YYAADXX:1:1101:10001:77019/1
GATTAA...
>DBRHHJN1:427:H9YYAADXX:1:1101:10001:77019/2
ATAGCC...
>DBRHHJN1:427:H9YYAADXX:1:1101:10006:63769/2
CGAAAA...
"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', description=textwrap.dedent(description))
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(db.ATRAM_VERSION))
parser.add_argument(
'--end-1', '-1', metavar='FASTA/Q', action='append',
help="""Sequence read archive files that have only end 1 sequences. The
sequence names do not need an end suffix, we will assume the suffix
is always 1. The files are in fasta or fastq format. You may
repeat this argument or use wildcards.
""")
parser.add_argument(
'--end-2', '-2', metavar='FASTA/Q', action='append',
help="""Sequence read archive files that have only end 2 sequences.
The sequence names do not need an end suffix, we will assume the
suffix is always 2. The files are in fasta or fastq format. You
may repeat this argument or use wildcards.
""")
parser.add_argument(
'--mixed-ends', '-m', metavar='FASTA/Q', action='append',
help="""Sequence read archive files that have a mix of both end 1 and
end 2 sequences (or single ends). The files are in fasta or fastq
format. You may repeat this argument or use wildcards.
""")
parser.add_argument(
'--single-ends', '-0', metavar='FASTA/Q', action='append',
help="""Sequence read archive files that have only unpaired sequences.
Any sequence suffix will be ignored. The files are in fasta or
fastq format. You may repeat this argument or use wildcards.
""")
group = parser.add_argument_group('preprocessor arguments')
blast_db = join('.', 'atram_' + date.today().isoformat())
group.add_argument(
'-b', '--blast-db', '--db', default=blast_db, metavar='DB',
help="""This is the prefix of all of the blast database files. So you
can identify different blast database sets. You may include a
directory as part of the prefix. (default %(default)s)
""".format(blast_db))
cpus = min(10, os.cpu_count() - 4 if os.cpu_count() > 4 else 1)
group.add_argument(
'--cpus', '--processes', '--max-processes', type=int, default=cpus,
help="""Number of CPU threads to use. (default %(default)s)
""".format(cpus))
group.add_argument(
'-t', '--temp-dir', metavar='DIR',
help="""Place temporary files in this directory. All files will be
deleted after aTRAM completes. The directory must exist.""")
group.add_argument(
'--keep-temp-dir', action='store_true',
help="""This flag will keep the temporary files in the --temp-dir
around for debugging.""")
group.add_argument('-l', '--log-file', help="""Log file (full path).""")
group.add_argument(
'--log-level', choices=['debug', 'info', 'error', 'fatal'],
default='info',
help="""Log messages of the given level (or above). 'debug' shows the
most messages and 'fatal' shows the least.
(default %(default)s)""")
group.add_argument(
'-s', '--shards', '--number', type=int, metavar='SHARDS',
dest='shard_count',
help="""Number of blast DB shards to create. The default is to have
each shard contain roughly 250MB of sequence data.""")
group.add_argument(
'--path',
help="""If makeblastdb is not in your $PATH then use this to prepend
directories to your path.""")
group.add_argument(
'--fasta', action='store_true',
help="""Are these fasta files? If you do not specify either --fasta or
--fastq then aTRAM will guess the file type by looking at the last
character of the file name.""")
group.add_argument(
'--fastq', action='store_true',
help="""Are these fastq files? If you do not specify either --fasta or
--fastq then aTRAM will guess the file type by looking at the last
character of the file name.""")
group.add_argument(
'--gzip', action='store_true',
help="""Are these gzip files?""")
group.add_argument(
'--bzip', action='store_true',
help="""Are these bzip files?""")
group.add_argument(
'--shuffle', action='store_true',
help="""Shuffle sequences before putting them into blast files?""")
args = vars(parser.parse_args())
# Prepend to PATH environment variable if requested
if args['path']:
os.environ['PATH'] = '{}:{}'.format(args['path'], os.environ['PATH'])
all_files = []
for ends in ['mixed_ends', 'end_1', 'end_2', 'single_ends']:
if args.get(ends):
end_files = [glob(p) for p in args[ends]]
end_files = sorted(list(chain.from_iterable(end_files)))
args[ends] = end_files
all_files.extend(end_files)
args['shard_count'] = blast.default_shard_count(args, all_files)
blast.make_blast_output_dir(args['blast_db'])
blast.find_program('makeblastdb')
util.temp_dir_exists(args['temp_dir'])
return args
if __name__ == '__main__':
ARGS = parse_command_line()
preprocess(ARGS)
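#
# Example command line (hypothetical paths; because of
# fromfile_prefix_chars='@' the arguments may also be read from a file,
# e.g. @args.txt):
#
#   ./atram_preprocessor.py --blast-db=./db/tutorial \
#       --end-1=reads_1.fastq --end-2=reads_2.fastq --cpus=4
#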
| juliema/aTRAM | atram_preprocessor.py | Python | bsd-3-clause | 6,668 | [
"BLAST"
] | 3086528a707dbfe8cd6038a457c88d8a31e875975b336e6dc5dc75c43f475252 |
"""
"""
import pickle
import numpy as np
from mayavi import mlab
def plot_catalogue(ax, pickle_fname, vsc):
cat = pickle.load(open(pickle_fname, 'rb'))
ax.scatter(cat.data['longitude'], cat.data['latitude'],
cat.data['depth']*vsc,
s=cat.data['magnitude'], c=cat.data['depth'])
def plot_mesh(ax, msh, vsc, lw=1, color='green'):
"""
    :param numpy.ndarray msh:
        A (n, m, 3) array with longitude, latitude and depth values
    :param float vsc:
        Vertical scaling factor applied to the depth values
"""
for i in range(0, msh.shape[0] - 1):
for j in range(0, msh.shape[1] - 1):
xt = [msh[i, j, 0], msh[i + 1, j, 0], msh[i + 1, j + 1, 0],
msh[i, j + 1, 0], msh[i, j, 0]]
yt = [msh[i, j, 1], msh[i + 1, j, 1], msh[i + 1, j + 1, 1],
msh[i, j + 1, 1], msh[i, j, 1]]
zt = [msh[i, j, 2] * vsc, msh[i + 1, j, 2] * vsc,
msh[i + 1, j + 1, 2] * vsc,
msh[i, j + 1, 2] * vsc, msh[i, j, 2] * vsc]
if all(np.isfinite(xt)):
ax.plot(xt, yt, zt, color=color, linewidth=lw)
def plot_mesh_mayavi(msh, vsc, lw=2, color=(1, 0, 0)):
"""
    :param numpy.ndarray msh:
        A (n, m, 3) array with longitude, latitude and depth values
    :param float vsc:
        Vertical scaling factor applied to the depth values
"""
for i in range(0, msh.shape[0] - 1):
for j in range(0, msh.shape[1] - 1):
xt = [msh[i, j, 0], msh[i + 1, j, 0], msh[i + 1, j + 1, 0],
msh[i, j + 1, 0], msh[i, j, 0]]
yt = [msh[i, j, 1], msh[i + 1, j, 1], msh[i + 1, j + 1, 1],
msh[i, j + 1, 1], msh[i, j, 1]]
zt = [msh[i, j, 2] * vsc, msh[i + 1, j, 2] * vsc,
msh[i + 1, j + 1, 2] * vsc,
msh[i, j + 1, 2] * vsc, msh[i, j, 2] * vsc]
if all(np.isfinite(xt)):
mlab.plot3d(xt, yt, zt, color=color, line_width=lw)
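# Minimal usage sketch (hypothetical data): a mesh is an (n, m, 3) array of
# longitude, latitude and depth, and vsc rescales depth so it is comparable
# with degrees (a negative value plots depth downwards):
#
#   msh = np.random.rand(5, 5, 3)
#   plot_mesh_mayavi(msh, vsc=-0.01)
#   mlab.show()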
| GEMScienceTools/oq-subduction | openquake/sub/plotting/tools.py | Python | agpl-3.0 | 1,766 | [
"Mayavi"
] | 3fe4c3f5cf416b49633e35a60a820ff21a710f0c6ba8b968761dd1102981c606 |
#!/usr/bin/python
#
# Copyright 2017 "OVS Performance" Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File name:
# ovs_performance.py
#
# Description:
# Simple script to run the OVS performance tests
#
# Author:
# Eelco Chaudron
#
# Initial Created:
# 17 January 2017
#
# Notes:
# - Install the spur python module
# dnf install python-spur
# - Install the XenaPythonLib from https://github.com/fleitner/XenaPythonLib
# cd XenaPythonLib/
# sudo python setup.py install
# - Install natsort and enum modules
# pip install natsort enum34
# - Install matplotlib
# dnf install python-matplotlib
# - Install latest Scapy
# pip install scapy
# - Install netaddr
# pip install netaddr
# - Install packaging (python3)
# pip install packaging
#
# Example:
#
#
# TODOs:
# - Add tunnel test cases (Geneve and VXLAN)
# - Add check after test to see all OF flows got packets (n_packets != 0)
# - Add option to stop trying more packet sizes once maximum performance
# of link is reached (i.e. two consecutive runs @ wire speed)
# - Add option to maximize traffic rate (PPS, and/or % based on port speed)
# - Add some VLAN test cases
# - Add a Bi-directional PVP test [phy0-vf0-VM-vf1-phy1]
# - Add option to run traffic part multiple(3) times to calculate deviation,
# and add error bars to the graphs
#
#
# Imports
#
import argparse
import csv
import datetime
import inspect
import logging
import numpy as np
import os
import re
import spur
import sys
import time
#
# Imports from simple shell API
#
from dut_ssh_shell import DutSshShell
#
# Import general traffic_generator library
#
from traffic_generator_base import TrafficFlowType
from traffic_generator import TrafficGenerator, TrafficGeneratorType
#
# Imports from natural sort
#
from natsort import natsorted
#
# Imports from packaging or distutils
#
if sys.version_info[0] == 2:
from distutils.version import StrictVersion as Version
else:
from packaging.version import Version
#
# Imports from Matplotlib; by default disable the Tk interface
#
import matplotlib
matplotlib.use('Agg')
# In Python 2, raw_input() returns a string, and input() tries
# to run the input as a Python expression.
# Since getting a string was almost always what we wanted,
# Python 3 does that with input()
# The following line checks the Python version being used to
# stick to raw_input() for Python2 and input() for Python3
if sys.version_info[0] == 3:
raw_input = input
#
# Default configuration
#
DEFAULT_TESTER_TYPE = 'xena'
DEFAULT_TESTER_SERVER_ADDRESS = ''
DEFAULT_TESTER_INTERFACE = ''
DEFAULT_SECOND_TESTER_INTERFACE = ''
DEFAULT_DUT_ADDRESS = ''
DEFAULT_DUT_LOGIN_USER = 'root'
DEFAULT_DUT_LOGIN_PASSWORD = 'root'
DEFAULT_DUT_VM_ADDRESS = ''
DEFAULT_DUT_SECOND_VM_ADDRESS = ''
DEFAULT_DUT_VM_NIC_PCI_ADDRESS = ''
DEFAULT_DUT_VM_LOGIN_USER = 'root'
DEFAULT_DUT_VM_LOGIN_PASSWORD = 'root'
DEFAULT_PHYSICAL_INTERFACE = ''
DEFAULT_SECOND_PHYSICAL_INTERFACE = ''
DEFAULT_PACKET_LIST = '64, 128, 256, 512, 768, 1024, 1514'
DEFAULT_VIRTUAL_INTERFACE = ''
DEFAULT_SECOND_VIRTUAL_INTERFACE = ''
DEFAULT_RUN_TIME = 20
DEFAULT_STREAM_LIST = '10, 1000, 10000, 100000, 1000000'
DEFAULT_BRIDGE_NAME = 'ovs_pvp_br0'
DEFAULT_WARM_UP_TIMEOUT = 360
DEFAULT_DST_MAC_ADDRESS = '00:00:02:00:00:00'
DEFAULT_SRC_MAC_ADDRESS = '00:00:01:00:00:00'
#
# Run simple traffic test Virtual to Virtual
#
def test_v2v(nr_of_flows, packet_sizes):
v2v_tx_results = list()
v2v_rx_results = list()
cpu_results = list()
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.virtual_interface],
of_interfaces[config.second_virtual_interface])
##################################################
lprint(" * Start packet receiver on second VM...")
start_traffic_rx_on_vm(config.dut_second_vm_address,
config.dut_second_vm_nic_pci)
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(
config.run_time))
start_traffic_tx_on_vm(config.dut_vm_address,
nr_of_flows, packet_size)
time.sleep(config.run_time)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream on VM1...")
stop_traffic_tx_on_vm(config.dut_vm_address)
##################################################
lprint(" * Stop packet receiver on VM2...")
stop_traffic_rx_on_vm(config.dut_second_vm_address)
##################################################
lprint(" * Gathering statistics...")
of_dump_port_to_logfile(config.bridge_name)
vm_pkts_sec = get_traffic_rx_stats_from_vm(
config.dut_second_vm_address)
vm_tx_pkts_sec = get_traffic_tx_stats_from_vm(config.dut_vm_address)
lprint(" - Transmit rate on VM: {:,} pps".format(vm_tx_pkts_sec))
lprint(" ! Result, average: {:,} pps".format(vm_pkts_sec))
cpu_results.append(get_cpu_monitoring_stats())
v2v_tx_results.append(vm_tx_pkts_sec)
v2v_rx_results.append(vm_pkts_sec)
##################################################
lprint(" * Restoring state for next test...")
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && '
# 'ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
flow_str = get_flow_type_short()
flow_file_str = get_flow_type_name()
create_multiple_graph(packet_sizes, {'Send Rate': v2v_tx_results,
'Receive Rate': v2v_rx_results},
"Packet size", "Packets/second",
"Virtual to Virtual with {} {} flows".format(
nr_of_flows, flow_str),
"test_v2v_{}_{}".format(nr_of_flows, flow_file_str),
None,
cpu_utilization={'Receive Rate': cpu_results})
create_multiple_graph(packet_sizes, {'Send Rate': v2v_tx_results,
'Receive Rate': v2v_rx_results},
"Packet size", "Packets/second",
"Virtual to Virtual with {} {} flows".format(
nr_of_flows, flow_str),
"test_v2v_{}_{}_ref".format(nr_of_flows,
flow_file_str),
[phy_speed], cpu_utilization={'Receive Rate':
cpu_results})
return v2v_rx_results, cpu_results
#
# Calculate loss percentage
#
def calc_loss_percentage(results):
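    # Loss percentage is 100 * (1 - rx / tx), clamped at zero in case more
    # packets are received than were sent.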
value = 100 - (float(results["total_rx_pkts"])
/ float(results["total_tx_pkts"])
* 100)
return max(value, 0)
#
# Get the PVP results for a single binary search iteration
#
def PVP_binary_search_single_run(test_value, **kwargs):
packet_size = kwargs.get("packet_size", 64)
nr_of_streams = kwargs.get("nr_of_streams", 10)
results = test_p2v2p_single_packet_size(nr_of_streams, packet_size,
decrease_rate=100 - test_value)
results["traffic_rate"] = test_value
lprint(" > Zero pkt loss: pkt {}, load {:.6f}%, miss {:.6f}%".
format(packet_size, test_value,
calc_loss_percentage(results)))
return results
#
# Compute the PVP loss percentage for a binary search iteration
#
def PVP_binary_search_itteration_result(result_values, test_value, **kwargs):
return calc_loss_percentage(result_values[test_value])
#
# Binary search to find the highest value for which the result is less than
# or equal to required_result.
#
# It returns all the data sets produced by run_test_function, together with
# the index of the matching one; -1 means no value qualified.
#
def binary_search(min_value, max_value, required_result,
run_test_function,
get_results_function,
**kwargs):
step = kwargs.pop("bs_step", 1)
results = dict()
#
# Need values from max to min, but in low to high order
#
values = np.arange(max_value, min_value - min(min_value, step), -step)
values = values[::-1]
if len(values) <= 1:
return results, -1
#
    # Here we do a binary-like search until the min and max values are one
    # apart. At that point we have closed in on the highest possible value
    # that meets the requirement; if neither value matches, the requested
    # result cannot be achieved.
#
current_min = 0
current_max = len(values) - 1
while True:
if current_min == current_max - 1:
break
current_test = int(current_min + ((current_max - current_min) / 2))
results[values[current_test]] = run_test_function(values[current_test],
**kwargs)
result = get_results_function(results, values[current_test], **kwargs)
if result > required_result:
current_max = current_test
else:
current_min = current_test
if not values[current_max] in results:
results[values[current_max]] = run_test_function(values[current_max],
**kwargs)
if get_results_function(results, values[current_max],
**kwargs) <= required_result:
return results, values[current_max]
if not values[current_min] in results:
results[values[current_min]] = run_test_function(values[current_min],
**kwargs)
if get_results_function(results, values[current_min],
**kwargs) <= required_result:
return results, values[current_min]
return results, -1
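#
# A toy illustration (hypothetical, not part of the test suite): find the
# highest integer load in [1, 100] whose "loss" (modeled here as load - 42)
# stays at or below the required result of 0, i.e. 42:
#
#   runs, best = binary_search(
#       1, 100, 0,
#       lambda value, **kw: {"loss": value - 42},
#       lambda results, value, **kw: results[value]["loss"])
#   assert best == 42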
#
# Run simple traffic test Physical to VM back to Physical
#
def test_p2v2p_single_packet_size(nr_of_flows, packet_size, **kwargs):
decrease_rate = kwargs.get("decrease_rate", 0)
    assert 0 <= decrease_rate < 100
decrease_rate *= 10000
results = dict()
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2}, rate={3:.2f}%)] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size, (1000000 - decrease_rate) / 10000))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_bidirectional_of_rules(nr_of_flows,
of_interfaces[config.physical_interface],
of_interfaces[config.virtual_interface])
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address,
percentage=1000000 - decrease_rate,
random_payload=config.payload_packet_random
)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
start_vm_time = datetime.datetime.now()
start_traffic_loop_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows * 2,
config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done and not config.warm_up_no_fail:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_tx_start, pp_tx_drop_start, pp_rx_start, pp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
vp_tx_start, vp_tx_drop_start, vp_rx_start, vp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])
##################################################
if not config.warm_up:
lprint(" * Start packet receiver on VM...")
start_traffic_loop_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
warm_up_time = 0
else:
        # warm_up_time is the total time it takes from the start of the
        # VM at warm-up till we would normally start the loopback VM.
        # This value is used to remove warm-up statistics.
warm_up_time = int(np.ceil((datetime.datetime.now()
- start_vm_time).total_seconds()))
lprint(" * Determine warm op time, {} seconds...".
format(warm_up_time))
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(
config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
tester.take_rx_statistics_snapshot(config.tester_interface)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Stop packet receiver on VM...")
stop_traffic_loop_on_vm(config.dut_vm_address)
##################################################
lprint(" * Gathering statistics...")
tester.take_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(config.tester_interface)
full_rx_stats = tester.get_rx_statistics_snapshots(config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
slogger.debug(" full_rx_stats={}".format(full_rx_stats))
pp_tx_end, pp_tx_drop_end, pp_rx_end, pp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
vp_tx_end, vp_tx_drop_end, vp_rx_end, vp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])
pp_rx = pp_rx_end - pp_rx_start
pp_tx = pp_tx_end - pp_tx_start
pp_rx_drop = pp_rx_drop_end - pp_rx_drop_start
pp_tx_drop = pp_tx_drop_end - pp_tx_drop_start
vp_rx = vp_rx_end - vp_rx_start
vp_tx = vp_tx_end - vp_tx_start
vp_rx_drop = vp_rx_drop_end - vp_rx_drop_start
vp_tx_drop = vp_tx_drop_end - vp_tx_drop_start
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_vm_address,
skip_samples=warm_up_time)
packets_tx = full_tx_stats[sorted(
full_tx_stats.keys())[-1]]['pt_total']['packets']
packets_rx = full_rx_stats[sorted(
full_rx_stats.keys())[-1]]['pr_total']['packets']
lprint(" - Packets send by Tester : {:-20,}".format(packets_tx))
lprint(" - Packets received by physical: {:-20,} [Lost {:,}, Drop "
"{:,}]".format(pp_rx, packets_tx - pp_rx, pp_rx_drop))
lprint(" - Packets received by virtual : {:-20,} [Lost {:,}, Drop "
"{:,}]".format(vp_tx, pp_rx - vp_tx, vp_tx_drop))
lprint(" - Packets send by virtual : {:-20,} [Lost {:,}, Drop "
"{:,}]".format(vp_rx, vp_tx - vp_rx, vp_rx_drop))
lprint(" - Packets send by physical : {:-20,} [Lost {:,}, Drop "
"{:,}]".format(pp_tx, vp_rx - pp_tx, pp_tx_drop))
lprint(" - Packets received by Tester : {:-20,} [Lost {:,}]".
format(packets_rx, pp_tx - packets_rx))
lprint(" - Receive rate on VM: {:,} pps".format(vm_pkts_sec))
rx_pkts_sec = get_packets_per_second_from_traffic_generator_rx_stats(
full_rx_stats)
lprint(" ! Result, average: {:,} pps".format(rx_pkts_sec))
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && '
# 'ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
results["cpu_stats"] = get_cpu_monitoring_stats()
results["rx_packets_second"] = rx_pkts_sec
results["total_tx_pkts"] = packets_tx
results["total_rx_pkts"] = packets_rx
return results
#
# Run simple traffic test Physical to VM back to Physical
#
def test_p2v2p(nr_of_flows, packet_sizes):
p2v2p_results = list()
cpu_results = list()
for packet_size in packet_sizes:
results = test_p2v2p_single_packet_size(nr_of_flows, packet_size,
decrease_rate=100
- config.traffic_rate)
cpu_results.append(results["cpu_stats"])
p2v2p_results.append(results["rx_packets_second"])
create_single_graph(packet_sizes, p2v2p_results,
"Packet size", "Packets/second",
"Physical to Virtual back to Physical with {} {} "
"flows{}".format(nr_of_flows, get_flow_type_short(),
get_traffic_rate_str()),
"test_p2v2p_{}_{}".format(nr_of_flows,
get_flow_type_name()),
phy_speed,
cpu_utilization=cpu_results)
return p2v2p_results, cpu_results
#
# Run simple traffic test Physical to VM
#
def test_p2v(nr_of_flows, packet_sizes):
p2v_results = list()
cpu_results = list()
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2}, rate={3:.3f}%)]"
" START".format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size, config.traffic_rate))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.physical_interface],
of_interfaces[config.virtual_interface])
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(
config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address,
percentage=config.traffic_rate * 10000,
random_payload=config.payload_packet_random)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows, config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done and not config.warm_up_no_fail:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_rx_start = get_of_port_packet_stats(
of_interfaces[config.physical_interface])[2]
vp_tx_start, vp_tx_drop_start = get_of_port_packet_stats(
of_interfaces[config.virtual_interface])[0:2]
##################################################
lprint(" * Start packet receiver on VM...")
start_traffic_rx_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(
config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Stop packet receiver on VM...")
stop_traffic_rx_on_vm(config.dut_vm_address)
##################################################
lprint(" * Gathering statistics...")
tester.take_tx_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(
config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
pp_rx_end = get_of_port_packet_stats(
of_interfaces[config.physical_interface])[2]
vp_tx_end, vp_tx_drop_end = get_of_port_packet_stats(
of_interfaces[config.virtual_interface])[0:2]
pp_rx = pp_rx_end - pp_rx_start
vp_tx = vp_tx_end - vp_tx_start
vp_tx_drop = vp_tx_drop_end - vp_tx_drop_start
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_vm_address)
packets_tx = full_tx_stats[sorted(
full_tx_stats.keys())[-1]]['pt_total']['packets']
lprint(" - Packets send by Tester {:,}".format(packets_tx))
lprint(" - Packets received by physical port {:,} [Lost {:,}]".
format(pp_rx, packets_tx - pp_rx))
lprint(" - Packets received by virtual port {:,} [Lost {:,}]".
format(vp_tx, pp_rx - vp_tx))
lprint(" - Packets dropped by virtual port {:,}".
format(vp_tx_drop))
lprint(" ! Result, average: {:,} pps".format(vm_pkts_sec))
p2v_results.append(vm_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && '
# 'ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, p2v_results,
"Packet size", "Packets/second",
"Physical to Virtual with {} {} flows{}".
format(nr_of_flows, get_flow_type_short(),
get_traffic_rate_str()),
"test_p2v_{}_{}".
format(nr_of_flows, get_flow_type_name()),
phy_speed, cpu_utilization=cpu_results)
return p2v_results, cpu_results
#
# Run simple traffic test Physical to Physical
#
def test_p2p(nr_of_flows, packet_sizes):
p2p_results = list()
cpu_results = list()
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2}, rate={3:.3f}%))]"
" START".format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size, config.traffic_rate))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.physical_interface],
of_interfaces[config.second_physical_interface])
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(
config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address,
percentage=config.traffic_rate * 10000,
random_payload=config.payload_packet_random)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows, config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done and not config.warm_up_no_fail:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
tester.clear_statistics(config.second_tester_interface)
pp_tx_start, pp_tx_drop_start, pp_rx_start, pp_rx_drop_start \
= get_of_port_packet_stats(
of_interfaces[config.physical_interface])
rpp_tx_start, rpp_tx_drop_start, rpp_rx_start, rpp_rx_drop_start \
= get_of_port_packet_stats(
of_interfaces[config.second_physical_interface])
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".
format(config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
tester.take_rx_statistics_snapshot(config.second_tester_interface)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Gathering statistics...")
tester.take_tx_statistics_snapshot(config.tester_interface)
tester.take_rx_statistics_snapshot(config.second_tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(
config.tester_interface)
full_rx_stats = tester.get_rx_statistics_snapshots(
config.second_tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
slogger.debug(" full_rx_stats={}".format(full_rx_stats))
pp_tx_end, pp_tx_drop_end, pp_rx_end, pp_rx_drop_end \
= get_of_port_packet_stats(
of_interfaces[config.physical_interface])
rpp_tx_end, rpp_tx_drop_end, rpp_rx_end, rpp_rx_drop_end \
= get_of_port_packet_stats(
of_interfaces[config.second_physical_interface])
pp_rx = pp_rx_end - pp_rx_start
pp_rx_drop = pp_rx_drop_end - pp_rx_drop_start
rpp_tx = rpp_tx_end - rpp_tx_start
rpp_tx_drop = rpp_tx_drop_end - rpp_tx_drop_start
packets_tx = full_tx_stats[sorted(
full_tx_stats.keys())[-1]]['pt_total']['packets']
packets_rx = full_rx_stats[sorted(
full_rx_stats.keys())[-1]]['pr_total']['packets']
lprint(" - Packets send by Tester : {:-20,}".format(
packets_tx))
lprint(" - Packets received by physical : {:-20,} [Lost {:,}, "
"Drop {:,}]".format(pp_rx, packets_tx - pp_rx, pp_rx_drop))
lprint(" - Packets send by second physical: {:-20,} [Lost {:,}, "
"Drop {:,}]".format(rpp_tx, pp_rx - rpp_tx, rpp_tx_drop))
lprint(" - Packets received by Tester : {:-20,} [Lost {:,}]".
format(packets_rx, rpp_tx - packets_rx))
rx_pkts_sec = get_packets_per_second_from_traffic_generator_rx_stats(
full_rx_stats)
lprint(" ! Result, average: {:,} pps".format(rx_pkts_sec))
p2p_results.append(rx_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && '
# 'ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, p2p_results,
"Packet size", "Packets/second",
"Physical to Physical with {} {} flows{}".
format(nr_of_flows, get_flow_type_short(),
get_traffic_rate_str()),
"test_p2p_{}_{}".
format(nr_of_flows, get_flow_type_name()),
phy_speed, cpu_utilization=cpu_results)
return p2p_results, cpu_results
#
# Run simple traffic test Physical loopback
#
def test_p_single_packet_size(nr_of_flows, packet_size, **kwargs):
decrease_rate = kwargs.get("decrease_rate", 0)
    assert 0 <= decrease_rate < 100
decrease_rate *= 10000
results = dict()
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2}, rate={3:.2f}%)] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size, (1000000 - decrease_rate) / 10000))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.physical_interface],
"IN_PORT")
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address,
percentage=1000000 - decrease_rate,
random_payload=config.payload_packet_random
)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows,
config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done and not config.warm_up_no_fail:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_tx_start, pp_tx_drop_start, pp_rx_start, pp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(
config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
tester.take_rx_statistics_snapshot(config.tester_interface)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Gathering statistics...")
tester.take_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(config.tester_interface)
full_rx_stats = tester.get_rx_statistics_snapshots(config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
slogger.debug(" full_rx_stats={}".format(full_rx_stats))
pp_tx_end, pp_tx_drop_end, pp_rx_end, pp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
pp_rx = pp_rx_end - pp_rx_start
pp_tx = pp_tx_end - pp_tx_start
pp_rx_drop = pp_rx_drop_end - pp_rx_drop_start
pp_tx_drop = pp_tx_drop_end - pp_tx_drop_start
packets_tx = full_tx_stats[sorted(
full_tx_stats.keys())[-1]]['pt_total']['packets']
packets_rx = full_rx_stats[sorted(
full_rx_stats.keys())[-1]]['pr_total']['packets']
lprint(" - Packets send by Tester : {:-20,}".format(packets_tx))
lprint(" - Packets received by physical: {:-20,} [Lost {:,}, Drop "
"{:,}]".format(pp_rx, packets_tx - pp_rx, pp_rx_drop))
lprint(" - Packets send by physical : {:-20,} [Lost {:,}, Drop "
"{:,}]".format(pp_tx, pp_rx - pp_tx, pp_tx_drop))
lprint(" - Packets received by Tester : {:-20,} [Lost {:,}]".
format(packets_rx, pp_tx - packets_rx))
rx_pkts_sec = get_packets_per_second_from_traffic_generator_rx_stats(
full_rx_stats)
lprint(" ! Result, average: {:,} pps".format(rx_pkts_sec))
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && '
# 'ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
results["cpu_stats"] = get_cpu_monitoring_stats()
results["rx_packets_second"] = rx_pkts_sec
results["total_tx_pkts"] = packets_tx
results["total_rx_pkts"] = packets_rx
return results
#
# Run simple traffic test Physical loopback
#
def test_p(nr_of_flows, packet_sizes):
p_results = list()
cpu_results = list()
for packet_size in packet_sizes:
results = test_p_single_packet_size(nr_of_flows, packet_size,
decrease_rate=100
- config.traffic_rate)
cpu_results.append(results["cpu_stats"])
p_results.append(results["rx_packets_second"])
create_single_graph(packet_sizes, p_results,
"Packet size", "Packets/second",
"Physical loopback with {} {} "
"flows{}".format(nr_of_flows, get_flow_type_short(),
get_traffic_rate_str()),
"test_p_{}_{}".format(nr_of_flows,
get_flow_type_name()),
phy_speed,
cpu_utilization=cpu_results)
return p_results, cpu_results
#
# Run zero loss traffic test Physical to VM back to Physical
#
def test_p2v2p_zero_loss(stream_size_list, packet_size_list, **kwargs):
csv_handle = kwargs.pop("csv_handle", None)
zero_loss_step = kwargs.pop("zero_loss_step", 1)
flow_str = get_flow_type_short()
flow_file_str = get_flow_type_name()
test_results = dict()
for nr_of_streams in stream_size_list:
test_results[nr_of_streams] = dict()
for packet_size in packet_size_list:
results, index = binary_search(
1, 100, 0.00001,
PVP_binary_search_single_run,
PVP_binary_search_itteration_result,
bs_step=zero_loss_step,
packet_size=packet_size,
nr_of_streams=nr_of_streams)
for dump_index in natsorted(list(results.keys())):
result = results[dump_index]
lprint(
" > Results: load {:.6f}%, rate {} pps, miss {:.6f}%".
format(result["traffic_rate"],
result["rx_packets_second"],
calc_loss_percentage(result)))
if index >= 1:
test_results[nr_of_streams][packet_size] = \
results[index]
lprint(" ! Zero pkt loss @ pkt {}, load {:.6f}%, "
"miss {:.6f}%, rx rate {:,.0f} pps".
format(packet_size, index,
calc_loss_percentage(
results[index]),
test_results[nr_of_streams][packet_size]
["rx_packets_second"]))
else:
test_results[nr_of_streams][packet_size] = results[
min(results)]
lprint(" ! Zero pkt loss for {} bytes, NOT reached!!".
format(packet_size))
pvp0_results, pvp0_cpu_results, pvp0_traffic_rate, pvp0_loss_rate \
= get_result_sets_from_zero_loss_results(test_results)
#
# Write the per flow size graphs
#
create_single_graph(
packet_size_list, pvp0_results[nr_of_streams],
"Packet size", "Packets/second",
"Physical to Virtual back to Physical Zero Loss "
"with {} {} flows".format(nr_of_streams, flow_str),
"test_p2v2p_zero_{}_{}".format(nr_of_streams, flow_file_str),
phy_speed,
cpu_utilization=pvp0_cpu_results[nr_of_streams],
zero_loss_traffic_rate=pvp0_traffic_rate[nr_of_streams],
zero_loss_loss_rate=pvp0_loss_rate[nr_of_streams]
)
#
# This might look like a wrong indentation, but we would like to update
# the graph every stream run so we have a graph in case of a failure.
#
create_multiple_graph(packet_size_list, pvp0_results,
"Packet size", "Packets/second",
"Physical to Virtual to Physical Zero Loss, {}".
format(flow_str),
"test_p2v2p_zero_all_{}".
format(flow_file_str),
None, cpu_utilization=pvp0_cpu_results)
create_multiple_graph(packet_size_list, pvp0_results,
"Packet size", "Packets/second",
"Physical to Virtual to Physical Zero Loss, {}".
format(flow_str),
"test_p2v2p_zero_all_{}_ref".
format(flow_file_str),
[phy_speed],
cpu_utilization=pvp0_cpu_results)
if csv_handle is not None:
csv_write_test_results(
csv_handle,
'Zero Loss Physical to Virtual to Physical test',
stream_size_list, packet_size_list,
pvp0_results, pvp0_cpu_results, loss_rate=pvp0_loss_rate,
traffic_rate=pvp0_traffic_rate)
#
# Get the P results for a single binary search iteration
#
def P_binary_search_single_run(test_value, **kwargs):
packet_size = kwargs.get("packet_size", 64)
nr_of_streams = kwargs.get("nr_of_streams", 10)
results = test_p_single_packet_size(nr_of_streams, packet_size,
decrease_rate=100 - test_value)
results["traffic_rate"] = test_value
lprint(" > Zero pkt loss: pkt {}, load {:.6f}%, miss {:.6f}%".
format(packet_size, test_value,
calc_loss_percentage(results)))
return results
#
# Compute the physical loopback loss percentage for a binary search iteration
#
def P_binary_search_itteration_result(result_values, test_value, **kwargs):
return calc_loss_percentage(result_values[test_value])
#
# Run simple traffic test Physical loopback zero loss
#
def test_p_zero_loss(stream_size_list, packet_size_list, **kwargs):
csv_handle = kwargs.pop("csv_handle", None)
zero_loss_step = kwargs.pop("zero_loss_step", 1)
flow_str = get_flow_type_short()
flow_file_str = get_flow_type_name()
test_results = dict()
for nr_of_streams in stream_size_list:
test_results[nr_of_streams] = dict()
for packet_size in packet_size_list:
results, index = binary_search(
1, 100, 0.00001,
P_binary_search_single_run,
P_binary_search_itteration_result,
bs_step=zero_loss_step,
packet_size=packet_size,
nr_of_streams=nr_of_streams)
for dump_index in natsorted(list(results.keys())):
result = results[dump_index]
lprint(
" > Results: load {:.6f}%, rate {} pps, miss {:.6f}%".
format(result["traffic_rate"],
result["rx_packets_second"],
calc_loss_percentage(result)))
if index >= 1:
test_results[nr_of_streams][packet_size] = \
results[index]
lprint(" ! Zero pkt loss @ pkt {}, load {:.6f}%, "
"miss {:.6f}%, rx rate {:,.0f} pps".
format(packet_size, index,
calc_loss_percentage(
results[index]),
test_results[nr_of_streams][packet_size]
["rx_packets_second"]))
else:
test_results[nr_of_streams][packet_size] = \
results[min(results)]
lprint(" ! Zero pkt loss for {} bytes, NOT reached!!".
format(packet_size))
p0_results, p0_cpu_results, p0_traffic_rate, p0_loss_rate \
= get_result_sets_from_zero_loss_results(test_results)
#
# Write the per flow size graphs
#
create_single_graph(
packet_size_list, p0_results[nr_of_streams],
"Packet size", "Packets/second",
"Physical Loopback Zero Loss "
"with {} {} flows".format(nr_of_streams, flow_str),
"test_p_zero_{}_{}".format(nr_of_streams, flow_file_str),
phy_speed,
cpu_utilization=p0_cpu_results[nr_of_streams],
zero_loss_traffic_rate=p0_traffic_rate[nr_of_streams],
zero_loss_loss_rate=p0_loss_rate[nr_of_streams]
)
#
# This might look like a wrong indentation, but we would like to update
# the graph every stream run so we have a graph in case of a failure.
#
create_multiple_graph(packet_size_list, p0_results,
"Packet size", "Packets/second",
"Physical Loopback Zero Loss, {}".
format(flow_str),
"test_p_zero_all_{}".
format(flow_file_str),
None, cpu_utilization=p0_cpu_results)
create_multiple_graph(packet_size_list, p0_results,
"Packet size", "Packets/second",
"Physical Loopback Zero Loss, {}".
format(flow_str),
"test_p_zero_all_{}_ref".
format(flow_file_str),
[phy_speed],
cpu_utilization=p0_cpu_results)
if csv_handle is not None:
csv_write_test_results(
csv_handle,
'Zero Loss Physical Loopback test',
stream_size_list, packet_size_list,
p0_results, p0_cpu_results, loss_rate=p0_loss_rate,
traffic_rate=p0_traffic_rate)
#
# Run VXLAN test
#
# TODO: This is only tested on OVS-DPDK, need modular support
# so it will work on kernel (hw offload) datapath.
#
# Also needs encap test, and encap-decap test.
#
# Also note that this test will not distribute the
# load among rx queues, as the outer IP+UDP headers
# do not change. Varying the source UDP port of the
# outer header would solve this, but we have no more
# modifiers. We could use a destination-IP-only OF
# rule and use the source IP counters for the source
# UDP port.
#
def test_vxlan(nr_of_flows, packet_sizes):
vxlan_results = list()
cpu_results = list()
tunnel_bridge = (config.bridge_name + "_tterm")[:15]
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2}, rate={3:.3f}%)]"
" START".format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size, config.traffic_rate))
##################################################
lprint(" * Get bridge MAC address...")
tunnel_dst_mac = get_of_bridge_mac_address(tunnel_bridge)
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces['vxlan0'],
of_interfaces[config.virtual_interface])
##################################################
if ovs_data_path == "netdev":
#
# For DPDK data path only
#
lprint(" * Setup neighbor entry...")
dut_shell.dut_exec('sh -c "ovs-appctl tnl/neigh/set {} '
' 3.1.1.2 00:00:00:00:00:01"'.format(
tunnel_bridge), die_on_error=True)
dut_shell.dut_exec('sh -c "ip addr add 3.1.1.1/24 dev {0};'
'ip link set {0} up"'.format(tunnel_bridge),
die_on_error=True)
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(
config.tester_interface,
TrafficFlowType.vxlan_l3_ipv4,
nr_of_flows, packet_size,
tunnel_dst_mac=tunnel_dst_mac,
traffic_dst_mac=config.dst_mac_address,
percentage=config.traffic_rate * 10000,
random_payload=config.payload_packet_random)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_rx_start = get_of_port_packet_stats(
of_interfaces[config.physical_interface], bridge=tunnel_bridge)[2]
vp_tx_start, vp_tx_drop_start = get_of_port_packet_stats(
of_interfaces[config.virtual_interface])[0:2]
##################################################
lprint(" * Start packet receiver on VM...")
start_traffic_rx_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".
format(config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Stop packet receiver on VM...")
stop_traffic_rx_on_vm(config.dut_vm_address)
##################################################
lprint(" * Gathering statistics...")
tester.take_tx_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(
config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
pp_rx_end = get_of_port_packet_stats(
of_interfaces[config.physical_interface], bridge=tunnel_bridge)[2]
vp_tx_end, vp_tx_drop_end = get_of_port_packet_stats(
of_interfaces[config.virtual_interface])[0:2]
pp_rx = pp_rx_end - pp_rx_start
vp_tx = vp_tx_end - vp_tx_start
vp_tx_drop = vp_tx_drop_end - vp_tx_drop_start
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_vm_address)
packets_tx = full_tx_stats[sorted(
full_tx_stats.keys())[-1]]['pt_total']['packets']
        lprint("  - Packets sent by Tester {:,}".format(packets_tx))
lprint(" - Packets received by physical port {:,} [Lost {:,}]".
format(pp_rx, packets_tx - pp_rx))
lprint(" - Packets received by virtual port {:,} [Lost {:,}]".
format(vp_tx, pp_rx - vp_tx))
lprint(" - Packets dropped by virtual port {:,}".
format(vp_tx_drop))
lprint(" ! Result, average: {:,} pps".format(vm_pkts_sec))
vxlan_results.append(vm_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, vxlan_results,
"Packet size", "Packets/second",
"VXLAN Tunnel with {} {} flows{}".
format(nr_of_flows, get_flow_type_short(),
get_traffic_rate_str()),
"test_vxlan_{}_{}".
format(nr_of_flows, get_flow_type_name()),
phy_speed, cpu_utilization=cpu_results)
return vxlan_results, cpu_results
#
# Count datapath flows
#
def get_active_datapath_flows():
if ovs_data_path == "netdev":
cmd = 'sh -c "ovs-appctl dpctl/show netdev@ovs-netdev | ' \
'grep flows | awk \'{print $2}\'"'
else:
if ovs_tc_enabled:
cmd = 'sh -c "ovs-appctl dpctl/dump-flows system@ovs-system | ' \
'wc -l"'
else:
cmd = 'sh -c "ovs-appctl dpctl/show system@ovs-system | ' \
'grep flows | awk \'{print $2}\'"'
result = dut_shell.dut_exec(cmd, die_on_error=True)
return int(result.stdout_output)
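#
# Illustrative example of the output parsed above (values made up): for
# the netdev datapath, "ovs-appctl dpctl/show netdev@ovs-netdev" prints a
# line like "  flows: 42", so the grep/awk pipeline returns "42". For the
# kernel datapath with tc enabled we count the dump-flows lines instead,
# presumably because the "flows:" counter does not include tc-offloaded
# flows.
#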
#
# Warm up verification
#
def warm_up_verify(requested_flows, timeout):
run_time = 0
active_flows = 0
while active_flows < requested_flows:
run_time += 1
if timeout != 0 and run_time >= timeout:
if config.flow_rule_type == "flows":
lprint("ERROR: Failed to complete warm-up in time "
"({} seconds)!".format(timeout))
else:
lprint(" * Failed to complete warm-up as expected!")
return False
time.sleep(1)
active_flows = get_active_datapath_flows()
#
# Flows exist, we can continue now
#
return True
#
# Wait for datapath flows to flush
#
def flow_table_cool_down(failure_fatal=True):
run_time = 0
active_flows = 0
if config.warm_up or not config.no_cool_down:
lprint(" * Doing flow table cool-down...")
active_flows = get_active_datapath_flows()
while active_flows > 32:
run_time += 1
if run_time >= 20:
if failure_fatal:
lprint("ERROR: Failed to complete cool-down in time "
"(20 seconds)!")
sys.exit(-1)
else:
lprint("WARNING: Failed to complete cool-down in time "
"(20 seconds)!")
break
active_flows = get_active_datapath_flows()
time.sleep(1)
#
# Flush all OVS flows
#
def flush_ovs_flows():
# data_path = "system@ovs-system"
#
    # For now we only flush the OpenFlow rules for netdev, because as soon as
    # we flush the datapath rules, no more flows get added to the datapath.
    #
    # However, other vendors also have trouble when flushing the datapath.
#
# if ovs_data_path == "netdev":
# data_path = "netdev@ovs-netdev"
#
# cmd = 'sh -c "ovs-ofctl del-flows {0}; ' \
# 'ovs-appctl dpctl/del-flows {1}"'. \
# format(config.bridge_name, data_path)
cmd = 'sh -c "ovs-ofctl del-flows {0}"'. \
format(config.bridge_name)
dut_shell.dut_exec(cmd, die_on_error=True)
flow_table_cool_down(failure_fatal=False)
time.sleep(2)
#
# Dump openflow port statistics to logfile
#
def of_dump_port_to_logfile(bridge):
return dut_shell.dut_exec("ovs-ofctl dump-ports {}".format(bridge),
die_on_error=True)
#
# Static definition of a script to wait for testpmd CPU load >= 150%
#
DELAY_TEST_PMD = r'for i in {1..30}; do ' \
r' TEST_PMD_LOAD=$(top -b -n 2 -d 1 -p $(pidof testpmd) | ' \
r' tail -1 | awk "{\$1=\$1;print}" | ' \
r' tr -s " " | cut -d " " -f 9); ' \
r' TEST_PMD_LOAD=${TEST_PMD_LOAD%%.*}; ' \
r' if [[ $TEST_PMD_LOAD -ge "150" ]]; then ' \
r' break; ' \
r' fi ' \
r'done'
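#
# Illustrative breakdown of the script above (assuming the standard
# procps "top -b" column layout, where field 9 is %CPU): the last line of
# "top -b -n 2 -d 1 -p <pid>" is the testpmd row, e.g.
#   1234 root  20  0 4.5g 1.1g 4.0m R 150.0 0.9 0:10.42 testpmd
# We strip the fraction and poll up to 30 times until testpmd reaches at
# least 150% CPU, i.e. the main lcore plus a forwarding core are spinning.
#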
#
# Start packet receive application on VM
#
def start_traffic_rx_on_vm(vm, pci):
cpu_mask = ((1 << (config.dut_vm_nic_queues + 1)) - 1)
pmd_cpu_mask = cpu_mask & ~0x1
disable_hw_vlan = " --disable-hw-vlan" if vm_dpdk_version < \
Version('18.2.0') else ""
legacy_mem = " --legacy-mem" if vm_dpdk_version >= \
Version('18.5.0') else ""
auto_delay = DELAY_TEST_PMD if config.testpmd_startup_delay == 0 else ""
pci_flag = "-w" if vm_dpdk_version < Version('20.11.0') else "-a"
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup sh -c " \
r' "(while sleep 1; do echo show port stats 0; done | ' \
r" testpmd -c {5:x} -n 4 --socket-mem 2048,0 {12} {3}{10} -- "\
r" --burst 64 -i --rxq={4} --txq={4} --rxd={8} " \
r" --txd={9} --auto-start --forward-mode=rxonly " \
r' --port-topology=chained --coremask={6:x}{7})" ' \
r" &>results.txt &{11}'". \
format(vm, config.dut_vm_user, config.dut_vm_password, pci,
config.dut_vm_nic_queues, cpu_mask, pmd_cpu_mask,
disable_hw_vlan, config.dut_vm_nic_rxd,
config.dut_vm_nic_txd, legacy_mem, auto_delay, pci_flag)
dut_shell.dut_exec_shell(cmd, die_on_error=True)
if config.testpmd_startup_delay > 0:
time.sleep(config.testpmd_startup_delay)
#
# Stop packet receive application on VM
#
def stop_traffic_rx_on_vm(vm, **kwargs):
die = kwargs.pop("die", True)
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'TESTPMD_PID=$(pidof testpmd); kill -SIGINT $TESTPMD_PID; " \
r"timeout 4 tail --pid=$TESTPMD_PID -f /dev/null'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=die)
#
# Start packet receive and loop application on VM
#
def start_traffic_loop_on_vm(vm, pci):
cpu_mask = ((1 << (config.dut_vm_nic_queues + 1)) - 1)
pmd_cpu_mask = cpu_mask & ~0x1
mac_swap = " --forward-mode=macswap" if config.mac_swap else ""
disable_hw_vlan = " --disable-hw-vlan" if vm_dpdk_version < \
Version('18.2.0') else ""
legacy_mem = " --legacy-mem" if vm_dpdk_version >= \
Version('18.5.0') else ""
auto_delay = DELAY_TEST_PMD if config.testpmd_startup_delay == 0 else ""
pci_flag = "-w" if vm_dpdk_version < Version('20.11.0') else "-a"
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup sh -c " \
r' "(while sleep 1; do echo show port stats 0; done | ' \
r" testpmd -c {5:x} -n 4 --socket-mem 2048,0 {13} {3}{11} -- "\
r" --burst 64 -i --rxq={4} --txq={4} --rxd={9} " \
r" --txd={10} --coremask={6:x} --auto-start " \
r' --port-topology=chained{7}{8})" ' \
r" &>results.txt &{12}'". \
format(vm, config.dut_vm_user, config.dut_vm_password, pci,
config.dut_vm_nic_queues, cpu_mask, pmd_cpu_mask,
mac_swap, disable_hw_vlan, config.dut_vm_nic_rxd,
config.dut_vm_nic_txd, legacy_mem, auto_delay, pci_flag)
dut_shell.dut_exec_shell(cmd, die_on_error=True)
if config.testpmd_startup_delay > 0:
time.sleep(config.testpmd_startup_delay)
#
# Stop packet receive and loop application on VM
#
def stop_traffic_loop_on_vm(vm):
stop_traffic_rx_on_vm(vm)
#
# Get traffic receive stats from application on VM
#
def get_traffic_rx_stats_from_vm(vm, **kwargs):
skip_samples = kwargs.pop("skip_samples", 0)
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
"'cat ~/results.txt | grep -E \"Rx-pps|Tx-pps\"'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=True)
pkt_rates = [int(re.sub(r'^\s*Rx-pps:\s*', '', s))
for s in re.findall(r'^\s*Rx-pps:\s*\d+',
result.stdout_output,
re.MULTILINE)]
if skip_samples > 0:
pkt_rates = pkt_rates[skip_samples:]
if len(pkt_rates) <= 10:
lprint("ERROR: Not enough elements to calculate packet rate!")
sys.exit(-1)
pkt_rates = pkt_rates[5:-5]
return sum(pkt_rates) / len(pkt_rates)
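#
# Illustrative example (assuming testpmd's periodic "show port stats 0"
# output in ~/results.txt):
#   Rx-pps:      1440000
#   Tx-pps:            0
# All Rx-pps samples are collected; the requested leading samples plus
# five entries from each end are dropped, and the remaining samples are
# averaged into a single packets/second figure.
#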
#
# Start packet generation application on VM
#
def start_traffic_tx_on_vm(vm, nr_of_flows, packet_size):
if config.flow_type == 'L2':
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup /bin/trafgen -c 3 -n 4 -- -p 1 --benchmark " \
r"--flows-per-stream 1 --bursts-per-stream 1 --streams {3} " \
r"--src-mac {5} --dst-mac {6} " \
r"--src-ip 1.0.0.0 --dst-ip 2.0.0.0 --packet-size {4} " \
r"--vary-src mac --vary-dst mac -s ~/results.txt" \
r"> /dev/null 2>&1 &'". \
format(vm, config.dut_vm_user,
config.dut_vm_password, nr_of_flows, packet_size,
config.src_mac_address, config.dst_mac_address)
elif config.flow_type == 'L3':
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup /bin/trafgen -c 3 -n 4 -- -p 1 --benchmark " \
r"--flows-per-stream 1 --bursts-per-stream 1 --streams {3} " \
r"--src-mac {5} --dst-mac {6} " \
r"--src-ip 1.0.0.0 --dst-ip 2.0.0.0 --packet-size {4} " \
r"--vary-src ip --vary-dst ip -s ~/results.txt" \
r"> /dev/null 2>&1 &'". \
format(vm, config.dut_vm_user,
config.dut_vm_password, nr_of_flows, packet_size,
config.src_mac_address, config.dst_mac_address)
elif config.flow_type == 'L4-UDP':
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup /bin/trafgen -c 3 -n 4 -- -p 1 --benchmark " \
r"--flows-per-stream 1 --bursts-per-stream 1 --streams {3} " \
r"--src-mac {5} --dst-mac {6} " \
r"--src-ip 1.0.0.0 --dst-ip 2.0.0.0 --packet-size {4} " \
r"--src-port 0 --dst-port 0 " \
r"--vary-src port --vary-dst port -s ~/results.txt" \
r"> /dev/null 2>&1 &'". \
format(vm, config.dut_vm_user,
config.dut_vm_password, nr_of_flows, packet_size,
config.src_mac_address, config.dst_mac_address)
else:
        raise ValueError("No support for this protocol!!")
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Stop packet generation application on VM
#
def stop_traffic_tx_on_vm(vm, **kwargs):
die = kwargs.pop("die", True)
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'kill -SIGINT `pidof trafgen`'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=die)
#
# Get traffic transmit stats from application on VM
#
def get_traffic_tx_stats_from_vm(vm):
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'cat ~/results.txt | grep port0.tx_packets'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=True)
return get_packets_per_second_from_pkt_counters(result.stdout_output, 5)
#
# Get packets per second from the traffic generator's rx statistics
#
def get_packets_per_second_from_traffic_generator_rx_stats(rx_stats):
avg = cnt = 0
for timestamp in natsorted(list(rx_stats.keys()))[2:-2]:
stats = rx_stats[timestamp]
pps = stats['pr_total']['pps']
avg += pps
cnt += 1
return avg / cnt
#
# Get packets per second from the traffic generator's tx statistics
#
def get_packets_per_second_from_traffic_generator_tx_stats(tx_stats):
avg = cnt = 0
for timestamp in natsorted(list(tx_stats.keys()))[2:-2]:
stats = tx_stats[timestamp]
pps = stats['pt_total']['pps']
avg += pps
cnt += 1
return avg / cnt
#
# Get packets per second from a string of packet count values.
# It may strip a number of entries from the start and end, and then
# return the average value.
#
def get_packets_per_second_from_pkt_counters(counters, strip):
    slogger.info("get_packets_per_second_from_pkt_counters(\"{}\", {})".
                 format(counters, strip))
    counters_clean = re.sub(r'.+:\s?', '', counters)
    counter_list = list(map(int, counters_clean.split()))
if strip < 0 or (len(counter_list) - (strip * 2)) < 2:
lprint("ERROR: Not enough elements to calculate packet rate!")
sys.exit(-1)
if strip > 0:
del counter_list[:strip]
del counter_list[-strip:]
slogger.info("[gppsfc] Work list \"{}\"".format(counter_list))
pkts_sec = 0
for i in range(1, len(counter_list)):
pkts_sec = pkts_sec + (counter_list[i] - counter_list[i - 1])
pkts_sec = pkts_sec / (len(counter_list) - 1)
slogger.info("[gppsfc] pkts/sec = {:,}".format(pkts_sec))
return pkts_sec
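#
# Worked example: if the counters string holds one sample per line, e.g.
#   port0.tx_packets: 0
#   port0.tx_packets: 100
#   port0.tx_packets: 300
#   port0.tx_packets: 600
# then with strip=1 the work list becomes [100, 300], and the returned
# average rate is (300 - 100) / (2 - 1) = 200 packets/second.
#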
#
# Add OVS OpenFlow rules
#
def create_ovs_of_rules(number_of_flows, src_port, dst_port, **kwargs):
if config.flow_rule_type == "flows":
if config.flow_type == 'L2':
create_ovs_l2_of_rules(number_of_flows,
src_port, dst_port, **kwargs)
elif config.flow_type == 'L3':
create_ovs_l3_of_rules(number_of_flows,
src_port, dst_port, **kwargs)
elif config.flow_type == 'L4-UDP':
create_ovs_l4_of_rules(number_of_flows,
src_port, dst_port, **kwargs)
else:
raise ValueError("No support for this protocol!!")
elif config.flow_rule_type == "NORMAL":
create_ovs_of_normal_rule(**kwargs)
elif config.flow_rule_type == "port":
create_ovs_of_phy_rule(src_port, dst_port, **kwargs)
elif config.flow_rule_type == "none":
slogger.debug("No rules installed due to flow-rule-type=none")
else:
raise ValueError("No support for this flow rule type!!")
#
# Add OVS OpenFlow rules
#
def create_ovs_bidirectional_of_rules(number_of_flows, src_port,
dst_port, **kwargs):
if config.flow_rule_type == "flows":
if config.flow_type == 'L2':
create_ovs_bidirectional_l2_of_rules(number_of_flows, src_port,
dst_port, **kwargs)
elif config.flow_type == 'L3':
create_ovs_bidirectional_l3_of_rules(number_of_flows, src_port,
dst_port, **kwargs)
elif config.flow_type == 'L4-UDP':
create_ovs_bidirectional_l4_of_rules(number_of_flows, src_port,
dst_port, **kwargs)
else:
raise ValueError("No support for this protocol!!")
elif config.flow_rule_type == "NORMAL":
create_ovs_of_normal_rule(**kwargs)
elif config.flow_rule_type == "port":
create_ovs_bidirectional_of_phy_rules(src_port, dst_port)
elif config.flow_rule_type == "none":
slogger.debug("No rules installed due to flow-rule-type=none")
else:
raise ValueError("No support for this flow rule type!!")
#
# Add OVS OpenFlow rule from physical to physical, and reverse
#
def create_ovs_bidirectional_of_phy_rules(src_port, dst_port):
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0}"'.
format(config.bridge_name),
die_on_error=True)
lprint(" * Create two OpenFlow physical to physical rules...")
cmd = "ovs-ofctl add-flow {0} in_port={1},action={2} && " \
"ovs-ofctl add-flow {0} in_port={2},action={1}". \
format(config.bridge_name,
src_port, dst_port)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
    lprint(" * Verify that the physical port flows exist...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\'"'.
format(config.bridge_name),
die_on_error=True)
if result.output.count('\n') != 2:
        lprint("ERROR: Only 2 flows should exist, but there are {0}!".
format(result.output.count('\n') - 1))
sys.exit(-1)
#
# Add OVS OpenFlow rule from physical to physical
#
def create_ovs_of_phy_rule(src_port, dst_port, **kwargs):
clear_rules = kwargs.pop("clear_rules", True)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
lprint(" * Create OpenFlow physical to physical rules...")
cmd = "ovs-ofctl add-flow {0} in_port={1},action={2}". \
format(config.bridge_name, src_port, dst_port)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
    lprint(" * Verify that the physical port flows exist...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\'"'.
format(config.bridge_name),
die_on_error=True)
if result.output.count('\n') != 1:
        lprint("ERROR: Only 1 flow should exist, but there are {0}!".
format(result.output.count('\n') - 1))
sys.exit(-1)
#
# Add OVS L2 OpenFlow rules
#
def create_ovs_l2_of_rules(number_of_flows, src_port, dst_port, **kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
mac_swap = kwargs.pop("mac_swap", False)
base_mac = mac_2_int(config.dst_mac_address if not mac_swap
else config.src_mac_address) & 0xffffff000000
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L2 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range({4}, {0}): " \
"print(\"add in_port={2}," \
"dl_dst=" \
"{{0:02x}}:{{1:02x}}:{{2:02x}}:{{3:02x}}:{{4:02x}}:{{5:02x}}," \
"action=" \
"{3}\".format((i >> 40) & 0xff, (i >> 32) & 0xff, (i >> 24) " \
"& 0xff, (i >> 16) & 0xff, (i >> 8) & 0xff, i & 0xff))'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows + base_mac, config.bridge_name,
src_port, dst_port, base_mac)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result = dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
            lprint("ERROR: Only {0} flows should exist, but there are {1}!".
                   format(total_nr_of_flows, int(result.stdout_output)))
sys.exit(-1)
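#
# Illustrative example of the generated rules: with src_port=10,
# dst_port=20, two flows, and a destination MAC base of
# 00:00:01:00:00:00, the inline python above prints
#   add in_port=10,dl_dst=00:00:01:00:00:00,action=20
#   add in_port=10,dl_dst=00:00:01:00:00:01,action=20
# which "ovs-ofctl add-flow <bridge> -" installs in a single batch.
#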
#
# Add OVS Bidirectional L2 OpenFlow rules
#
def create_ovs_bidirectional_l2_of_rules(number_of_flows,
src_port, dst_port, **kwargs):
create_ovs_l2_of_rules(number_of_flows,
src_port,
dst_port)
create_ovs_l2_of_rules(number_of_flows,
dst_port,
src_port,
total_number_of_flows=number_of_flows * 2,
clear_rules=False,
mac_swap=config.mac_swap)
#
# Add OVS OpenFlow NORMAL rule to bridge
#
def create_ovs_of_normal_rule(**kwargs):
clear_rules = kwargs.pop("clear_rules", True)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
cmd = "ovs-appctl fdb/flush {}".format(config.bridge_name)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
lprint(" * Create OpenFlow NORMAL rules...")
cmd = "ovs-ofctl add-flow {0} action=NORMAL". \
format(config.bridge_name)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
lprint(" * Verify that OpenFlow NORMAL flow exists...")
result = dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\'"'.
format(config.bridge_name),
die_on_error=True)
if result.output.count('\n') != 1:
        lprint("ERROR: Only 1 flow should exist, but there are {0}!".
format(result.output.count('\n')))
sys.exit(-1)
#
# Add OVS L3 OpenFlow rules
#
def create_ovs_l3_of_rules(number_of_flows, src_port, dst_port, **kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
ip_start_offset = kwargs.pop("ipv4_start", 0x01000000)
if number_of_flows > 1000000:
lprint("ERROR: Maximum of 1,000,000 L3 flows are supported!")
sys.exit(-1)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L3 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range({4}, {0}): " \
"print(\"add in_port={2}," \
"eth_type(0x800)," \
"nw_src={{}}.{{}}.{{}}.{{}},nw_dst={{}}.{{}}.{{}}.{{}}," \
"action={3}\".format(" \
"(i >> 24) & 0xff, (i >> 16) & 0xff," \
"(i >> 8) & 0xff, i & 0xff," \
"((i + 0x01000000) >> 24) & 0xff, ((i + 0x01000000) >> 16) & 0xff," \
"((i + 0x01000000) >> 8) & 0xff, (i + 0x01000000) & 0xff))'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows + ip_start_offset, config.bridge_name,
src_port, dst_port, ip_start_offset)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result = dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
            lprint("ERROR: Only {0} flows should exist, but there are {1}!".
                   format(total_nr_of_flows, int(result.stdout_output)))
sys.exit(-1)
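#
# Illustrative example of the generated L3 rules: with the default
# ipv4_start of 0x01000000 and src_port=10, dst_port=20, the first
# printed rule is
#   add in_port=10,eth_type(0x800),nw_src=1.0.0.0,nw_dst=2.0.0.0,action=20
# i.e. the destination address is always the source address shifted up
# by 0x01000000 (one in the first octet).
#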
#
# Add OVS Bidirectional L3 OpenFlow rules
#
def create_ovs_bidirectional_l3_of_rules(number_of_flows, src_port, dst_port,
**kwargs):
clear_rules = kwargs.pop("clear_rules", True)
total_nr_of_flows = kwargs.pop("total_number_of_flows",
number_of_flows * 2)
ip_start_offset = kwargs.pop("ipv4_start", 0x01000000)
create_ovs_l3_of_rules(number_of_flows,
src_port,
dst_port,
clear_rules=clear_rules,
total_number_of_flows=0,
ipv4_start=ip_start_offset)
create_ovs_l3_of_rules(number_of_flows,
dst_port,
src_port,
clear_rules=False,
total_number_of_flows=total_nr_of_flows,
ipv4_start=ip_start_offset)
#
# Add OVS OpenFlow rules for the /16 flow ranges we create
#
def create_ovs_bidirectional_l3_of_slash_16_rules(number_of_flows,
src_port, dst_port):
create_ovs_l3_of_slash_16_rules(number_of_flows,
src_port,
dst_port)
create_ovs_l3_of_slash_16_rules(number_of_flows,
dst_port,
src_port,
total_number_of_flows=number_of_flows * 2,
clear_rules=False)
def create_ovs_l3_of_slash_16_rules(number_of_flows,
src_port, dst_port,
**kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
if number_of_flows > 255:
lprint("ERROR: Maximum of 255 /16 flows are supported!")
sys.exit(-1)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L3 /16 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range(0, {0}): " \
"print(\"add in_port={2}," \
"eth_type(0x800),nw_src=1.{{0}}.0.0/16,nw_dst=2.{{0}}.0.0/16," \
"action={3}\".format(i))'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows, config.bridge_name,
src_port, dst_port)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result = dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
            lprint("ERROR: Only {0} flows should exist, but there are {1}!".
                   format(total_nr_of_flows, int(result.stdout_output)))
sys.exit(-1)
#
# Add OVS L4 OpenFlow rules
#
def create_ovs_l4_of_rules(number_of_flows, src_port, dst_port, **kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
if number_of_flows > 1000000:
lprint("ERROR: Maximum of 1,000,000 L4 flows are supported!")
sys.exit(-1)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0}"'.
format(config.bridge_name),
die_on_error=True)
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L4 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range(0, {0}): " \
"print(\"add in_port={2}," \
"udp,udp_src={{0}},udp_dst={{0}}," \
"action={3}\".format(i))'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows, config.bridge_name,
src_port, dst_port)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result = dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | '
'grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
            lprint("ERROR: Only {0} flows should exist, but there are {1}!".
                   format(total_nr_of_flows, int(result.stdout_output)))
sys.exit(-1)
#
# Add OVS Bidirectional L4 OpenFlow rules
#
def create_ovs_bidirectional_l4_of_rules(number_of_flows, src_port, dst_port,
**kwargs):
create_ovs_l4_of_rules(number_of_flows,
src_port,
dst_port)
create_ovs_l4_of_rules(number_of_flows,
dst_port,
src_port,
total_number_of_flows=number_of_flows * 2,
clear_rules=False)
#
# Add test bridge setup
#
def create_ovs_bridge():
lprint("- Configuring bridge...")
if "dpdk" in config.physical_interface:
dpdk = True
else:
dpdk = False
#
# Delete bridge if existing
#
dut_shell.dut_exec("ovs-vsctl -- --if-exists del-br {0} "
"-- --if-exists del-br {1}".
format(config.bridge_name,
(config.bridge_name + "_tterm")[:15]),
die_on_error=True)
#
# Create bridge and set data path if needed
#
command = "ovs-vsctl add-br {0} ".format(config.bridge_name)
if dpdk:
command += "-- set Bridge {} datapath_type=netdev ".format(
config.bridge_name)
#
# Add basic ports (1x ingress, and 1x egress)
#
command += "-- add-port {0} {1} -- set Interface {1} ofport_request=10 ". \
format(config.bridge_name, config.physical_interface)
if config.virtual_interface:
command += "-- add-port {0} {1} -- set Interface {1} " \
"ofport_request=20 ". \
format(config.bridge_name, config.virtual_interface)
if dpdk:
command += "-- set Interface {0} type=dpdk " . \
format(config.physical_interface)
if config.virtual_interface:
command += "-- set Interface {0} type=dpdkvhostuser ". \
format(config.virtual_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} " . \
format(config.physical_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
if config.virtual_interface:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} ". \
format(config.virtual_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
#
# Add second virtual ports if vv test is enabled
#
if not config.skip_vv_test:
command += "-- add-port {0} {1} -- set Interface {1} " \
"ofport_request=21 ".format(config.bridge_name,
config.second_virtual_interface)
if dpdk:
command += "-- set Interface {0} type=dpdkvhostuser ". \
format(config.second_virtual_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} ". \
format(config.second_virtual_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
#
# Add second physical port if pp test is enabled
#
if config.run_pp_test:
command += "-- add-port {0} {1} -- set Interface {1} " \
"ofport_request=11 ".format(
config.bridge_name, config.second_physical_interface)
if dpdk:
command += "-- set Interface {0} type=dpdk ". \
format(config.second_physical_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} ". \
format(config.second_physical_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
#
# If we are running DPDK and it's 2.7 or higher we need to specify the PCI
# addresses for the physical ports.
#
if dpdk and ovs_version >= Version('2.7.0'):
if not check_pci_address_string(config.physical_interface_pci) or \
(config.run_pp_test and not
check_pci_address_string(config.second_physical_interface_pci)):
lprint("ERROR: For OVS >=2.7 you must supply a valid PCI address "
"for the physical interfaces!")
sys.exit(-1)
command += "-- set Interface {0} options:dpdk-devargs={1} ". \
format(config.physical_interface,
config.physical_interface_pci)
if config.second_physical_interface:
command += "-- set Interface {0} options:dpdk-devargs={1} " . \
format(config.second_physical_interface,
config.second_physical_interface_pci)
#
# Configure all the above!
#
dut_shell.dut_exec(command, die_on_error=True)
if config.debug or config.debug_dut_shell:
dut_shell.dut_exec("ovs-vsctl show", die_on_error=True)
#
    # If this is DPDK, you might need to restart the VM for things to start
    # working. So we pause here, asking for a restart of the VM.
#
if dpdk and config.virtual_interface:
print("!!! Finished configuring the OVS bridge, please restart the "
"Virtual Machine !!!")
        input("Press Enter to continue...")
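#
# Illustrative example of the command composed above (assuming bridge
# "ovs_pvp_br0", physical interface "dpdk0" at PCI 0000:02:00.0, and
# virtual interface "vhost0" -- all names are examples only):
#   ovs-vsctl add-br ovs_pvp_br0
#     -- set Bridge ovs_pvp_br0 datapath_type=netdev
#     -- add-port ovs_pvp_br0 dpdk0 -- set Interface dpdk0 ofport_request=10
#     -- add-port ovs_pvp_br0 vhost0 -- set Interface vhost0 ofport_request=20
#     -- set Interface dpdk0 type=dpdk
#     -- set Interface vhost0 type=dpdkvhostuser
#     -- set Interface dpdk0 options:dpdk-devargs=0000:02:00.0
# so the whole bridge is configured in one ovs-vsctl transaction.
#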
#
# Add VXLAN test bridge setup
#
def create_ovs_vxlan_bridge():
lprint("- Configuring bridge...")
if "dpdk" in config.physical_interface:
dpdk = True
else:
dpdk = False
tunnel_bridge = (config.bridge_name + "_tterm")[:15]
#
# Delete bridge if existing
#
dut_shell.dut_exec("ovs-vsctl -- --if-exists del-br {0} "
"-- --if-exists del-br {1}".
format(config.bridge_name, tunnel_bridge),
die_on_error=True)
#
# Create bridge and set data path if needed
#
command = "ovs-vsctl add-br {0} -- add-br {1} " \
.format(config.bridge_name, tunnel_bridge)
if dpdk:
command += "-- set Bridge {} datapath_type=netdev ".format(
config.bridge_name)
command += "-- set Bridge {} datapath_type=netdev ".format(
tunnel_bridge)
#
# Add basic ports (1x ingress, and 1x egress)
#
command += "-- add-port {3} {1} -- set Interface {1} ofport_request=10 " \
"-- add-port {0} {2} -- set Interface {2} ofport_request=20 " \
"-- add-port {0} vxlan0 -- set Interface vxlan0 " \
"ofport_request=30 " \
"-- set interface vxlan0 type=vxlan " \
"options:remote_ip=3.1.1.2 options:key=69 ". \
format(config.bridge_name,
config.physical_interface,
config.virtual_interface,
tunnel_bridge)
if dpdk:
command += "-- set Interface {0} type=dpdk " \
"-- set Interface {1} type=dpdkvhostuser ". \
format(config.physical_interface, config.virtual_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={2} " \
"other_config:pmd-rxq-affinity={3} " \
"-- set Interface {1} options:n_rxq={2} " \
"other_config:pmd-rxq-affinity={3} ". \
format(config.physical_interface,
config.virtual_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
#
# If we are running DPDK and it's 2.7 or higher we need to specify the PCI
# addresses for the physical ports.
#
if dpdk and ovs_version >= Version('2.7.0'):
if not check_pci_address_string(config.physical_interface_pci) or \
(config.run_pp_test and not
check_pci_address_string(config.second_physical_interface_pci)):
lprint("ERROR: For OVS >=2.7 you must supply a valid PCI address "
"for the physical interfaces!")
sys.exit(-1)
command += "-- set Interface {0} options:dpdk-devargs={1} ". \
format(config.physical_interface,
config.physical_interface_pci)
#
# Configure all the above!
#
dut_shell.dut_exec(command, die_on_error=True)
if config.debug or config.debug_dut_shell:
dut_shell.dut_exec("ovs-vsctl show", die_on_error=True)
#
    # If this is DPDK, you might need to restart the VM for things to start
    # working. So we pause here, asking for a restart of the VM.
#
if dpdk:
print("!!! Finished configuring the OVS bridge, please restart the "
"Virtual Machine !!!")
        input("Press Enter to continue...")
#
# Get bridge port numbers
#
def get_bridge_port_numbers(tunnel=False):
lprint("- Get OpenFlow and DataPath port numbers...")
of = dict()
dp = dict()
#
# Get mapping from openvswitch
#
command = 'sh -c "ovs-ofctl show {0} && ovs-appctl dpctl/show"'.\
format(config.bridge_name)
if tunnel:
tunnel_bridge = (config.bridge_name + "_tterm")[:15]
command = 'sh -c "ovs-ofctl show {0} && ovs-ofctl show {1} && '\
'ovs-appctl dpctl/show"'.\
format(config.bridge_name, tunnel_bridge)
result = dut_shell.dut_exec(command, die_on_error=True)
#
# Create list of interfaces, second interfaces are optional,
# so check if they exist before adding.
#
interfaces = [config.physical_interface]
if config.virtual_interface != '':
interfaces.append(config.virtual_interface)
if config.second_virtual_interface != '':
interfaces.append(config.second_virtual_interface)
if config.second_physical_interface != '':
interfaces.append(config.second_physical_interface)
if tunnel:
interfaces.append('vxlan0')
for interface in interfaces:
m = re.search('\\s*([0-9]*)\\({0}\\): addr:.*'.format(interface),
result.output)
if m:
of[interface] = m.group(1)
else:
lprint("ERROR: Can't figure out OpenFlow interface for {0}".
format(interface))
sys.exit(-1)
if interface == 'vxlan0':
continue
m = re.search('\\s*port\\s*([0-9]*):\\s*{0}\\s*.*'.format(interface),
result.output)
if m:
dp[interface] = m.group(1)
else:
lprint("ERROR: Can't figure out OpenFlow datapath interface "
"for {0}".format(interface))
sys.exit(-1)
slogger.info("OpenFlow ports; {}".format(of))
slogger.info("DataPath ports; {}".format(dp))
return of, dp
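#
# Illustrative example of the text parsed above (assuming an interface
# named "dpdk0"): "ovs-ofctl show" prints a line like
#   10(dpdk0): addr:aa:bb:cc:dd:ee:ff
# and "ovs-appctl dpctl/show" prints a line like
#   port 2: dpdk0 (dpdk: ...)
# which would yield of["dpdk0"] = "10" and dp["dpdk0"] = "2". The vxlan0
# port only exists at the OpenFlow level, so its datapath lookup is skipped.
#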
#
# Get OpenFlow port packet stats
#
def get_of_port_packet_stats(of_port, **kwargs):
bridge = kwargs.pop("bridge", config.bridge_name)
port_stats = of_dump_port_to_logfile(bridge)
m = re.search('\\s.*port *{}: rx pkts=.*\n.*tx pkts=([0-9?]*), '.format(
of_port), port_stats.output)
if m:
if '?' in m.group(1):
tx = int(0)
else:
tx = int(m.group(1))
else:
        lprint("ERROR: Can't get transmitted packet stats for OpenFlow "
               "port {0} on bridge \"{1}\"".
               format(of_port, bridge))
sys.exit(-1)
m = re.search('\\s.*port *{}: rx pkts=.*\n.*tx pkts=.* '
'drop=([0-9?]*), .*'.format(of_port), port_stats.output)
if m:
if '?' in m.group(1):
tx_drop = int(0)
else:
tx_drop = int(m.group(1))
else:
        lprint("ERROR: Can't get transmitted drop stats for OpenFlow "
               "port {0} on bridge \"{1}\"".
               format(of_port, bridge))
sys.exit(-1)
m = re.search('\\s.*port *{}: rx pkts=([0-9?]*), .*'.format(of_port),
port_stats.output)
if m:
if '?' in m.group(1):
rx = int(0)
else:
rx = int(m.group(1))
else:
        lprint("ERROR: Can't get received packet stats for OpenFlow "
               "port {0} on bridge \"{1}\"".
               format(of_port, bridge))
sys.exit(-1)
m = re.search('\\s.*port *{}: rx pkts=.* '
'drop=([0-9?]*), .*'.format(of_port), port_stats.output)
if m:
if '?' in m.group(1):
rx_drop = int(0)
else:
rx_drop = int(m.group(1))
else:
        lprint("ERROR: Can't get received drop stats for OpenFlow port {0} "
               "on bridge \"{1}\"".format(of_port, bridge))
sys.exit(-1)
slogger.debug("OF port {0} stats: tx = {1}, tx_drop = {2}, "
"rx = {3}, rx_drop = {4}".format(of_port, tx, tx_drop,
rx, rx_drop))
return tx, tx_drop, rx, rx_drop
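#
# Illustrative example of the "ovs-ofctl dump-ports" text matched above:
#   port 10: rx pkts=1000, bytes=64000, drop=0, errs=0, frame=0, over=0, crc=0
#            tx pkts=900, bytes=57600, drop=5, errs=0, coll=0
# would yield tx=900, tx_drop=5, rx=1000, rx_drop=0. Counters OVS reports
# as "?" are treated as zero.
#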
#
# Convert a MAC address string to an integer
#
def mac_2_int(mac_str):
return int(mac_str.replace(":", ""), 16)
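#
# For example, mac_2_int("00:00:01:00:00:00") returns 0x000001000000,
# i.e. 16777216.
#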
#
# Check tester interface number string
#
def tester_interface_valid(interface):
if config.tester_type == 'xena':
xport = interface.split(',')
if len(xport) != 2:
return False
else:
        xport = [interface]
for number in xport:
try:
if int(number) < 0:
return False
except ValueError:
return False
return True
#
# Create a single graph
#
def create_single_graph(x, y, x_label, y_label, title,
file_name, phy_speed, **kwargs):
cpu_util = kwargs.pop("cpu_utilization", None)
zero_loss_traffic_rate = kwargs.pop("zero_loss_traffic_rate", None)
zero_loss_loss_rate = kwargs.pop("zero_loss_loss_rate", None)
slogger.info("create_single_graph[{}], x = {} : y = {}".
format(title, x, y))
if cpu_util is None:
fig, pps = plt.subplots()
pps_plot = pps
else:
sub_plots = 4
fig, pps = plt.subplots(sub_plots)
pps_plot = pps[0]
pmd_plot = pps[1]
ovs_plot = pps[2]
sys_plot = pps[3]
fig.set_figwidth(sub_plots * fig.get_figwidth(), forward=True)
fig.set_figheight(sub_plots * fig.get_figheight(), forward=True)
#
# Main graph showing utilization
#
pps_plot.set_title(title)
pps_plot.set_xlabel(x_label)
pps_plot.set_ylabel(y_label)
pps_plot.grid(True)
pps_plot.autoscale(enable=True, axis='both', tight=False)
pps_plot.plot(x, y, 'o-', label='average')
pps_plot.ticklabel_format(axis='y', style='plain')
pps_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
pps_plot.minorticks_on()
#
# Add second scaled graph showing line utilization
#
if phy_speed > 0 and zero_loss_traffic_rate is None:
util_y = list()
for i in range(0, len(x)):
util_y.append(eth_utilization(phy_speed,
x[i], y[i]))
util = pps_plot.twinx()
util.plot(x, util_y, '.:', color='r')
util.set_ylim(0, 100)
util.set_ylabel('Link Utilization in % ({} Gbit/s)'.
format(phy_speed / 1000000000), color='r')
util.tick_params('y', colors='r')
#
# Add second scaled graph showing zero loss traffic rate
#
if zero_loss_traffic_rate is not None:
util = pps_plot.twinx()
util.plot(x, zero_loss_traffic_rate, '.:', color='g')
if zero_loss_loss_rate is not None:
for i, x1 in enumerate(x):
#
# For tests where 0% packet loss is not met, mark it with a
# red square.
#
if zero_loss_loss_rate[i] > 0:
util.plot(x1, zero_loss_traffic_rate[i],
marker='s', color='r')
util.set_ylim(0, 100)
util.set_ylabel('Zero loss traffic rate in % ({} Gbit/s)'.
format(phy_speed / 1000000000), color='g')
util.tick_params('y', colors='g')
#
# Adding CPU utilization if requested
#
if cpu_util is not None:
other_y_values = list()
urcu_y_values = list()
handler_y_values = list()
revalidator_y_values = list()
pmd_y_values = list()
usr_y_values = list()
nice_y_values = list()
sys_y_values = list()
iowait_y_values = list()
irq_y_values = list()
soft_y_values = list()
steal_y_values = list()
guest_y_values = list()
gnice_y_values = list()
idle_y_values = list()
for i in range(0, len(x)):
pmd_y_values.append(cpu_util[i]['ovs_cpu_pmd'])
revalidator_y_values.append(cpu_util[i]['ovs_cpu_revalidator'])
handler_y_values.append(cpu_util[i]['ovs_cpu_handler'])
urcu_y_values.append(cpu_util[i]['ovs_cpu_urcu'])
other_y_values.append(cpu_util[i]['ovs_cpu_other'])
usr_y_values.append(cpu_util[i]['sys_usr']
- cpu_util[i]['ovs_cpu'])
nice_y_values.append(cpu_util[i]['sys_nice'])
sys_y_values.append(cpu_util[i]['sys_sys'])
iowait_y_values.append(cpu_util[i]['sys_iowait'])
irq_y_values.append(cpu_util[i]['sys_irq'])
soft_y_values.append(cpu_util[i]['sys_soft'])
steal_y_values.append(cpu_util[i]['sys_steal'])
guest_y_values.append(cpu_util[i]['sys_guest'])
gnice_y_values.append(cpu_util[i]['sys_gnice'])
idle_y_values.append(cpu_util[i]['sys_idle'])
total_util = cpu_util[0]['sys_usr'] + cpu_util[0]['sys_nice'] + \
cpu_util[0]['sys_sys'] + cpu_util[0]['sys_iowait'] + \
cpu_util[0]['sys_irq'] + cpu_util[0]['sys_soft'] + \
cpu_util[0]['sys_steal'] + cpu_util[0]['sys_guest'] + \
cpu_util[0]['sys_gnice'] + cpu_util[0]['sys_idle']
#
# Adding PMD CPU utilization
#
x_cpu = np.arange(len(x))
bar_width = 0.20
pmd_plot.bar(x_cpu, pmd_y_values, bar_width,
color="#1f77b4", edgecolor="none",
label="OVS PMD", align="edge", zorder=3)
pmd_plot.bar(x_cpu + bar_width, guest_y_values, bar_width,
color="#ff7f0e", edgecolor="none",
label="Guest", align="edge", zorder=3)
pmd_plot.set_title("Guest and Open vSwitch PMD CPU usage")
pmd_plot.set_xlim(0 - (2 * bar_width),
len(x_cpu) - 1 + (4 * bar_width))
pmd_plot.set_xticks(x_cpu + bar_width)
pmd_plot.set_xticklabels(x, ha='center')
pmd_plot.set_ylabel("CPU utilization")
pmd_plot.set_xlabel("Packet size")
pmd_plot.grid(b=True, which='major', axis='y')
pmd_plot.grid(b=True, which='minor', color='k', linestyle=':',
alpha=0.2, axis='y')
pmd_plot.minorticks_on()
pmd_plot.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#
# Adding OVS CPU utilization
#
y_ovs_values = [other_y_values, urcu_y_values, handler_y_values,
revalidator_y_values, pmd_y_values]
y_ovs_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd']
y_ovs_labels = ['other', 'urcu', 'handler',
'revalidator', 'pmd']
bottom = [0] * len(x)
for i in range(0, len(y_ovs_values) - 1):
ovs_plot.bar(x_cpu, y_ovs_values[i], bar_width,
color=y_ovs_colors[i], edgecolor=y_ovs_colors[i],
bottom=bottom, label=y_ovs_labels[i], align="center",
zorder=3)
bottom = [a + b for a, b in zip(bottom, y_ovs_values[i])]
ovs_plot.set_title("Open vSwitch CPU usage non PMD")
ovs_plot.set_xticks(x_cpu)
ovs_plot.set_xticklabels(x, ha='center')
ovs_plot.set_ylabel("CPU utilization")
ovs_plot.set_xlabel("Packet size")
ovs_plot.grid(b=True, which='major', axis='y')
ovs_plot.grid(b=True, which='minor', color='k', linestyle=':',
alpha=0.2, axis='y')
ovs_plot.minorticks_on()
ovs_plot.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#
# Adding System CPU utilization
#
y_cpu_values = [nice_y_values, sys_y_values, iowait_y_values,
irq_y_values, soft_y_values, steal_y_values,
gnice_y_values, usr_y_values, guest_y_values,
idle_y_values]
y_cpu_colors = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5']
y_cpu_labels = ['nice', 'sys', 'iowait', 'irq', 'soft', 'steal',
'gnice', 'usr', 'guest', 'idle']
bottom = [0] * len(x)
for i in range(0, len(y_cpu_values) - 2):
sys_plot.bar(x_cpu, y_cpu_values[i], bar_width,
color=y_cpu_colors[i], edgecolor=y_cpu_colors[i],
bottom=bottom, label=y_cpu_labels[i], align="center",
zorder=3)
bottom = [a + b for a, b in zip(bottom, y_cpu_values[i])]
sys_plot.set_title("System CPU usage (max {:.0f}%)".format(total_util))
sys_plot.set_xticks(x_cpu)
sys_plot.set_xticklabels(x, ha='center')
sys_plot.set_ylabel("CPU utilization")
sys_plot.set_xlabel("Packet Size")
sys_plot.grid(b=True, which='major', axis='y')
sys_plot.grid(b=True, which='minor', color='k', linestyle=':',
alpha=0.2, axis='y')
sys_plot.minorticks_on()
sys_plot.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#
    # Due to a bug in matplotlib we need to disable some numpy errors
#
old_np_seterr = np.seterr(divide='ignore', invalid='ignore')
#
# Final tweaking
#
fig.tight_layout()
if cpu_util is not None:
box = pmd_plot.get_position()
pmd_plot.set_position([box.x0, box.y0, box.width * 0.9, box.height])
box = ovs_plot.get_position()
ovs_plot.set_position([box.x0, box.y0, box.width * 0.9, box.height])
box = sys_plot.get_position()
sys_plot.set_position([box.x0, box.y0, box.width * 0.9, box.height])
#
# Write picture
#
if file_name is not None and file_name != "":
plt.savefig(file_name + '.png')
#
# Show picture if requested, and clear the graph
#
if config.gui:
plt.show()
plt.close()
np.seterr(**old_np_seterr)
#
# Single graph with multiple results
#
def create_multiple_graph(x, y, x_label, y_label,
title, file_name, phy_speed, **kwargs):
fixed_packet_size = kwargs.pop("fixed_packet_size", None)
cpu_util = kwargs.pop("cpu_utilization", None)
show_idle_cpu = kwargs.pop("show_cpu_idle", True)
slogger.info("create_multiple_graph[{}], x = {} : y = {}".
format(title, x, y))
if cpu_util is None:
fig, pps = plt.subplots()
pps_plot = pps
else:
fig = plt.figure()
#
        # This split looked nice, until we used all packet sizes
        # and multiple flows.
#
# pps_plot = plt.subplot2grid((2, 2), (0, 0), colspan=2)
# cpu_plot = plt.subplot2grid((2, 2), (1, 0))
# sys_plot = plt.subplot2grid((2, 2), (1, 1))
# fig.set_figwidth(2 * fig.get_figwidth(), forward = True)
# fig.set_figheight(2 * fig.get_figheight(), forward = True)
pps_plot = plt.subplot2grid((3, 2), (0, 0), colspan=2)
cpu_plot = plt.subplot2grid((3, 2), (1, 0), colspan=2)
sys_plot = plt.subplot2grid((3, 2), (2, 0), colspan=2)
fig.set_figwidth(2 * fig.get_figwidth(), forward=True)
fig.set_figheight(3 * fig.get_figheight(), forward=True)
#
# Main graph showing utilization
#
pps_plot.set_title(title)
pps_plot.set_xlabel(x_label)
pps_plot.set_ylabel(y_label)
pps_plot.grid(True)
pps_plot.autoscale(enable=True, axis='both', tight=False)
pps_plot.ticklabel_format(axis='y', style='plain')
pps_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
pps_plot.minorticks_on()
for y_run in natsorted(list(y.keys())):
pps_plot.plot(x, y[y_run], 'o-', label="{}".format(y_run))
#
# Add maximum PPS for the given physical speed
#
if phy_speed is not None:
for speed in phy_speed:
y_values = list()
for x_val in x:
if fixed_packet_size is None:
y_values.append(eth_max_pps(speed, x_val))
else:
y_values.append(eth_max_pps(speed, fixed_packet_size))
pps_plot.plot(x, y_values, '.:', label="Max PPS {}G".
format(speed / 1000000000))
pps_plot.legend(loc='upper right', shadow=True)
#
# Add CPU util information if given
#
if cpu_util is not None:
#
# OVS CPU utilization
#
x_cpu = np.arange(len(x))
bar_width = 0.11
cpu_plot.set_title("Open vSwitch CPU utilization")
other_y_values = dict(list(zip(list(
cpu_util.keys()), [[] for i in range(len(cpu_util))])))
urcu_y_values = dict(list(zip(list(
cpu_util.keys()), [[] for i in range(len(cpu_util))])))
handler_y_values = dict(list(zip(list(
cpu_util.keys()), [[] for i in range(len(cpu_util))])))
revalidator_y_values = dict(list(zip(list(
cpu_util.keys()), [[] for i in range(len(cpu_util))])))
pmd_y_values = dict(list(zip(list(
cpu_util.keys()), [[] for i in range(len(cpu_util))])))
for i in range(0, len(x)):
for key in list(cpu_util.keys()):
pmd_y_values[key].append(
cpu_util[key][i]['ovs_cpu_pmd'])
revalidator_y_values[key].append(
cpu_util[key][i]['ovs_cpu_revalidator'])
handler_y_values[key].append(
cpu_util[key][i]['ovs_cpu_handler'])
urcu_y_values[key].append(
cpu_util[key][i]['ovs_cpu_urcu'])
other_y_values[key].append(
cpu_util[key][i]['ovs_cpu_other'])
y_ovs_values = [other_y_values, urcu_y_values, handler_y_values,
revalidator_y_values, pmd_y_values]
y_ovs_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd']
y_ovs_labels = ['other', 'urcu', 'handler',
'revalidator', 'pmd']
if len(cpu_util) % 2 != 0:
align = 'center'
else:
align = 'edge'
for i, key in enumerate(natsorted(list(cpu_util.keys()))):
x_pos = (x_cpu - (len(cpu_util) / 2 * bar_width)) + (i * bar_width)
bottom = [0] * len(x)
for j in range(0, len(y_ovs_values)):
cpu_plot.bar(x_pos, y_ovs_values[j][key], bar_width,
align=align, color=y_ovs_colors[j],
label=y_ovs_labels[j] if i == 0 else "",
bottom=bottom, zorder=3,
linewidth=1, edgecolor=(1, 1, 1, 0.2))
bottom = [a + b for a, b in zip(bottom, y_ovs_values[j][key])]
cpu_plot.set_xlim(0 - (len(cpu_util) * bar_width),
len(x_cpu) - 1 + (len(cpu_util) * bar_width))
cpu_plot.set_xticks(x_cpu)
cpu_plot.set_xticklabels(x, ha='center')
cpu_plot.set_ylabel("CPU utilization")
cpu_plot.set_xlabel(x_label)
cpu_plot.grid(b=True, which='major')
cpu_plot.grid(b=True, which='minor', color='k', linestyle=':',
alpha=0.2)
cpu_plot.minorticks_on()
handles, labels = cpu_plot.get_legend_handles_labels()
cpu_plot.legend(list(reversed(handles)),
list(reversed(labels)),
loc='center left', bbox_to_anchor=(1, 0.5))
#
# System CPU utilization
#
sys_plot.set_title("Total System CPU utilization")
usr_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
nice_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
sys_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
iowait_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
irq_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
soft_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
steal_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
guest_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
gnice_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
idle_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
y_cpu_values = [usr_y_values, nice_y_values, sys_y_values,
iowait_y_values, irq_y_values, soft_y_values,
steal_y_values, guest_y_values, gnice_y_values,
idle_y_values]
y_cpu_labels = ['usr', 'nice', 'sys', 'iowait', 'irq',
'soft', 'steal', 'guest', 'gnice', 'idle']
y_cpu_keys = ['sys_usr', 'sys_nice', 'sys_sys', 'sys_iowait',
'sys_irq', 'sys_soft', 'sys_steal', 'sys_guest',
'sys_gnice', 'sys_idle']
y_cpu_colors = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5']
for i in range(0, len(x)):
for key in list(cpu_util.keys()):
for j, y_cpu_value in enumerate(y_cpu_values):
y_cpu_value[key].append(cpu_util[key][i][y_cpu_keys[j]])
if len(cpu_util) % 2 != 0:
align = 'center'
else:
align = 'edge'
for i, key in enumerate(natsorted(list(cpu_util.keys()))):
x_pos = (x_cpu - (len(cpu_util) / 2 * bar_width)) + (i * bar_width)
bottom = [0] * len(x)
            for j in range(0, len(y_cpu_values) - (0 if show_idle_cpu else 1)):
sys_plot.bar(x_pos, y_cpu_values[j][key], bar_width,
align=align, color=y_cpu_colors[j],
label=y_cpu_labels[j] if i == 0 else "",
bottom=bottom, zorder=3,
linewidth=1, edgecolor=(1, 1, 1, 0.2))
bottom = [a + b for a, b in zip(bottom, y_cpu_values[j][key])]
sys_plot.set_xlim(0 - (len(cpu_util) * bar_width),
len(x_cpu) - 1 + (len(cpu_util) * bar_width))
sys_plot.set_xticks(x_cpu)
sys_plot.set_xticklabels(x, ha='center')
sys_plot.set_ylabel("CPU utilization")
sys_plot.set_xlabel(x_label)
sys_plot.grid(b=True, which='major')
sys_plot.grid(b=True, which='minor', color='k', linestyle=':',
alpha=0.2)
sys_plot.minorticks_on()
handles, labels = sys_plot.get_legend_handles_labels()
sys_plot.legend(list(reversed(handles)),
list(reversed(labels)),
loc='center left', bbox_to_anchor=(1, 0.5))
#
    # Due to a bug in matplotlib we need to disable some numpy errors
#
old_np_seterr = np.seterr(divide='ignore', invalid='ignore')
#
# Final tweaking
#
fig.tight_layout()
if cpu_util is not None:
box = cpu_plot.get_position()
cpu_plot.set_position([box.x0, box.y0, box.width * 0.89, box.height])
box = sys_plot.get_position()
sys_plot.set_position([box.x0, box.y0, box.width * 0.89, box.height])
#
# Write picture
#
if file_name is not None and file_name != "":
plt.savefig(file_name + '.png')
#
# Show picture if requested, and clear the graph
#
if config.gui:
plt.show()
plt.close()
np.seterr(**old_np_seterr)
#
# Try to get phy speed from physical port
#
def get_physical_port_speed():
speed = 10000000000
result = dut_shell.dut_exec("ethtool {}".format(config.physical_interface))
m = re.search('\\s*Speed: ([0-9]*)Mb.*', result.output)
if m:
speed = int(m.group(1)) * 1000000
else:
        slogger.info("Can't determine the speed of physical interface "
                     "\"{0}\"!".format(config.physical_interface))
slogger.info("Set physical interface \"{0}\" speed to {1} bits/second".
format(config.physical_interface, speed))
return speed
#
# Calculate wire utilization based on packet size and packets per second
#
# Packet size = 12 bytes IFG +
# 8 bytes preamble +
# x bytes packet +
# 4 bytes CRC
#
def eth_utilization(line_speed_bps, packet_size, packets_per_second):
packet_size_bits = (12 + 8 + packet_size + 4) * 8
packet_speed_second = packet_size_bits * packets_per_second
util = int(float(packet_speed_second) / line_speed_bps * 100)
if util > 100:
util = 100
return util
#
# Calculate max packets per second based on packet size and wire speed
#
def eth_max_pps(line_speed_bps, packet_size):
packet_size_bits = (12 + 8 + packet_size + 4) * 8
return line_speed_bps / packet_size_bits
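#
# Worked example: a 64-byte packet occupies (12 + 8 + 64 + 4) * 8 = 704
# bits on the wire, so a 10Gbit/s link carries at most
# 10,000,000,000 / 704, roughly 14.2 million packets/second, which
# eth_utilization() would report as (close to) 100% utilization.
#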
#
# Print results in CSV
#
def csv_write_test_results(csv_handle, test_name, flow_size_list,
packet_size_list, test_results, cpu_results,
**kwargs):
loss_rate = kwargs.pop("loss_rate", None)
traffic_rate = kwargs.pop("traffic_rate", None)
if config.flow_type == 'L2':
flow_type = ", L2 flows"
elif config.flow_type == 'L3':
flow_type = ", L3 flows"
elif config.flow_type == 'L4-UDP':
flow_type = ", L4-udp flows"
else:
raise ValueError("No support for this protocol!!")
if config.flow_rule_type == 'NORMAL':
flow_type += "[NORMAL]"
elif config.flow_rule_type == 'port':
flow_type += "[port redirect]"
csv_handle.writerow([test_name + flow_type])
if len(test_results) > 0:
csv_handle.writerow(['', 'Packet size'])
        if loss_rate is not None or traffic_rate is not None:
            labels = ['Receive rate']
            packet_size_labels = []
            l1 = []
            if traffic_rate is not None:
                labels.append('Traffic rate')
                l1.append('')
            if loss_rate is not None:
                labels.append('Loss rate')
                l1.append('')
            for pkt in packet_size_list:
                packet_size_labels.append(pkt)
                packet_size_labels.extend(l1)
            csv_handle.writerow(['Number of flows'] + packet_size_labels)
            csv_handle.writerow([''] + labels * len(packet_size_list))
else:
csv_handle.writerow(['Number of flows'] + packet_size_list)
for flow in flow_size_list:
results = [flow]
for i in range(0, len(packet_size_list)):
results.append(test_results[flow][i])
if traffic_rate is not None:
results.append(traffic_rate[flow][i])
if loss_rate is not None:
results.append(loss_rate[flow][i])
csv_handle.writerow(results)
results = ["cpu_{}".format(flow)]
for i in range(0, len(packet_size_list)):
results.append(cpu_results[flow][i])
csv_handle.writerow(results)
for i in range(0, 4):
csv_handle.writerow([])
#
# Check a string of list entries, and make sure they are valid numbers
# in ascending order.
#
def check_list(list_string, min_val, max_val):
last_entry = 0
    entries = list_string.split(',')
    if len(entries) == 0:
        return False
    for entry in entries:
try:
value = int(entry)
except ValueError:
return False
if value < min_val or value > max_val or last_entry >= value:
return False
last_entry = value
return True
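#
# For example, check_list("64,128,256", 64, 9216) returns True, while
# check_list("256,128", 64, 9216) returns False as the entries are not
# in ascending order.
#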
#
# Check the string to be a valid PCI address in the format "0000:02:00.0".
# In addition we also allow the ",txq_inline=" option needed for some vendors,
# as a workaround for L3 forwarding to work.
#
def check_pci_address_string(pci_address):
if pci_address is None:
return False
if re.match(r"^\d{4}:\d{2}:[0-9A-Fa-f]{2}\.\d{1}$",
pci_address) is None and \
re.match(r"^\d{4}:\d{2}:[0-9A-Fa-f]{2}\.\d{1},txq_inline=\d+$",
pci_address) is None:
return False
return True
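#
# For example, "0000:02:00.0" and "0000:02:00.0,txq_inline=1" are
# accepted, while "02:00.0" (missing PCI domain) is rejected.
#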
#
# Mimic the normal print command, but also send the same output shown on
# the console to the log file. Only do this if the log file option is
# enabled, otherwise we end up with the same text on the console twice.
#
def lprint(msg):
print(msg)
if config.logging is not None:
slogger.info(msg)
#
# Start Perf recording on DUT
#
def start_perf_recording(test_name):
if not config.perf:
return
perf_path = "/root/ovs_test_perf_data/run_{}".format(run_start_time)
perf_file = "{}/{}.perf".format(perf_path, test_name)
cmd = r"mkdir -p {0}; " \
r"nohup perf record -o '{1}' -g -p `pidof ovs-vswitchd` " \
r"&> /dev/null &".format(perf_path, perf_file)
lprint(" * Start perf recording on DUT ({})...".format(perf_file))
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Stop Perf recording on DUT
#
def stop_perf_recording():
if not config.perf:
return
lprint(" * Stop perf recording on DUT...")
cmd = r"kill -s INT `pidof perf`"
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Start CPU monitoring on DUT
#
def start_cpu_monitoring():
#
# pidstat -u -t -p `pidof ovs-vswitchd`,`pidof ovsdb-server` 1
# PIDSTAT for all qemu?
# mpstat -P ALL 1
# kill -SIGINT `pidof pidstat`
cmd = r"rm -f /var/tmp/cpu_ovs.txt /var/tmp/cpu_mpstat.txt; " \
r"nohup pidstat -u -t -p `pidof ovs-vswitchd`,"\
r"`pidof ovsdb-server` 1 > /var/tmp/cpu_ovs.txt 2> /dev/null & " \
r"nohup mpstat -P ALL 1 > /var/tmp/cpu_mpstat.txt 2> /dev/null &"
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Stop CPU monitoring on DUT
#
def stop_cpu_monitoring(**kwargs):
die = kwargs.pop("die", True)
cmd = r"kill -s INT `pidof pidstat`; " \
r"kill -s INT `pidof mpstat`"
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=die)
#
# Get CPU monitoring stats
#
def get_cpu_monitoring_stats():
cmd = r"cat /var/tmp/cpu_ovs.txt"
results = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=True)
ovs_cpu_pmd = float(0)
ovs_cpu_revalidator = float(0)
ovs_cpu_handler = float(0)
ovs_cpu_urcu = float(0)
ovs_cpu_other = float(0)
if "%guest %wait %CPU" in results.stdout_output:
# Average: 988 - 16979 0.00 0.00 0.00 0.00 0.00 - |__ovs-vswitchd # noqa: E501
regex = re.compile("^Average:\\s+[0-9]+\\s+-\\s+[0-9]+\\s+[0-9\\.]+\\s+[0-9\\.]+\\s+[0-9\\.]+\\s+[0-9\\.]+\\s+([0-9\\.]+).+__(.+)", re.MULTILINE) # noqa: E501
else:
# Average: 0 - 6982 0.00 0.05 0.00 0.05 - |__ovs-vswitchd # noqa: E501
regex = re.compile("^Average:\\s+[0-9]+\\s+-\\s+[0-9]+\\s+[0-9\\.]+\\s+[0-9\\.]+\\s+[0-9\\.]+\\s+([0-9\\.]+).+__(.+)", re.MULTILINE) # noqa: E501
for match in regex.finditer(results.stdout_output):
if match.group(2).startswith("pmd"):
ovs_cpu_pmd += float(match.group(1))
elif match.group(2).startswith("revalidator"):
ovs_cpu_revalidator += float(match.group(1))
elif match.group(2).startswith("handler"):
ovs_cpu_handler += float(match.group(1))
elif match.group(2).startswith("urcu"):
ovs_cpu_urcu += float(match.group(1))
else:
ovs_cpu_other += float(match.group(1))
cmd = r"cat /var/tmp/cpu_mpstat.txt"
results = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=True)
cpu_usr = float(0)
cpu_nice = float(0)
cpu_sys = float(0)
cpu_iowait = float(0)
cpu_irq = float(0)
cpu_soft = float(0)
cpu_steal = float(0)
cpu_guest = float(0)
cpu_gnice = float(0)
cpu_idle = float(0)
# %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle # noqa: E501
regex = re.compile("^Average:\\s+[0-9]+\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)\\s+([0-9\\.]+)$", # noqa: E501
re.MULTILINE)
for match in regex.finditer(results.stdout_output):
cpu_usr += float(match.group(1))
cpu_nice += float(match.group(2))
cpu_sys += float(match.group(3))
cpu_iowait += float(match.group(4))
cpu_irq += float(match.group(5))
cpu_soft += float(match.group(6))
cpu_steal += float(match.group(7))
cpu_guest += float(match.group(8))
cpu_gnice += float(match.group(9))
cpu_idle += float(match.group(10))
cpu_total = int(cpu_usr + cpu_nice + cpu_sys + cpu_iowait
+ cpu_irq + cpu_soft + cpu_steal + cpu_guest
+ cpu_gnice + cpu_idle)
ovs_cpu_total = ovs_cpu_pmd + ovs_cpu_revalidator + ovs_cpu_handler + \
ovs_cpu_urcu + ovs_cpu_other
cpu_results = dict([('ovs_cpu', ovs_cpu_total),
('ovs_cpu_pmd', ovs_cpu_pmd),
('ovs_cpu_revalidator', ovs_cpu_revalidator),
('ovs_cpu_handler', ovs_cpu_handler),
('ovs_cpu_urcu', ovs_cpu_urcu),
('ovs_cpu_other', ovs_cpu_other),
('sys_usr', cpu_usr),
('sys_nice', cpu_nice),
('sys_sys', cpu_sys),
('sys_iowait', cpu_iowait),
('sys_irq', cpu_irq),
('sys_soft', cpu_soft),
('sys_steal', cpu_steal),
('sys_guest', cpu_guest),
('sys_gnice', cpu_gnice),
('sys_idle', cpu_idle),
('sys_total', cpu_total)])
slogger.debug("CPU results: {}".format(cpu_results))
return cpu_results
#
# Get OVS version
#
def get_ovs_version():
result = dut_shell.dut_exec('sh -c "ovs-vswitchd --version"',
die_on_error=True)
m = re.search(r'.*([0-9]+\.[0-9]+\.[0-9]+).*',
str(result.output))
if m:
return Version(str(m.group(1)))
lprint("ERROR: Can't figure out ovs-vswitchd's version!")
sys.exit(-1)
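# Illustrative: 'ovs-vswitchd --version' output such as
# "ovs-vswitchd (Open vSwitch) 2.17.2" would yield Version("2.17.2").
#
# Create a testpmd symlink on the VM when only the new dpdk-testpmd
# binary exists
#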
def create_testpmd_link_if_dpdk_new(vm):
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"command -v testpmd". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=False)
m = re.search('testpmd', result.output)
if not m:
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"ln -s /usr/bin/dpdk-testpmd /usr/bin/testpmd". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=False)
#
# Get VM DPDK version
#
def get_vm_dpdk_version(vm):
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"testpmd -v". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=False)
m = re.search('DPDK ([0-9]+\\.[0-9]+\\.[0-9]+)',
result.output)
if m:
return Version(str(m.group(1)))
lprint("ERROR: Can't figure out VMs DPDK version!")
sys.exit(-1)
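# Illustrative: 'testpmd -v' output containing "DPDK 21.11.0" would yield
# Version("21.11.0").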
#
# Get OVS datapath type
#
def get_ovs_datapath():
result = dut_shell.dut_exec('sh -c "ovs-appctl dpif/show"',
die_on_error=True)
output = result.output.replace("\n", "")
m = re.search('(.+@.*{}):.*'.format(config.bridge_name),
output)
if m:
m = re.search('(.+)@.*', m.group(1))
return m.group(1)
lprint("ERROR: Can't figure out ovs datapath!")
sys.exit(-1)
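# Illustrative: 'dpif/show' output starting with "netdev@ovs-netdev:" and
# listing the test bridge would yield the datapath type "netdev", i.e. the
# part before the '@'.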
#
# Check if TC is enabled
#
def get_tc_state():
result = dut_shell.dut_exec(
'sh -c "ovs-vsctl get Open_vSwitch . other_config:hw-offload"',
die_on_error=False)
output = result.output.replace("\n", "")
if output == '"true"':
return True
return False
#
# Get bridge MAC address
#
def get_of_bridge_mac_address(bridge):
command = 'sh -c "ovs-ofctl show {0}"'.format(bridge)
result = dut_shell.dut_exec(command, die_on_error=True)
m = re.search('\\s*LOCAL\\({0}\\): addr:(.*)'.format(bridge),
result.output)
if not m:
lprint("ERROR: Can't figure out MAC address for bridge \"{}\"".
format(bridge))
sys.exit(-1)
slogger.debug("MAC address for bridge \"{}\" is {}".format(bridge,
m.group(1)))
return m.group(1)
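# Illustrative 'ovs-ofctl show' line matched by the regex above (MAC and
# bridge name are made up):
#   LOCAL(ovs_pvp_br0): addr:52:54:00:11:22:33
# which would return "52:54:00:11:22:33".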
#
# Flow (rule) type definitions
#
flow_types = ['L2', 'L3', 'L4-UDP']
flow_rule_types = ['flows', 'NORMAL', 'port', 'none']
def get_flow_type_short():
labels = dict(list(zip(flow_types,
['L2', 'L3', 'L4-UDP'])))
return labels[config.flow_type]
def get_flow_type_name():
labels = dict(list(zip(flow_types,
['l2', 'l3', 'l4_udp'])))
return labels[config.flow_type]
def get_traffic_generator_flow():
flow_type = dict(list(zip(flow_types,
[TrafficFlowType.l2_mac,
TrafficFlowType.l3_ipv4,
TrafficFlowType.l4_udp])))
return flow_type[config.flow_type]
#
# Get traffic rate string if not 100%
#
def get_traffic_rate_str():
if config.traffic_rate < 100:
return ", traffic rate {:.3f}%".format(config.traffic_rate)
return ""
#
# Traffic tester type definitions
#
traffic_tester_types = ['xena', 'trex']
def get_traffic_generator_type():
traffic_generator_type = dict(list(zip(traffic_tester_types,
[TrafficGeneratorType.xena,
TrafficGeneratorType.trex])))
return traffic_generator_type[config.tester_type]
#
# Convert zero packet test result dictionary to cvs/graph dictionary list
#
def get_result_sets_from_zero_loss_results(results):
test_results = dict()
cpu_results = dict()
traffic_rate_results = dict()
loss_rate_results = dict()
for nr_of_flows, per_pkt_results in results.items():
test_results[nr_of_flows] = list()
cpu_results[nr_of_flows] = list()
traffic_rate_results[nr_of_flows] = list()
loss_rate_results[nr_of_flows] = list()
for pkt_size in natsorted(list(per_pkt_results.keys())):
test_results[nr_of_flows].append(
per_pkt_results[pkt_size]["rx_packets_second"])
cpu_results[nr_of_flows].append(
per_pkt_results[pkt_size]["cpu_stats"])
traffic_rate_results[nr_of_flows].append(
per_pkt_results[pkt_size]["traffic_rate"])
loss_rate_results[nr_of_flows].append(
calc_loss_percentage(per_pkt_results[pkt_size]))
return test_results, cpu_results, traffic_rate_results, loss_rate_results
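# Illustrative input shape handled above (inferred from the key accesses):
#   results = {nr_of_flows: {pkt_size: {"rx_packets_second": pps,
#                                       "cpu_stats": {...},
#                                       "traffic_rate": pct, ...}}}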
#
# is_vm_needed_for_tests()
#
def is_vm_needed_for_tests():
if not config.skip_vv_test or not config.skip_pv_test or \
not config.skip_pvp_test or config.run_pvp_zero_loss_test:
return True
return False
#
# main()
#
def main():
#
# Not the best way to share all of this, but will work for this
# small test script
#
global config
global plt
global dut_shell
global slogger
global of_interfaces
global ovs_data_path
global ovs_tc_enabled
global dp_interfaces
global tester
global phy_speed
global ovs_version
global vm_dpdk_version
global run_start_time
run_start_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#
# Command line argument parsing
#
parser = argparse.ArgumentParser()
parser.add_argument("--bridge-name", metavar="BRIDGE",
help="Bridge name to use for testing", type=str,
default=DEFAULT_BRIDGE_NAME)
parser.add_argument("-d", "--debug",
help="Enable debugging", action="store_true")
parser.add_argument("--debug-dut-shell",
help="Enable DUT shell debugging", action="store_true")
parser.add_argument("--debug-scapy",
help="Enable scapy debugging", action="store_true")
parser.add_argument("--debug-script",
help="Enable script debugging", action="store_true")
parser.add_argument("--debug-tester",
help="Enable tester debugging", action="store_true")
parser.add_argument("--pmd-rxq-affinity", metavar="AFINITY",
help="Set pmd-rxq-affinity when script configures "
"bridges", type=str)
parser.add_argument("--dut-vm-address", metavar="ADDRESS",
help="IP address of VM running on OpenVSwitch DUT",
type=str, default=DEFAULT_DUT_VM_ADDRESS)
parser.add_argument("--dut-vm-nic-pci", metavar="PCI",
help="PCI address of VMs virtual NIC", type=str,
default=DEFAULT_DUT_VM_NIC_PCI_ADDRESS)
parser.add_argument("--dut-vm-user", metavar="USER",
help="User name of VM running on OpenVSwitch DUT",
type=str, default=DEFAULT_DUT_VM_LOGIN_USER)
parser.add_argument("--dut-vm-password", metavar="PASSWORD",
help="User name of VM running on OpenVSwitch DUT",
type=str, default=DEFAULT_DUT_VM_LOGIN_PASSWORD)
parser.add_argument("--dut-vm-nic-queues", metavar="QUEUES",
help="Number of VM nic queues (and cores) to "
"allocate, default 1", type=int, default=1)
parser.add_argument("--dut-vm-nic-rxd", metavar="DESCRIPTORS",
help="Number of VM nic receive descriptors, "
"default 4096", type=int, default=4096)
parser.add_argument("--dut-vm-nic-txd", metavar="DESCRIPTORS",
help="Number of VM nic transmit descriptors, "
"default 1024", type=int, default=1024)
# Removed VV test for now, as it needs non-upstream trafgen tool
# parser.add_argument("--dut-second-vm-address", metavar="ADDRESS",
# help="IP address of second VM running on "
# "OpenVSwitch DUT", type=str,
# default=DEFAULT_DUT_SECOND_VM_ADDRESS)
# parser.add_argument("--dut-second-vm-nic-pci", metavar="PCI",
# help="PCI address of VMs virtual NIC", type=str,
# default=DEFAULT_DUT_VM_NIC_PCI_ADDRESS)
parser.add_argument("--flow-rule-type",
help="Flow rules programmed, default flows",
choices=flow_rule_types, default='flows')
parser.add_argument("--flow-type",
help="Flow type used for the tests, default L3",
choices=flow_types, default='L3')
parser.add_argument("-g", "--gui",
help="Show graph GUI", action="store_true")
parser.add_argument("--no-bridge-config",
help="Do not configure OVS", action="store_true")
parser.add_argument("-o", "--ovs-address", metavar="ADDRESS",
help="IP address of OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_ADDRESS)
parser.add_argument("--ovs-user", metavar="USER",
help="User name of OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_LOGIN_USER)
parser.add_argument("--ovs-password", metavar="PASSWORD",
help="User name of OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_LOGIN_PASSWORD)
parser.add_argument("-p", "--physical-interface", metavar="DEVICE",
help="Physical interface", type=str,
default=DEFAULT_PHYSICAL_INTERFACE)
parser.add_argument("--payload-packet-random",
help="Generate per packet random payload data "
"instead of the default incremental bytes",
action="store_true")
parser.add_argument("--perf",
help="Enable perf profiling", action="store_true")
parser.add_argument("--physical-interface-pci", metavar="PCI",
help="Physical interface's PCI address", type=str)
parser.add_argument("--second-physical-interface", metavar="DEVICE",
help="Second Physical interface", type=str,
default=DEFAULT_SECOND_PHYSICAL_INTERFACE)
parser.add_argument("--second-physical-interface-pci", metavar="PCI",
help="Second Physical interface", type=str)
parser.add_argument("--physical-speed", metavar="GBPS",
help="Physical interface speed in Gbit/s", type=int,
default=0)
parser.add_argument("--packet-list", metavar="LIST",
help="List of packet sizes to test", type=str,
default=DEFAULT_PACKET_LIST)
parser.add_argument("-r", "--run-time", metavar="SECONDS",
help="Traffic run time per test", type=int,
default=DEFAULT_RUN_TIME)
parser.add_argument("--run-pp-test",
help="Run the P to P test", action="store_true")
parser.add_argument("--run-p-test",
help="Run the port loopback test", action="store_true")
parser.add_argument("--run-p-zero-loss-test",
help="Run the P loopack test with zero packet loss",
action="store_true")
parser.add_argument("--run-pvp-zero-loss-test",
help="Run the P to V to P test with zero packet loss",
action="store_true")
# Disable VXLAN for now due to it being incomplete
# parser.add_argument("--run-vxlan-test",
# help="Run the VXLAN tunnel test", action="store_true")
parser.add_argument("--skip-pv-test",
help="Do not run the P to V test", action="store_true")
parser.add_argument("--skip-pvp-test",
help="Do not run the P to V to P test",
action="store_true")
# Removed VV test for now, as it needs non-upstream trafgen tool
# parser.add_argument("--skip-vv-test",
# help="Do not run the V to V test",
# action="store_true")
parser.add_argument("--stream-list", metavar="LIST",
help="List of stream sizes to test", type=str,
default=DEFAULT_STREAM_LIST)
parser.add_argument("--testpmd-startup-delay", metavar="SECONDS",
help="Time to wait before testpmd is ready to forward,"
" 0 = auto (waits for CPU > 150%%), default 0",
type=int, default=0)
parser.add_argument("--traffic-rate", metavar="PERCENTAGE",
help="Traffic rate sent by tester, default 100%%",
type=float, default=100)
parser.add_argument("--warm-up",
help="Do flow warm-up round before tests",
action="store_true")
parser.add_argument("--warm-up-timeout", metavar="SECONDS",
help="Warm up timeout", type=int,
default=DEFAULT_WARM_UP_TIMEOUT)
parser.add_argument("--warm-up-no-fail",
help="Continue running the test even if warm up "
"times out", action="store_true")
parser.add_argument("--no-cool-down",
help="Do not wait for datapath flows to be cleared",
action="store_true")
parser.add_argument("-v", "--virtual-interface", metavar="DEVICE",
help="Virtual interface", type=str,
default=DEFAULT_VIRTUAL_INTERFACE)
# Removed VV test for now, as it needs non-upstream trafgen tool
# parser.add_argument("-w", "--second-virtual-interface", metavar="DEVICE",
# help="Virtual interface for second VM", type=str,
# default=DEFAULT_SECOND_VIRTUAL_INTERFACE)
parser.add_argument("-x", "--tester-address", metavar="ADDRESS",
help="IP address of network tester", type=str,
default=DEFAULT_TESTER_SERVER_ADDRESS)
parser.add_argument("--tester-type",
help="Traffic tester type to use, default \"xena\"",
choices=traffic_tester_types,
default=DEFAULT_TESTER_TYPE)
parser.add_argument("-i", "--tester-interface", metavar="{MOD,}PORT",
help="Tester interface", type=str,
default=DEFAULT_TESTER_INTERFACE)
parser.add_argument("--second-tester-interface", metavar="{MOD,}PORT",
help="Second tester interface", type=str,
default=DEFAULT_SECOND_TESTER_INTERFACE)
parser.add_argument("-l", "--logging", metavar="FILE",
help="Redirecting log output to file", type=str)
parser.add_argument("--dst-mac-address",
help="Destination Base MAC address",
type=str, default=DEFAULT_DST_MAC_ADDRESS)
parser.add_argument("--src-mac-address",
help="Source Base MAC address",
type=str, default=DEFAULT_SRC_MAC_ADDRESS)
parser.add_argument("--mac-swap",
help="Swap source/destination mac at VM",
action="store_true")
parser.add_argument("--zero-loss-step", metavar="PERCENTAGE",
help="Zero loss interval steps, default 1%%",
type=float, default=1)
config = parser.parse_args()
#
# Removed VV test for now, as it needs non-upstream trafgen tool
#
config.skip_vv_test = True
config.dut_second_vm_address = DEFAULT_DUT_SECOND_VM_ADDRESS
config.dut_second_vm_nic_pci = DEFAULT_DUT_VM_NIC_PCI_ADDRESS
config.second_virtual_interface = DEFAULT_SECOND_VIRTUAL_INTERFACE
#
# Disable VXLAN for now due to it being incomplete
#
config.run_vxlan_test = False
#
# Setting up the logger
#
logging.basicConfig(
format='%(asctime)s[%(levelname)-8.8s][%(name)s]: %(message)s',
datefmt='%H:%M:%S',
level=logging.ERROR,
filename=config.logging)
slogger = logging.getLogger('script')
slogger.setLevel(logging.INFO)
slogger.info("*" * 69)
slogger.info("** Starting \"%s\"", os.path.basename(__file__))
slogger.info("*" * 69)
#
# Check some input parameters
#
if config.ovs_address == '':
lprint(
"ERROR: You must supply the OVS host address to use for testing!")
sys.exit(-1)
if is_vm_needed_for_tests() and config.dut_vm_address == '':
lprint("ERROR: You must supply the DUT VM host address to use for "
"testing!")
sys.exit(-1)
if config.dst_mac_address == '':
lprint("ERROR: You must supply a Destination Base MAC Address")
sys.exit(-1)
if config.src_mac_address == '':
lprint("ERROR: You must supply a Source Base MAC Address")
sys.exit(-1)
if config.flow_type == 'L2':
if (int(config.src_mac_address.replace(":", ""), 16) & 0xffffff) \
!= 0:
lprint("ERROR: For L2 tests the Source Base MAC address must "
"be xx:xx:xx:00:00:00")
sys.exit(-1)
if (int(config.dst_mac_address.replace(":", ""), 16) & 0xffffff) \
!= 0:
lprint("ERROR: For L2 tests the Destination Base MAC address must "
"be xx:xx:xx:00:00:00")
sys.exit(-1)
if is_vm_needed_for_tests() and \
not check_pci_address_string(config.dut_vm_nic_pci):
lprint("ERROR: You must supply a valid PCI address for the VMs NIC!")
sys.exit(-1)
if not config.skip_vv_test and config.second_virtual_interface == '':
lprint("ERROR: You must supply a second virtual interface to use for "
"testing!")
sys.exit(-1)
if not config.skip_vv_test and config.dut_second_vm_address == '':
lprint("ERROR: You must supply the second DUT VM address!")
sys.exit(-1)
if not config.skip_vv_test and \
not check_pci_address_string(config.dut_second_vm_nic_pci):
lprint("ERROR: You must supply a valid PCI address for the second "
"VMs NIC!")
sys.exit(-1)
if config.dut_second_vm_address != '' and config.dut_vm_nic_pci == '':
lprint(
"ERROR: You must supply the second DUT VM host's NIC PCI address!")
sys.exit(-1)
if config.physical_interface == '':
lprint("ERROR: You must supply the physical interface to use for "
"testing!")
sys.exit(-1)
if config.run_pp_test and config.second_physical_interface == '':
lprint("ERROR: You must supply the second physical interface to use "
"for testing!")
sys.exit(-1)
if is_vm_needed_for_tests() and config.virtual_interface == '':
lprint(
"ERROR: You must supply the virtual interface to use for testing!")
sys.exit(-1)
if config.tester_address == '':
lprint(
"ERROR: You must supply the tester's address to use for testing!")
sys.exit(-1)
if config.tester_interface == '':
lprint("ERROR: You must supply the tester's interface to use for "
"testing!")
sys.exit(-1)
if config.run_pp_test and config.second_tester_interface == '':
lprint("ERROR: You must supply the second tester's interface to use "
"for testing!")
sys.exit(-1)
if not tester_interface_valid(config.tester_interface):
lprint("ERROR: Invalid tester interface configuration!")
sys.exit(-1)
if config.second_tester_interface != '' and \
not tester_interface_valid(config.second_tester_interface):
lprint("ERROR: Invalid second tester interface configuration!")
sys.exit(-1)
if not check_list(config.stream_list, 1, 1000000):
lprint("ERROR: Invalid stream list, \"{}\", supplied!".format(
config.stream_list))
sys.exit(-1)
if config.flow_type == 'L4-UDP' and not \
check_list(config.stream_list, 1, 65535):
lprint("ERROR: Invalid stream list, \"{}\", supplied for L4 flows!".
format(config.stream_list))
sys.exit(-1)
if not check_list(config.packet_list, 64, 9000):
lprint("ERROR: Invalid packet list, \"{}\", supplied!".format(
config.packet_list))
sys.exit(-1)
if config.run_time < 20 or config.run_time > 3600:
lprint("ERROR: Run time should be [20..3600] seconds!")
sys.exit(-1)
if config.physical_speed != 0 and \
(config.physical_speed < 0 or config.physical_speed > 1000):
lprint("ERROR: Invalid physical speed supplied [1..1000]!")
sys.exit(-1)
if config.dut_vm_nic_queues < 1 or config.dut_vm_nic_queues > 63:
lprint("ERROR: Invalid VM NIC queue count supplied [1..63]!")
sys.exit(-1)
if config.run_vxlan_test and config.no_bridge_config:
#
# We can only support tunnels with no bridge config if no other tests
# are run, as tunnels need a special config compared to the other tests.
#
if not config.skip_vv_test or not config.skip_pv_test \
or not config.skip_pvp_test or config.run_pp_test or \
config.run_pvp_zero_loss_test or config.run_p_test or \
config.run_p_zero_loss_test:
lprint("ERROR: Tunnel tests can only be run individually "
"with the no-bridge-config option!")
sys.exit(-1)
if config.run_vxlan_test and config.flow_type != 'L3':
lprint("ERROR: Tunnel tests only support the L3 flow type!")
sys.exit(-1)
if config.run_vxlan_test and not check_list(config.packet_list, 96, 9000):
#
# ETH + IPv4 + UDP + VXLAN + ETH + IPv4 + UDP + ETH_CRC
#
lprint("ERROR: Minimal packet size for the VXLAN test should be 96 "
"bytes!")
sys.exit(-1)
if config.warm_up and (not config.skip_vv_test or config.run_vxlan_test):
lprint("WARNING: Warm-up only works for P2P, P2V, and P2V2P tests!")
if config.warm_up and config.flow_rule_type != "flows" and \
not config.warm_up_no_fail:
lprint("ERROR: --warm-up for none --flow-rule-type \"flows\" MUST "
"be configured with --warm-up-no-fail!")
sys.exit(-1)
if config.flow_rule_type == 'NORMAL' and not config.mac_swap:
lprint("ERROR: The NORMAL flow rule type requires the --mac-swap "
"option!")
sys.exit(-1)
if config.zero_loss_step > 25 or config.zero_loss_step < 0.001:
lprint("ERROR: Invalid zero loss interval step size supplied "
"(0.001..25]!")
sys.exit(-1)
if config.traffic_rate > 100 or config.traffic_rate < 0.001:
lprint("ERROR: Invalid traffic rate configured (0.001..100]!")
sys.exit(-1)
if is_vm_needed_for_tests() and config.testpmd_startup_delay == 0 and \
config.dut_vm_nic_queues < 2:
lprint("ERROR: When using less than 2 VM NIC queues the "
"--testpmd-startup-delay can not be AUTO(0)!")
sys.exit(-1)
if config.tester_type == 'trex' and config.payload_packet_random:
lprint("ERROR: The trex tester type currently does not support "
"the --payload-packet-random option!")
sys.exit(-1)
#
# Dump settings if global debug is enabled
#
if config.debug:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if config.debug_script or config.debug:
slogger.setLevel(logging.DEBUG)
if config.debug_scapy or config.debug:
logging.getLogger("scapy.runtime").setLevel(logging.DEBUG)
slogger.debug("Configured values:")
slogger.debug(" %-23.23s: %s", 'Debug', config.debug)
slogger.debug(" %-23.23s: %s", 'Debug DUT Shell', config.debug_dut_shell)
slogger.debug(" %-23.23s: %s", 'Debug Scapy', config.debug_scapy)
slogger.debug(" %-23.23s: %s", 'Debug Script', config.debug_script)
slogger.debug(" %-23.23s: %s", 'Debug Tester', config.debug_tester)
slogger.debug(" %-23.23s: %s", 'Flow Type', config.flow_type)
slogger.debug(" %-23.23s: %s", 'Perf tracing', config.perf)
slogger.debug(" %-23.23s: %s", 'Tester Type', config.tester_type)
slogger.debug(" %-23.23s: %s", 'Tester Address', config.tester_address)
slogger.debug(" %-23.23s: %s", 'Tester Interface',
config.tester_interface)
slogger.debug(" %-23.23s: %s", 'Second Tester Interface',
config.second_tester_interface)
slogger.debug(" %-23.23s: %s", 'OVS Bridge Name', config.bridge_name)
slogger.debug(" %-23.23s: %s", 'OVS DUT Address', config.ovs_address)
slogger.debug(" %-23.23s: %s", 'OVS DUT Login', config.ovs_user)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM1 Address',
config.dut_vm_address)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM2 Address',
config.dut_second_vm_address)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM1 PCI Address',
config.dut_vm_nic_pci)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM2 PCI Address',
config.dut_second_vm_nic_pci)
slogger.debug(" %-23.23s: %s", 'OVS VM Login', config.dut_vm_user)
slogger.debug(" %-23.23s: %s", 'OVS VM NIC queues',
config.dut_vm_nic_queues)
slogger.debug(" %-23.23s: %s", 'OVS VM NIC rxd', config.dut_vm_nic_rxd)
slogger.debug(" %-23.23s: %s", 'OVS VM NIC txd', config.dut_vm_nic_txd)
slogger.debug(" %-23.23s: %s", 'Physical Interface',
config.physical_interface)
slogger.debug(" %-23.23s: %u Gbit/s", 'Physical Int. Speed',
config.physical_speed)
slogger.debug(" %-23.23s: %s", 'Virtual Interface',
config.virtual_interface)
slogger.debug(" %-23.23s: %s", '2nd Virtual Interface',
config.second_virtual_interface)
slogger.debug(" %-23.23s: %s", 'MAC swap', config.mac_swap)
slogger.debug(" %-23.23s: %s", 'Source MAC', config.src_mac_address)
slogger.debug(" %-23.23s: %s", 'Destination MAC', config.dst_mac_address)
slogger.debug(" %-23.23s: %u seconds", 'Test run time', config.run_time)
slogger.debug(" %-23.23s: %s", 'Run with stream size\'s',
config.stream_list)
slogger.debug(" %-23.23s: %s", 'Run with packet size\'s',
config.packet_list)
slogger.debug(" %-23.23s: %s", 'Skip PV test', config.skip_pv_test)
slogger.debug(" %-23.23s: %s", 'Skip PVP test', config.skip_pvp_test)
slogger.debug(" %-23.23s: %s", 'Skip VV test', config.skip_vv_test)
slogger.debug(" %-23.23s: %s", 'Run PP test', config.run_pp_test)
slogger.debug(" %-23.23s: %s", 'Run P loopback test', config.run_p_test)
slogger.debug(" %-23.23s: %s", 'Run PVP 0 loss test',
config.run_pvp_zero_loss_test)
slogger.debug(" %-23.23s: %s", 'Run P 0 loss test',
config.run_p_zero_loss_test)
slogger.debug(" %-23.23s: %s", 'Warm-up', config.warm_up)
slogger.debug(" %-23.23s: %s", 'No cool down', config.no_cool_down)
slogger.debug(" %-23.23s: %f", 'Zero loss step', config.zero_loss_step)
slogger.debug(" %-23.23s: %f%%", 'Traffic rate', config.traffic_rate)
slogger.debug(" %-23.23s: %u seconds", 'testpmd startup delay',
config.testpmd_startup_delay)
#
# If we use the GUI, we need to set the correct back-end.
# However, this does not always seem to work with a non-Tk back-end; if you
# get Tkinter errors, set the following environment variable:
# export MPLBACKEND="agg"
#
# if config.gui:
# matplotlib.use('TkAgg')
# else:
# matplotlib.use('Agg')
#
# Commenting out the above, as it no longer works. Use the export as
# explained above, as Python loads the modules beforehand.
import matplotlib.pyplot as plt
#
# Quickly regenerate graph from results (DEBUG)
#
# packet_sizes = [64, 128, 256, 512, 1024, 1514]
# p2v_results = [22969229, 25139846, 18116596, 9398727, 4789329, 3259472]
# create_single_graph(packet_sizes, p2v_results,
# "Packet size", "Packets/second",
# "Physical to Virtual with 1000 flows",
# "test_p2v_1000")
# sys.exit(-1)
#
# Connecting to Tester
#
lprint("- Connecting to the tester...")
tester = TrafficGenerator(get_traffic_generator_type(),
hostname=config.tester_address)
if config.debug_tester:
logging.getLogger('xenalib.BaseSocket').setLevel(logging.DEBUG)
logging.getLogger('xenalib.KeepAliveThread').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaManager').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaModifier').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaPort').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaSocket').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaStream').setLevel(logging.DEBUG)
if not tester.reserve_port(config.tester_interface):
lprint("ERROR: Failed to add first tester port")
sys.exit(-1)
if config.second_tester_interface != '':
if not tester.reserve_port(config.second_tester_interface):
lprint("ERROR: Failed to add second tester port")
sys.exit(-1)
#
# Connecting to DUT
#
lprint("- Connecting to DUT, \"{}\"...".format(config.ovs_address))
dut_shell = DutSshShell(hostname=config.ovs_address,
username=config.ovs_user,
password=config.ovs_password,
missing_host_key=spur.ssh.MissingHostKey.accept)
if config.debug_dut_shell:
dut_shell.logger.setLevel(logging.DEBUG)
lprint("- Create testpmd link on VM for dpdk-testpmd, if needed...")
create_testpmd_link_if_dpdk_new(config.dut_vm_address)
ovs_version = get_ovs_version()
#
# Stop any running test tools on the VMs
#
lprint("- Stop any running test tools...")
stop_cpu_monitoring(die=False)
if config.dut_vm_address != '':
stop_traffic_rx_on_vm(config.dut_vm_address, die=False)
stop_traffic_tx_on_vm(config.dut_vm_address, die=False)
lprint("- Getting VM's DPDK version...")
vm_dpdk_version = get_vm_dpdk_version(config.dut_vm_address)
if config.dut_second_vm_address != '':
stop_traffic_rx_on_vm(config.dut_second_vm_address, die=False)
stop_traffic_tx_on_vm(config.dut_second_vm_address, die=False)
#
# Create OVS bridge, and get OpenFlow port numbers
#
if not config.no_bridge_config:
if not config.skip_pv_test or not config.skip_pvp_test or \
not config.skip_vv_test or config.run_pp_test or \
config.run_pvp_zero_loss_test or config.run_p_test or \
config.run_p_zero_loss_test:
#
# Also skip if all we are running are the tunnel tests
#
create_ovs_bridge()
#
# If we run only tunnel tests we need to skip this
#
if not config.skip_pv_test or not config.skip_pvp_test or \
not config.skip_vv_test or config.run_pp_test or \
config.run_pvp_zero_loss_test or config.run_p_test or \
config.run_p_zero_loss_test:
of_interfaces = dict()
dp_interfaces = dict()
of_interfaces, dp_interfaces = get_bridge_port_numbers()
#
# Getting physical port speed, used for graphs
#
if config.physical_speed != 0:
phy_speed = config.physical_speed * 1000000000
else:
phy_speed = get_physical_port_speed()
#
# Get datapath type
#
ovs_data_path = get_ovs_datapath()
lprint("- Get OVS datapath type, \"{}\"...".format(ovs_data_path))
ovs_tc_enabled = get_tc_state()
lprint("- Get TC state, \"{}\"...".format("enabled" if ovs_tc_enabled
else "disabled"))
#
# Open CSV file for writing
#
lprint("- Create \"test_results.csv\" for writing results...")
if config.flow_type == 'L2':
csv_file = "test_results_l2.csv"
elif config.flow_type == 'L3':
csv_file = "test_results_l3.csv"
elif config.flow_type == 'L4-UDP':
csv_file = "test_results_l4_udp.csv"
else:
raise ValueError("No support for this protocol!!")
with open(csv_file, 'w') as csvfile:
csv_handle = csv.writer(csvfile, dialect='excel')
csv_handle.writerow(["Physical port, \"{}\", speed {} Gbit/s, "
"traffic rate {}%".
format(config.physical_interface,
phy_speed / 1000000000,
config.traffic_rate)])
csv_handle.writerow([])
csv_handle.writerow([])
#
# Run tests
#
stream_size_list = [int(i) for i in config.stream_list.split(',')]
packet_size_list = [int(i) for i in config.packet_list.split(',')]
flow_str = get_flow_type_short()
flow_file_str = get_flow_type_name()
v2v_results = dict()
v2v_cpu_results = dict()
p2v_results = dict()
p2v_cpu_results = dict()
p2p_results = dict()
p2p_cpu_results = dict()
p2v2p_results = dict()
p2v2p_cpu_results = dict()
p_results = dict()
p_cpu_results = dict()
if not config.skip_vv_test:
for nr_of_streams in stream_size_list:
v2v_results[nr_of_streams], \
v2v_cpu_results[nr_of_streams] = test_v2v(nr_of_streams,
packet_size_list)
create_multiple_graph(packet_size_list, v2v_results,
"Packet size", "Packets/second",
"Virtual to Virtual, {}".
format(get_flow_type_short()),
"test_v2v_all_{}".
format(get_flow_type_name()),
None, cpu_utilization=v2v_cpu_results)
create_multiple_graph(packet_size_list, v2v_results,
"Packet size", "Packets/second",
"Virtual to Virtual, {}".
format(get_flow_type_short()),
"test_v2v_all_{}_ref".
format(get_flow_type_name()),
[phy_speed],
cpu_utilization=v2v_cpu_results)
csv_write_test_results(csv_handle, 'Virtual to Virtual test',
stream_size_list, packet_size_list,
v2v_results, v2v_cpu_results)
if not config.skip_pv_test:
for nr_of_streams in stream_size_list:
p2v_results[nr_of_streams], \
p2v_cpu_results[nr_of_streams] = test_p2v(nr_of_streams,
packet_size_list)
create_multiple_graph(packet_size_list, p2v_results,
"Packet size", "Packets/second",
"Physical to Virtual, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p2v_all_{}".format(flow_file_str),
None, cpu_utilization=p2v_cpu_results)
create_multiple_graph(packet_size_list, p2v_results,
"Packet size", "Packets/second",
"Physical to Virtual, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p2v_all_{}_ref".format(
flow_file_str),
[phy_speed],
cpu_utilization=p2v_cpu_results)
csv_write_test_results(csv_handle, 'Physical to Virtual test',
stream_size_list, packet_size_list,
p2v_results, p2v_cpu_results)
if not config.skip_pvp_test:
for nr_of_streams in stream_size_list:
p2v2p_results[nr_of_streams], \
p2v2p_cpu_results[nr_of_streams] = test_p2v2p(
nr_of_streams, packet_size_list)
create_multiple_graph(packet_size_list, p2v2p_results,
"Packet size", "Packets/second",
"Physical to Virtual to Physical, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p2v2p_all_{}".format(
flow_file_str),
None, cpu_utilization=p2v2p_cpu_results)
create_multiple_graph(packet_size_list, p2v2p_results,
"Packet size", "Packets/second",
"Physical to Virtual to Physical, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p2v2p_all_{}_ref".format(
flow_file_str),
[phy_speed],
cpu_utilization=p2v2p_cpu_results)
csv_write_test_results(csv_handle,
'Physical to Virtual to Physical test',
stream_size_list, packet_size_list,
p2v2p_results, p2v2p_cpu_results)
if config.run_pp_test:
for nr_of_streams in stream_size_list:
p2p_results[nr_of_streams], \
p2p_cpu_results[nr_of_streams] = test_p2p(nr_of_streams,
packet_size_list)
create_multiple_graph(packet_size_list, p2p_results,
"Packet size", "Packets/second",
"Physical to Physical, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p2p_all_{}".format(flow_file_str),
None, cpu_utilization=p2p_cpu_results)
create_multiple_graph(packet_size_list, p2p_results,
"Packet size", "Packets/second",
"Physical to Physical, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p2p_all_{}_ref".format(
flow_file_str),
[phy_speed],
cpu_utilization=p2p_cpu_results)
csv_write_test_results(csv_handle, 'Physical to Physical test',
stream_size_list, packet_size_list,
p2p_results, p2p_cpu_results)
if config.run_p_test:
for nr_of_streams in stream_size_list:
p_results[nr_of_streams], \
p_cpu_results[nr_of_streams] = test_p(nr_of_streams,
packet_size_list)
create_multiple_graph(packet_size_list, p_results,
"Packet size", "Packets/second",
"Physical loopback, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p_all_{}".format(flow_file_str),
None, cpu_utilization=p_cpu_results)
create_multiple_graph(packet_size_list, p_results,
"Packet size", "Packets/second",
"Physical loopback, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_p_all_{}_ref".format(
flow_file_str),
[phy_speed],
cpu_utilization=p_cpu_results)
csv_write_test_results(csv_handle, 'Physical loopback test',
stream_size_list, packet_size_list,
p_results, p_cpu_results)
if config.run_vxlan_test:
if not config.no_bridge_config:
create_ovs_vxlan_bridge()
of_interfaces = dict()
dp_interfaces = dict()
of_interfaces, dp_interfaces = get_bridge_port_numbers(tunnel=True)
vxlan_results = dict()
vxlan_cpu_results = dict()
for nr_of_streams in stream_size_list:
vxlan_results[nr_of_streams], \
vxlan_cpu_results[nr_of_streams] = test_vxlan(
nr_of_streams, packet_size_list)
create_multiple_graph(packet_size_list, vxlan_results,
"Packet size", "Packets/second",
"VXLAN Tunnel, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_vxlan_all_{}".format(
flow_file_str),
None, cpu_utilization=vxlan_cpu_results)
create_multiple_graph(packet_size_list, vxlan_results,
"Packet size", "Packets/second",
"VXLAN Tunnel, {}{}".
format(flow_str, get_traffic_rate_str()),
"test_vxlan_all_{}_ref".format(
flow_file_str),
[phy_speed],
cpu_utilization=vxlan_cpu_results)
csv_write_test_results(csv_handle, 'VXLAN Tunnel',
stream_size_list, packet_size_list,
vxlan_results, vxlan_cpu_results)
#
# Run the zero packet loss test
#
if config.run_pvp_zero_loss_test:
test_p2v2p_zero_loss(stream_size_list, packet_size_list,
zero_loss_step=config.zero_loss_step,
csv_handle=csv_handle)
#
# Run the zero packet loss test for physical loopback
#
if config.run_p_zero_loss_test:
test_p_zero_loss(stream_size_list, packet_size_list,
zero_loss_step=config.zero_loss_step,
csv_handle=csv_handle)
#
# Done...
#
lprint("- Done running performance tests!")
# For now we leave the DUT in the last test state in case we would like
# to do some troubleshooting. The first step in a re-run is to remove the
# bridge and delete all OpenFlow rules.
tester.disconnect()
del tester
#
# Start main() as default entry point...
#
if __name__ == '__main__':
main()
| chaudron/ovs_perf | ovs_performance.py | Python | apache-2.0 | 172,751 | [
"TINKER"
] | 5b8b579828433756d9b5b8559f50cd668b72797b54b7152e9f786a8933e47d2f |
"""
vtkImageExportToArray - a NumPy front-end to vtkImageExport
This class converts a VTK image to a Numeric Python array.
To use this class, you must have the LLNL Numeric Python distribution
(http://numpy.sf.net)
Methods
SetInput(input) -- connect to VTK image pipeline
GetArray() -- execute pipeline and return a Numeric array
Convert VTK_UNSIGNED_SHORT to Python int
(this might be necessary because Python doesn't support unsigned short;
the default is to cast unsigned short to signed short).
SetConvertUnsignedShortToInt(yesno)
ConvertUnsignedShortToIntOn()
ConvertUnsignedShortToIntOff()
From vtkImageExport
GetDataExtent()
GetDataSpacing()
GetDataOrigin()
"""
import Numeric
import umath
from vtk import vtkImageExport
from vtkConstants import *
_NEW_NUMERIC = 0
try:
val = float(Numeric.__version__)
except ValueError:
_NEW_NUMERIC = 0
else:
if val > 20.0:
_NEW_NUMERIC = 1
else:
_NEW_NUMERIC = 0
class vtkImageExportToArray:
def __init__(self):
self.__export = vtkImageExport()
self.__ConvertUnsignedShortToInt = 0
# type dictionary: note that python doesn't support
# unsigned integers!
__typeDict = { VTK_CHAR:Numeric.Int8,
VTK_UNSIGNED_CHAR:Numeric.UnsignedInt8,
VTK_SHORT:Numeric.Int16,
VTK_UNSIGNED_SHORT:Numeric.Int16,
VTK_INT:Numeric.Int32,
VTK_FLOAT:Numeric.Float32,
VTK_DOUBLE:Numeric.Float64 }
__sizeDict = { VTK_CHAR:1,
VTK_UNSIGNED_CHAR:1,
VTK_SHORT:2,
VTK_UNSIGNED_SHORT:2,
VTK_INT:4,
VTK_FLOAT:4,
VTK_DOUBLE:8 }
# convert unsigned shorts to ints, to avoid sign problems
def SetConvertUnsignedShortToInt(self,yesno):
self.__ConvertUnsignedShortToInt = yesno
def GetConvertUnsignedShortToInt(self):
return self.__ConvertUnsignedShortToInt
def ConvertUnsignedShortToIntOn(self):
self.__ConvertUnsignedShortToInt = 1
def ConvertUnsignedShortToIntOff(self):
self.__ConvertUnsignedShortToInt = 0
# set the input
def SetInput(self,input):
return self.__export.SetInput(input)
def GetInput(self):
return self.__export.GetInput()
def GetArray(self):
input = self.__export.GetInput()
input.UpdateInformation()
type = input.GetScalarType()
extent = input.GetWholeExtent()
numComponents = input.GetNumberOfScalarComponents()
dim = (extent[5]-extent[4]+1,
extent[3]-extent[2]+1,
extent[1]-extent[0]+1)
if (numComponents > 1):
dim = dim + (numComponents,)
size = dim[0]*dim[1]*dim[2]*numComponents*self.__sizeDict[type]
if _NEW_NUMERIC:
imArray = Numeric.zeros((size,),Numeric.UnsignedInt8)
self.__export.Export(imArray)
else:
imString = Numeric.zeros((size,),
Numeric.UnsignedInt8).tostring()
self.__export.Export(imString)
imArray = Numeric.fromstring(imString,self.__typeDict[type])
# just to remind myself of the dangers of memory management
del imString
# reshape array appropriately.
imArray = Numeric.reshape(imArray, dim)
# convert unsigned short to int to avoid sign issues
if (type == VTK_UNSIGNED_SHORT and self.__ConvertUnsignedShortToInt):
imArray = umath.bitwise_and(imArray.astype(Numeric.Int32),0xffff)
return imArray
def GetDataExtent(self):
return self.__export.GetDataExtent()
def GetDataSpacing(self):
return self.__export.GetDataSpacing()
def GetDataOrigin(self):
return self.__export.GetDataOrigin()
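# Minimal usage sketch (illustrative; assumes an old-style VTK pipeline with
# an image source such as vtkImageReader already configured as 'reader'):
#
#   exporter = vtkImageExportToArray()
#   exporter.SetInput(reader.GetOutput())
#   array = exporter.GetArray()   # Numeric array shaped (z, y, x[, components])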
| sgh/vtk | Wrapping/Python/vtk/util/vtkImageExportToArray.py | Python | bsd-3-clause | 3,919 | [
"VTK"
] | 23e5c73f274c1e1b59760c5353327a807899431b59305b146a9ff9c8292af705 |
import vtk
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
from vtk.numpy_interface import dataset_adapter as dsa
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from timeit import default_timer as timer
import logging
class FilterWorldMesh(VTKPythonAlgorithmBase):
"""
vtkAlgorithm with input vtkPolyData and output vtkPolyData
Input: Surface to be added to the global mesh
Output: The global mesh
"""
def __init__(self, color=False):
"""
:param color: default=False
Color every new surface of the global mesh a different color.
:return:
"""
VTKPythonAlgorithmBase.__init__(self,
nInputPorts=1, inputType='vtkPolyData',
nOutputPorts=1, outputType='vtkPolyData')
self._worldmesh = vtk.vtkAppendPolyData()
# colormap for changing polydata on every iteration
# http://matplotlib.org/examples/color/colormaps_reference.html
self._color = color
if self._color:
gist_rainbow_r = plt.cm.get_cmap(name='gist_rainbow_r')
mycm = gist_rainbow_r(range(160, 260, 5))[:, 0:3]
self._colorcycle = cycle(mycm)
def RequestData(self, request, inInfo, outInfo):
logging.info('')
start = timer()
# input polydata
# have to make a copy otherwise polys will not show up in the render
# even though GetNumberOfCells() says they should be there
tmp = vtk.vtkPolyData.GetData(inInfo[0])
inp = vtk.vtkPolyData()
inp.ShallowCopy(tmp)
# change color of all cells
if self._color:
ncells = inp.GetNumberOfCells()
c = self._colorcycle.next()
vtkarray = dsa.numpyTovtkDataArray(np.tile(c, (ncells, 1)))
inp.GetCellData().SetScalars(vtkarray)
# add to world mesh
self._worldmesh.AddInputData(inp)
self._worldmesh.Update()
logging.info('Number of cells: in = {} total = {}'
.format(inp.GetNumberOfCells(),
self._worldmesh.GetOutput().GetNumberOfCells()))
# output world mesh
out = vtk.vtkPolyData.GetData(outInfo)
out.ShallowCopy(self._worldmesh.GetOutput())
end = timer()
logging.info('Execution time {:.4f} seconds'.format(end - start))
return 1
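# Minimal pipeline sketch (illustrative; 'source' stands in for any
# vtkAlgorithm producing vtkPolyData):
#
#   world_mesh = FilterWorldMesh(color=True)
#   world_mesh.SetInputConnection(source.GetOutputPort())
#   world_mesh.Update()
#   mesh = world_mesh.GetOutput()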
| lucasplus/MABDI | mabdi/FilterWorldMesh.py | Python | bsd-3-clause | 2,462 | [
"VTK"
] | 36636356632668e4e2467629fdd556a903acb404a82875c8fcd3dffd1edb1258 |
#! /usr/bin/env python
#
# jaxml
# (C) Jerome Alet <alet@librelogiciel.com> 2000-2001
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
# $Id: setup.py,v 1.6 2002/04/25 09:08:35 jerome Exp $
from distutils.core import setup
import jaxml
setup(name = "jaxml", version = jaxml.__version__,
licence = "GNU GPL",
description = "a Python module to generate XML easily",
author = "Jerome Alet",
author_email = "alet@librelogiciel.com",
url = "http://www.librelogiciel.com/software/",
py_modules = [ "jaxml" ])
| denys-duchier/Scolar | config/softs/jaxml-3.01/setup.py | Python | gpl-2.0 | 823 | [
"VisIt"
] | c59b92547235fc448019d121a12f942ff38429ec134038b73b11fa8b57b2d54c |
import urllib.request
import json
data = {
"Inputs": {
"input1":
[
{
'idx': "1",
'age': "1",
'promotion_num': "1",
'identity': "1",
'game_play_min_per_day': "1",
'item_purchase_num_in_90_days': "1",
'game_level': "1",
'crystal': "1",
'race': "",
'gender': "",
'register_code': "1",
'purchase_num': "1",
'game_play_num_per_week': "1",
'country': "",
'churn_YN': "",
}
],
},
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
url = 'https://asiasoutheast.services.azureml.net/subscriptions/3e6494d9d19d41c6bfa644870aecc57b/services/2b11b85c3fbe4daf8b8e6dc17c558e11/execute?api-version=2.0&format=swagger'
api_key = '<CHANGE-HERE>' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
req = urllib.request.Request(url, body, headers)
try:
response = urllib.request.urlopen(req)
result = response.read()
print(result)
except urllib.error.HTTPError as error:
print("The request failed with status code: " + str(error.code))
# Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure
print(error.info())
print(json.loads(error.read().decode("utf8", 'ignore'))) | cse-ml-project/cse-ml-basic | code/python/churn.py | Python | mit | 1,814 | [
"CRYSTAL"
] | a6329022cd714baab64709a52fa1f06e62d1c0dbb6a9755cc0e55509059d1886 |
"""Tests for items views."""
import os
import json
import tempfile
from uuid import uuid4
import copy
import textwrap
from pymongo import MongoClient
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.conf import settings
from contentstore.tests.utils import CourseTestCase
from cache_toolbox.core import del_cached_content
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from opaque_keys.edx.keys import UsageKey
from xmodule.video_module import transcripts_utils
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class Basetranscripts(CourseTestCase):
"""Base test class for transcripts tests."""
def clear_subs_content(self):
"""Remove, if transcripts content exists."""
for youtube_id in self.get_youtube_ids().values():
filename = 'subs_{0}.srt.sjson'.format(youtube_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.get_id())
except NotFoundError:
pass
def setUp(self):
"""Create initial data."""
super(Basetranscripts, self).setUp()
# Add video module
data = {
'parent_locator': unicode(self.course.location),
'category': 'video',
'type': 'video'
}
resp = self.client.ajax_post('/xblock/', data)
self.assertEqual(resp.status_code, 200)
self.video_usage_key = self._get_usage_key(resp)
self.item = modulestore().get_item(self.video_usage_key)
# hI10vDNYz4M - valid Youtube ID with transcripts.
# JMD_ifUUfsU, AKqURZnYqpk, DYpADpL7jAY - valid Youtube IDs without transcripts.
self.item.data = '<video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
modulestore().update_item(self.item, self.user.id)
self.item = modulestore().get_item(self.video_usage_key)
# Remove all transcripts for current module.
self.clear_subs_content()
def _get_usage_key(self, resp):
""" Returns the usage key from the response returned by a create operation. """
usage_key_string = json.loads(resp.content).get('locator')
return UsageKey.from_string(usage_key_string)
def get_youtube_ids(self):
"""Return youtube speeds and ids."""
item = modulestore().get_item(self.video_usage_key)
return {
0.75: item.youtube_id_0_75,
1: item.youtube_id_1_0,
1.25: item.youtube_id_1_25,
1.5: item.youtube_id_1_5
}
class TestUploadtranscripts(Basetranscripts):
"""Tests for '/transcripts/upload' url."""
def setUp(self):
"""Create initial data."""
super(TestUploadtranscripts, self).setUp()
self.good_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
self.good_srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
self.good_srt_file.seek(0)
self.bad_data_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
self.bad_data_srt_file.write('Some BAD data')
self.bad_data_srt_file.seek(0)
self.bad_name_srt_file = tempfile.NamedTemporaryFile(suffix='.BAD')
self.bad_name_srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
self.bad_name_srt_file.seek(0)
self.ufeff_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
def test_success_video_module_source_subs_uploading(self):
self.item.data = textwrap.dedent("""
<video youtube="">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
modulestore().update_item(self.item, self.user.id)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': self.video_usage_key,
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content).get('status'), 'Success')
item = modulestore().get_item(self.video_usage_key)
self.assertEqual(item.sub, filename)
content_location = StaticContent.compute_location(
self.course.id, 'subs_{0}.srt.sjson'.format(filename))
self.assertTrue(contentstore().find(content_location))
def test_fail_data_without_id(self):
link = reverse('upload_transcripts')
resp = self.client.post(link, {'transcript-file': self.good_srt_file})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "locator" form data.')
def test_fail_data_without_file(self):
link = reverse('upload_transcripts')
resp = self.client.post(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "file" form data.')
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': 'BAD_LOCATOR',
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
# Test for raising `ItemNotFoundError` exception.
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
def test_fail_for_non_video_module(self):
# non_video module: setup
data = {
'parent_locator': unicode(self.course.location),
'category': 'non_video',
'type': 'non_video'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
item = modulestore().get_item(usage_key)
item.data = '<non_video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M" />'
modulestore().update_item(item, self.user.id)
# non_video module: testing
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(usage_key),
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
def test_fail_bad_xml(self):
self.item.data = '<<<video youtube="0.75:JMD_ifUUfsU,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
modulestore().update_item(self.item, self.user.id)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(self.video_usage_key),
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
# incorrect xml produces incorrect item category error
self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
def test_fail_bad_data_srt_file(self):
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.bad_data_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(self.video_usage_key),
'transcript-file': self.bad_data_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Something wrong with SubRip transcripts file during parsing.')
def test_fail_bad_name_srt_file(self):
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.bad_name_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(self.video_usage_key),
'transcript-file': self.bad_name_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'We support only SubRip (*.srt) transcripts format.')
def test_undefined_file_extension(self):
srt_file = tempfile.NamedTemporaryFile(suffix='')
srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
srt_file.seek(0)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(srt_file.name))[0]
resp = self.client.post(link, {
'locator': self.video_usage_key,
'transcript-file': srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Undefined file extension.')
def test_subs_uploading_with_byte_order_mark(self):
"""
Test uploading subs containing BOM(Byte Order Mark), e.g. U+FEFF
"""
filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Test ufeff characters
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""").encode('utf-8-sig')
# Verify that ufeff character is in filedata.
self.assertIn("ufeff", filedata)
self.ufeff_srt_file.write(filedata)
self.ufeff_srt_file.seek(0)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.ufeff_srt_file.name))[0]
resp = self.client.post(link, {
'locator': self.video_usage_key,
'transcript-file': self.ufeff_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 200)
content_location = StaticContent.compute_location(
self.course.id, 'subs_{0}.srt.sjson'.format(filename))
self.assertTrue(contentstore().find(content_location))
subs_text = json.loads(contentstore().find(content_location).data).get('text')
self.assertIn("Test ufeff characters", subs_text)
def tearDown(self):
super(TestUploadtranscripts, self).tearDown()
self.good_srt_file.close()
self.bad_data_srt_file.close()
self.bad_name_srt_file.close()
self.ufeff_srt_file.close()
class TestDownloadtranscripts(Basetranscripts):
"""Tests for '/transcripts/download' url."""
def save_subs_to_store(self, subs, subs_id):
"""Save transcripts into `StaticContent`."""
filedata = json.dumps(subs, indent=2)
mime_type = 'application/json'
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
content = StaticContent(content_location, filename, mime_type, filedata)
contentstore().save(content)
del_cached_content(content_location)
return content_location
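# The sjson payload saved above has this shape (times are in milliseconds):
#   {"start": [...], "end": [...], "text": [...]}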
def test_success_download_youtube(self):
self.item.data = '<video youtube="1:JMD_ifUUfsU" />'
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'JMD_ifUUfsU')
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key, 'subs_id': "JMD_ifUUfsU"})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, """0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> 00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n""")
def test_success_download_nonyoutube(self):
subs_id = str(uuid4())
self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key, 'subs_id': subs_id})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp.content,
'0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> '
'00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n'
)
transcripts_utils.remove_subs_from_store(subs_id, self.item)
def test_fail_data_without_file(self):
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': ''})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(link, {})
self.assertEqual(resp.status_code, 404)
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': 'BAD_LOCATOR'})
self.assertEqual(resp.status_code, 404)
# Test for raising `ItemNotFoundError` exception.
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR')})
self.assertEqual(resp.status_code, 404)
def test_fail_for_non_video_module(self):
        # Videoalpha module (i.e. not a plain 'video' module): setup
data = {
'parent_locator': unicode(self.course.location),
'category': 'videoalpha',
'type': 'videoalpha'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
subs_id = str(uuid4())
item = modulestore().get_item(usage_key)
item.data = textwrap.dedent("""
<videoalpha youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</videoalpha>
""".format(subs_id))
modulestore().update_item(item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': unicode(usage_key)})
self.assertEqual(resp.status_code, 404)
def test_fail_nonyoutube_subs_dont_exist(self):
self.item.data = textwrap.dedent("""
<video youtube="" sub="UNDEFINED">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
modulestore().update_item(self.item, self.user.id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 404)
def test_empty_youtube_attr_and_sub_attr(self):
self.item.data = textwrap.dedent("""
<video youtube="">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
modulestore().update_item(self.item, self.user.id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 404)
def test_fail_bad_sjson_subs(self):
subs_id = str(uuid4())
self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1'
]
}
        self.save_subs_to_store(subs, subs_id)  # store the malformed sjson under the id the item references
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 404)
class TestChecktranscripts(Basetranscripts):
"""Tests for '/transcripts/check' url."""
def save_subs_to_store(self, subs, subs_id):
"""Save transcripts into `StaticContent`."""
filedata = json.dumps(subs, indent=2)
mime_type = 'application/json'
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
content = StaticContent(content_location, filename, mime_type, filedata)
contentstore().save(content)
del_cached_content(content_location)
return content_location
def test_success_download_nonyoutube(self):
subs_id = str(uuid4())
self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
data = {
'locator': unicode(self.video_usage_key),
'videos': [{
'type': 'html5',
'video': subs_id,
'mode': 'mp4',
}]
}
link = reverse('check_transcripts')
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content),
{
u'status': u'Success',
u'subs': unicode(subs_id),
u'youtube_local': False,
u'is_youtube_mode': False,
u'youtube_server': False,
u'command': u'found',
u'current_item_subs': unicode(subs_id),
u'youtube_diff': True,
u'html5_local': [unicode(subs_id)],
u'html5_equal': False,
}
)
transcripts_utils.remove_subs_from_store(subs_id, self.item)
def test_check_youtube(self):
self.item.data = '<video youtube="1:JMD_ifUUfsU" />'
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'JMD_ifUUfsU')
link = reverse('check_transcripts')
data = {
'locator': unicode(self.video_usage_key),
'videos': [{
'type': 'youtube',
'video': 'JMD_ifUUfsU',
'mode': 'youtube',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content),
{
u'status': u'Success',
u'subs': u'JMD_ifUUfsU',
u'youtube_local': True,
u'is_youtube_mode': True,
u'youtube_server': False,
u'command': u'found',
u'current_item_subs': None,
u'youtube_diff': True,
u'html5_local': [],
u'html5_equal': False,
}
)
def test_fail_data_without_id(self):
link = reverse('check_transcripts')
data = {
'locator': '',
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('check_transcripts')
data = {
'locator': '',
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
# Test for raising `ItemNotFoundError` exception.
data = {
'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
def test_fail_for_non_video_module(self):
        # Non-video module: setup
data = {
'parent_locator': unicode(self.course.location),
'category': 'not_video',
'type': 'not_video'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
subs_id = str(uuid4())
item = modulestore().get_item(usage_key)
item.data = textwrap.dedent("""
<not_video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
            </not_video>
""".format(subs_id))
modulestore().update_item(item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
data = {
'locator': unicode(usage_key),
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
link = reverse('check_transcripts')
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
| mtlchun/edx | cms/djangoapps/contentstore/views/tests/test_transcripts.py | Python | agpl-3.0 | 27,376 | [
"FEFF"
] | 0b235c6f8d009b0a4c776ef4b63d6eb3151e6c2d6f6bcabc7cfe05b0a2be845b |
# -*- coding: utf-8 -*-
"""pybooru.resources
This module contains all resources for Pybooru.
SITE_LIST (dict):
Contains various Moebooru and Danbooru-based default sites.
HTTP_STATUS_CODE (dict):
Contains http status codes for Moebooru and Danbooru API.
"""
# Default SITE_LIST
SITE_LIST = {
'konachan': {
'url': "https://konachan.com",
'api_version': "1.13.0+update.3",
'hashed_string': "So-I-Heard-You-Like-Mupkids-?--{0}--"},
'yandere': {
'url': "https://yande.re",
'api_version': "1.13.0+update.3",
'hashed_string': "choujin-steiner--{0}--"},
'danbooru': {
'url': "https://danbooru.donmai.us"},
'safebooru': {
'url': "https://safebooru.donmai.us"},
'lolibooru': {
'url': "https://lolibooru.moe"},
}
# HTTP_STATUS_CODE
HTTP_STATUS_CODE = {
200: ("OK", "Request was successful"),
201: ("Created", "The request has been fulfilled, resulting in the creation"
" of a new resource"),
202: ("Accepted", "The request has been accepted for processing, but the "
"processing has not been completed."),
204: ("No Content", "The server successfully processed the request and is "
"not returning any content."),
400: ("Bad request", "The server cannot or will not process the request"),
401: ("Unauthorized", "Authentication is required and has failed or has "
"not yet been provided."),
403: ("Forbidden", "Access denied"),
404: ("Not Found", "Not found"),
420: ("Invalid Record", "Record could not be saved"),
421: ("User Throttled", "User is throttled, try again later"),
422: ("Locked", "The resource is locked and cannot be modified"),
423: ("Already Exists", "Resource already exists"),
424: ("Invalid Parameters", "The given parameters were invalid"),
500: ("Internal Server Error", "Some unknown error occurred on the server"),
503: ("Service Unavailable", "Server cannot currently handle the request"),
504: ("Gateway Timeout", "The server timed out while waiting for a response")
}
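# Hedged usage sketch (not part of the original module): print a readable table
# of the status codes defined above; it only assumes the HTTP_STATUS_CODE dict.
if __name__ == '__main__':
    for code, (name, meaning) in sorted(HTTP_STATUS_CODE.items()):
        print("{0} {1}: {2}".format(code, name, meaning))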
| LuqueDaniel/pybooru | pybooru/resources.py | Python | mit | 2,102 | [
"MOE"
] | 89d9121d5a998367f8f6ca138f7ba7d061b303733f9bbc120d139aea58b1e877 |
'''
Created on Jan. 8, 2020
A module to load different datasets obtained from the Canadian Surface Prediction Archive (CaSPAr).
@author: Andre R. Erler, GPL v3
'''
## some CDO commands to process CaSPAr data
#
# # get timing of commands (with loops)
# time bash -c 'command' # single quotes; execute in same folder
#
# # ensemble mean for CaLDAS
# for NC in ensemble_members/*_000.nc; do C=${NC%_000.nc}; D=${C#*/}; echo $D; cdo ensmean ensemble_members/${D}_???.nc ${D}.nc; done
#
# # rename precip variable for experimental CaPA period (before March 2018)
# for NC in *.nc; do echo $NC; ncrename -v .CaPA_fine_exp_A_PR_SFC,CaPA_fine_A_PR_SFC -v .CaPA_fine_exp_A_CFIA_SFC,CaPA_fine_A_CFIA_SFC $NC; done
#
# # reproject rotated pole grid to custom grid
# # the CDO grid definition for a Lambert conic conformal projection is stored here:
# lcc_snw_griddef.txt
#
# # single execution (execute in source folder, write to target folder '../lcc_snw/')
# cdo remapbil,../lcc_snw/lcc_snw_griddef.txt data.nc ../lcc_snw/data.nc
#
# # paralel execution using GNU Parallel (execute in source folder, write to target folder '../lcc_snw/')
# time ls -1 *.nc | parallel -j 6 --colsep '\t' cdo remapbil,../lcc_snw/lcc_snw_griddef.txt {1} ../lcc_snw/{1} :::: -
# external imports
import datetime as dt
import pandas as pd
import os
import os.path as osp
import numpy as np
import netCDF4 as nc # netCDF4-python module
import xarray as xr
from collections import namedtuple
# internal imports
from datasets.common import getRootFolder
# for georeferencing
from geospatial.xarray_tools import addGeoReference, readCFCRS
## Meta-vardata
dataset_name = 'CaSPAr'
root_folder = getRootFolder(dataset_name=dataset_name, fallback_name='HGS') # get dataset root folder based on environment variables
# attributes of variables in different collections
# Axes and static variables
axes_varatts = dict(time = dict(name='time', units='hours', long_name='Days'), # time coordinate
lon = dict(name='lon', units='deg', long_name='Longitude'), # longitude coordinate
lat = dict(name='lat', units='deg', long_name='Latitude'), # latitude coordinate
x = dict(name='x', units='m', long_name='Easting'),
y = dict(name='y', units='m', long_name='Northing'),)
axes_varlist = axes_varatts.keys()
# CaSPAr (general/mixed variables)
caspar_varatts = dict(liqwatflx = dict(name='liqwatflx',units='kg/m^2/s',scalefactor=1.,
long_name='Liquid Water Flux'),)
caspar_varlist = caspar_varatts.keys()
caspar_ignore_list = []
# CaPA
capa_varatts = dict(CaPA_fine_A_PR_SFC = dict(name='precip', units='kg/m^2/s', scalefactor=1000./(6*3600.),
long_name='Total Precipitation'),
CaPA_fine_A_CFIA_SFC = dict(name='confidence', units='', scalefactor=1,
long_name='Confidence Index'),)
capa_varlist = capa_varatts.keys()
capa_ignore_list = ['CaPA_fine_A_CFIA_SFC']
# CaLDAS
caldas_varatts = dict(CaLDAS_P_DN_SFC = dict(name='rho_snw', units='kg/m^3', scalefactor=1, long_name='Snow Density'),
CaLDAS_A_SD_Avg = dict(name='snowh', units='m', scalefactor=0.01, long_name='Snow Depth'),
# derived variables
snow = dict(name='snow', units='kg/m^2', scalefactor=1., long_name='Snow Water Equivalent'),)
caldas_varlist = caldas_varatts.keys()
caldas_ignore_list = ['CaLDAS_P_I2_SFC','CaLDAS_P_SD_Glacier','CaLDAS_A_SD_Veg','CaLDAS_P_SD_OpenWater','CaLDAS_P_SD_IceWater']
# HRDPS
hrdps_varatts = dict(HRDPS_P_TT_10000 = dict(name='T2', units='K', offset=273.15, long_name='2 m Temperature'),
HRDPS_P_HU_10000 = dict(name='Q2', units='kg/kg', long_name='2 m Specific Humidity'),
HRDPS_P_UU_10000 = dict(name='U2', units='m/s', scalefactor=1852./3600., long_name='2 m Zonal Wind'),
HRDPS_P_VV_10000 = dict(name='V2', units='m/s', scalefactor=1852./3600., long_name='2 m Meridional Wind'),
HRDPS_P_GZ_10000 = dict(name='zs', units='m', scalefactor=10., long_name='Surface Geopotential'),
HRDPS_P_FB_SFC = dict(name='DNSW', units='W/m^2', long_name='Downward Solar Radiation'),
HRDPS_P_FI_SFC = dict(name='DNLW', units='W/m^2', long_name='Downward Longwave Radiation'),
HRDPS_P_PN_SFC = dict(name='pmsl', units='Pa', scalefactor=100., long_name='Sea-level Pressure'),
HRDPS_P_P0_SFC = dict(name='ps', units='Pa', scalefactor=100., long_name='Surface Pressure'),
HRDPS_P_TM_SFC = dict(name='SST', units='K', long_name='Sea Surface Temperature'),
HRDPS_P_GL_SFC = dict(name='seaice', units='', long_name='Sea Ice Fraction'),
HRDPS_P_DN_SFC = dict(name='rho_snw', units='kg/m^3', scalefactor=1, long_name='Snow Density'),
HRDPS_P_SD_Avg = dict(name='snowh', units='m', scalefactor=0.01, long_name='Snow Depth'),
# derived variables
Rn = dict(name='Rn', units='W/m^2', long_name='Net Surface Radiation'),
e_def = dict(name='e_def', units='Pa', long_name='Saturation Deficit'),
e_vap = dict(name='e_vap', units='Pa', long_name='Water Vapor Pressure'),
RH = dict(name='RH', units='', long_name='Relative Humidity'),
delta = dict(name='delta', units='Pa/K', long_name='Saturation Slope'),
u2 = dict(name='u2', units='m/s', long_name='2 m Wind Speed'),
gamma = dict(name='gamma', units='Pa/K', long_name='Psychometric Constant'),
pet_dgu = dict(name='pet_dgu', units='Pa/K', long_name='PET Denominator'),
pet_rad = dict(name='pet_rad', units='kg/m^2/s', long_name='PET Radiation Term'),
pet_wnd = dict(name='pet_wnd', units='kg/m^2/s', long_name='PET Wind Term'),
pet = dict(name='pet', units='kg/m^2/s', long_name='Potential Evapotranspiration'),
snow = dict(name='snow', units='kg/m^2', long_name='Snow Water Equivalent'),)
hrdps_varlist = caldas_varatts.keys()
hrdps_ignore_list = ['HRDPS_P_I2_SFC','HRDPS_P_SD_Glaciers','HRDPS_P_SD_Veg','HRDPS_P_SD_OpenWater','HRDPS_P_SD_IceWater', # from CaLDAS (snow); similar, but not identical
'HRDPS_P_PT_SFC','HRDPS_P_LA_SFC','HRDPS_P_LO_SFC', # empty variables
'HRDPS_P_FSF_SFC', 'HRDPS_P_FSD_SFC', # diffuse/direct radiation
'HRDPS_P_N0_SFC','HRDPS_P_RN_SFC','HRDPS_P_PR_SFC','HRDPS_P_AV_SFC', # water fluxes
'HRDPS_P_TT_09950','HRDPS_P_HU_09950','HRDPS_P_GZ_09950', # upper levels (40m)
'HRDPS_P_VVC_09950','HRDPS_P_VV_09950','HRDPS_P_UU_09950','HRDPS_P_UUC_09950', # upper levels (40m)
'HRDPS_P_VVC_10000','HRDPS_P_UUC_10000'] # geographically corrected winds
# settings for NetCDF-4 files
avgfolder = root_folder + dataset_name.lower()+'avg/'
avgfile = '{DS:s}_{GRD:s}_clim_{PRD:s}.nc' # the filename needs to be extended: biascorrection, grid and period
tsfile = '{DS:s}_{GRD:s}_monthly.nc' # extend with biascorrection, variable and grid type
folder_6hourly = root_folder + dataset_name.lower()+'_6hourly/'
filename_6hourly = '{DS:s}_{VAR:s}_{GRD:s}_6hourly.nc' # dataset and variable name, grid name
netcdf_dtype = np.dtype('<f4') # little-endian 32-bit float
# source data
raw_folder = root_folder + '{DS:s}/{GRD:s}/'
netcdf_filename = '{Y:04d}{M:02d}{D:02d}{H:02d}.nc' # original source file
# list of available datasets/collections
DSNT = namedtuple(typename='Dataset',
field_names=['name','interval','start_date','end_date','varatts', 'ignore_list'])
end_date = '2019-12-30T12'
dataset_attributes = dict(CaSPAr = DSNT(name='CaSPAr',interval='6H', start_date='2017-09-11T12', end_date=end_date,
varatts=caspar_varatts, ignore_list=caspar_ignore_list),
CaPA = DSNT(name='CaPA', interval='6H', start_date='2016-06-11T12', end_date=end_date,
varatts=capa_varatts, ignore_list=capa_ignore_list),
CaLDAS = DSNT(name='CaLDAS',interval='6H', start_date='2017-05-23T00', end_date=end_date,
varatts=caldas_varatts, ignore_list=caldas_ignore_list),
HRDPS = DSNT(name='HRDPS', interval='6H', start_date='2017-05-22T00', end_date=end_date,
varatts=hrdps_varatts, ignore_list=hrdps_ignore_list),)
dataset_list = list(dataset_attributes.keys())
# N.B.: the effective start date for CaPA and all the rest is '2017-09-11T12'
default_dataset_index = dict(precip='CaPA', snow='CaLDAS')
for dataset,attributes in dataset_attributes.items():
for varatts in attributes.varatts.values():
varname = varatts['name']
if varname not in default_dataset_index:
default_dataset_index[varname] = dataset
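# Example: 'T2' only appears in the HRDPS variable attributes, so
# default_dataset_index['T2'] resolves to 'HRDPS', while 'precip' and 'snow'
# keep their explicit CaPA/CaLDAS defaults from above.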
## load functions
def loadCaSPAr_Raw(dataset=None, filelist=None, folder=raw_folder, grid=None, period=None, biascorrection=None,
lxarray=True, lgeoref=True, lcheck_files=True, lmultifile=None, drop_variables='default', **kwargs):
''' function to load CaSPAr data from NetCDF-4 files using xarray and add some projection information '''
if not lxarray:
raise NotImplementedError("Only loading via xarray is currently implemented.")
if biascorrection:
raise NotImplementedError("Bias correction is currently not supported: ",biascorrection)
if dataset is None:
raise ValueError('Please specify a dataset name ; valid datasets:\n',dataset_list)
if dataset not in dataset_list:
raise ValueError("Dataset name '{}' not recognized; valid datasets:\n".format(dataset),dataset_list)
if not folder and not filelist:
raise IOError("Specify either 'folder' or 'filelist' or both.")
# handle date ranges (frequency based on dataset)
ds_atts = dataset_attributes[dataset]
if period:
if isinstance(period,str): period = (period,)
if len(period)==1:
period = period*2; lmultifile = False
date_list = pd.date_range(start=period[0],end=period[1],freq=ds_atts.interval)
filelist = [netcdf_filename.format(Y=date.year,M=date.month,D=date.day,H=date.hour) for date in date_list]
# construct file list
if folder:
folder = folder.format(DS=dataset,GRD=grid)
if not osp.exists(folder): raise IOError(folder)
if isinstance(filelist,(list,tuple)):
filelist = [osp.join(folder,filename) for filename in filelist]
elif isinstance(filelist,str): filelist = folder + '/' + filelist
elif filelist is None: filelist = folder + '/*.nc'
lraise = False
if isinstance(filelist,(list,tuple)):
for filename in filelist:
if not osp.exists(filename):
if not lraise: print("Missing files:")
print(filename); lraise = True
if lcheck_files and lraise:
raise IOError("Some files apprear to be missing... see above.")
# if folder is None but filelist is not, a list of absolute path is assumed
if lmultifile is None:
# auto-detect multi-file dataset
if isinstance(filelist,(tuple,list)):
lmultifile = True # the only exception is a single date string as period
else:
# detect regex (assuming string)
lmultifile = any([char in filelist for char in r'*?[]^'])
# prepare drop list
if drop_variables is None: drop_variables = []
elif isinstance(drop_variables,str) and drop_variables.lower() == 'default':
drop_variables = ds_atts.ignore_list[:]
ravmap = {atts['name']:varname for varname,atts in ds_atts.varatts.items()}
drop_variables = [ravmap.get(varname,varname) for varname in drop_variables]
if lmultifile:
# load multifile dataset (variables are in different files)
if 'lat' not in drop_variables: drop_variables.append('lat')
if 'lon' not in drop_variables: drop_variables.append('lon')
xds = xr.open_mfdataset(filelist, combine='by_coords', concat_dim='time', join='right', parallel=True,
data_vars='minimal', compat='override', coords='minimal',
drop_variables=drop_variables , **kwargs)
else:
# load a single file/timestep
if isinstance(filelist,(tuple,list)): filename = filelist[0]
else: filename = filelist
xds = xr.open_dataset(filename, drop_variables=drop_variables, **kwargs)
# update attributes using old names
for varname,atts in ds_atts.varatts.items():
if varname in xds.variables:
var = xds.variables[varname]
atts = atts.copy() # because we will pop scalefactor...
if var.attrs['units'] != atts['units']:
if 'scalefactor' in atts and atts['scalefactor'] != 1:
var *= atts['scalefactor'] # this should execute lazily...
if 'offset' in atts and atts['offset'] != 0:
var += atts['offset'] # this should execute lazily...
atts.pop('scalefactor',None)
attrs = var.attrs.copy()
attrs.update(atts)
var.attrs = attrs
# actually rename
varmap = dict()
for varname,atts in ds_atts.varatts.items():
if varname in xds: varmap[varname] = atts['name']
xds = xds.rename(varmap)
# add projection
if lgeoref:
proj4_string = readCFCRS(xds, lraise=True, lproj4=True)
xds = addGeoReference(xds, proj4_string=proj4_string)
# return xarray Dataset
return xds
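# Minimal usage sketch (assumes raw CaPA source files are present under the
# raw_folder layout defined above; see also the test code at the bottom):
#   xds = loadCaSPAr_Raw(dataset='CaPA', grid='snw_rotpol',
#                        period=('2017-09-11T12', '2017-09-12T12'))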
def loadCaSPAr_6hourly(varname=None, varlist=None, dataset=None, dataset_index=None, folder=folder_6hourly,
lignore_missing=False, grid=None, biascorrection=None, lxarray=True, time_chunks=None, **kwargs):
''' function to load 6-houly CaSPAr data from NetCDF-4 files using xarray and add some projection information '''
if not lxarray:
raise NotImplementedError("Only loading via xarray is currently implemented.")
if varname and varlist: raise ValueError(varname,varlist)
elif varname: varlist = [varname]
elif varlist is None:
if dataset is None:
raise ValueError("Please specify a 'dataset' value in order to load a default variable list.\n"
"Supported datasets: {}".format(dataset_list))
varlist = dataset_attributes[dataset].varatts.keys()
lignore_missing = True
# check dataset time intervals/timesteps
if dataset_index is None: dataset_index = default_dataset_index.copy()
for varname in varlist:
ds_name = dataset_index.get(varname,'CaSPAr')
interval = dataset_attributes[ds_name].interval
if interval != '6H':
raise ValueError(varname,ds_name,interval)
# load variables
if biascorrection is None and 'resolution' in kwargs: biascorrection = kwargs['resolution'] # allow backdoor
if len(varlist) == 1:
varname = varlist[0]
# load a single variable
ds_name = dataset_index.get(varname,'CaSPAr') if dataset is None else dataset
if biascorrection : ds_name = '{}_{}'.format(ds_name,biascorrection) # append bias correction method
filepath = '{}/{}'.format(folder,filename_6hourly.format(DS=ds_name,VAR=varname, GRD=grid))
xds = xr.open_dataset(filepath, **kwargs)
else:
# load multifile dataset (variables are in different files)
filepaths = []
for varname in varlist:
ds_name = dataset_index.get(varname,'CaSPAr') if dataset is None else dataset
if biascorrection : ds_name = '{}_{}'.format(ds_name,biascorrection) # append bias correction method
filename = filename_6hourly.format(DS=ds_name,VAR=varname, GRD=grid)
filepath = '{}/{}'.format(folder,filename)
if os.path.exists(filepath):
filepaths.append('{}/{}'.format(folder,filename))
elif not lignore_missing:
raise IOError(filepath)
# xds = xr.open_mfdataset(filepaths, combine='by_coords', concat_dim='time', join='right', parallel=True,
# data_vars='minimal', compat='override', coords='minimal', **kwargs)
xds = xr.open_mfdataset(filepaths, combine='by_coords', concat_dim=None, parallel=True,
data_vars='minimal', coords='minimal', join='inner', **kwargs)
#xds = xr.merge([xr.open_dataset(fp, chunks=chunks, **kwargs) for fp in filepaths])
return xds
loadHourlyTimeSeries = loadCaSPAr_6hourly
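# Hypothetical usage sketch (assumes the pre-processed 6-hourly NetCDF files
# exist in folder_6hourly with names matching the templates above):
#   xds = loadCaSPAr_6hourly(varlist=['precip', 'snow'], grid='lcc_snw',
#                            lignore_missing=True)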
## abuse for testing
if __name__ == '__main__':
import dask, time, gc, shutil
print('xarray version: '+xr.__version__+'\n')
# from dask.distributed import Client, LocalCluster
# # force multiprocessing (4 cores)
# cluster = LocalCluster(n_workers=4, diagnostics_port=18787)
# client = Client(cluster)
# from multiprocessing.pool import ThreadPool
# dask.set_options(pool=ThreadPool(4))
modes = []
# modes += ['load_6hourly']
modes += ['compute_variables']
# modes += ['load_raw']
# modes += ['fix_dataset']
# modes += ['test_georef']
# some settings
grid = 'lcc_snw'
period = ('2017-09-11T12','2019-12-30T12')
# loop over modes
for mode in modes:
if mode == 'load_6hourly':
# varlist = netcdf_varlist
varlist = ['liqwatflx','precip','snow','test']
xds = loadCaSPAr_6hourly(varlist=None, grid=grid, dataset='HRDPS', lignore_missing=True)
print(xds)
print('')
for varname,xv in xds.variables.items():
if xv.ndim == 3: break
xv = xds[varname] # get DataArray instead of Variable object
xv = xv.sel(time=slice('2018-01-01','2018-02-01'),x=slice(-3500,4500),y=slice(-1000,2000))
# xv = xv.loc['2011-01-01',:,:]
print(xv)
print(('Size in Memory: {:6.1f} MB'.format(xv.nbytes/1024./1024.)))
elif mode == 'compute_variables':
tic = time.time()
# compute variable list
# load_variables = dict(CaPA=['precip']); compute_variables = dict(CaPA=['precip'])
# load_variables = dict(CaLDAS=['snowh','rho_snw']); compute_variables = dict(CaLDAS=['snow'])
# load_variables = dict(CaLDAS=['snowh','rho_snw'], CaPA=['precip'])
# compute_variables = dict(CaSPAr=['liqwatflx'])
load_variables = dict(HRDPS=None) # all
# HRDPS/PET variable lists
lderived = True
derived_valist = ['Rn', 'e_def', 'delta', 'u2', 'gamma', 'T2', 'pet_dgu', 'pet_wnd', 'pet_rad']
# compute_variables = dict(HRDPS=['Rn',]); lderived = False
# compute_variables = dict(HRDPS=['Rn', 'e_def', 'delta', 'u2', 'gamma', 'T2']) # 'RH', # first order variables
# compute_variables = dict(HRDPS=['pet_dgu', 'pet_rad', 'pet_wnd',]) # second order variables
# second order variables: denominator, radiation and wind terms, PET
# compute_variables = dict(HRDPS=['pet_dgu',]) # denominator
# compute_variables = dict(HRDPS=['pet_rad','pet_wnd']) # radiation and wind
# derived_valist = ['Rn', 'e_def', 'delta', 'u2', 'gamma', 'T2',]
compute_variables = dict(HRDPS=['pet']) # only PET
drop_variables = 'default' # special keyword
reference_dataset = next(iter(load_variables)) # just first dataset...
# settings
ts_name = 'time'
# period = ('2019-08-19T00','2019-08-19T06')
folder = folder_6hourly # CaSPAr/caspar_6hourly/
# load multi-file dataset (no time slicing necessary)
datasets = dict()
for dataset,varlist in load_variables.items():
if lderived:
datasets[dataset] = loadCaSPAr_6hourly(grid=grid, varlist=derived_valist,
dataset=dataset, lignore_missing=True)
else:
datasets[dataset] = loadCaSPAr_Raw(dataset=dataset, grid=grid,
period=period, drop_variables=drop_variables)
ref_ds = datasets[reference_dataset]
print(ref_ds)
tsvar = ref_ds[ts_name].load()
# print(tsvar)
print("\n\n *** Computing Derived Variables *** ")
# loop over variables: compute and save to file
for dataset,varlist in compute_variables.items():
for varname in varlist:
print('\n\n --- {} ({}) ---\n'.format(varname,dataset))
note = 'derived variable'
nvar = None; netcdf_dtype = np.dtype('<f4')
# compute variable
if dataset == 'CaSPAr':
# derived variables
if varname == 'liqwatflx':
caldas = datasets['CaLDAS']; capa = datasets['CaPA']
ref_var = capa['precip']; ref_ds = capa
                            # check that the data is 6-hourly
                            dt_hours = tsvar.diff(dim='time').values / np.timedelta64(1,'h')
                            assert dt_hours.min() == dt_hours.max() == 6, (dt_hours.min(), dt_hours.max())
note = 'total precipitation - SWE differences'
swe = caldas['rho_snw'] * caldas['snowh']
swe1 = xr.concat([swe[{'time':0}],swe], dim='time') # extend for differencing
dswe = swe1.diff(dim='time') # the extension should yield correct time indexing
dswe /= (6*3600.) # these are 6-hourly differences
nvar = capa['precip'].fillna(0) - dswe.fillna(0)
nvar = nvar.clip(min=0, max=None) # remove negative (unphysical)
del dswe, swe1, swe
elif dataset == 'CaLDAS':
ref_ds = datasets[dataset]
# derived CaLDAS
if varname == 'snow':
ref_var = ref_ds['snowh']
note = 'snow depth x density'
nvar = ref_ds['rho_snw'] * ref_ds['snowh']
elif dataset == 'HRDPS':
ref_ds = datasets[dataset]
# derived HRDPS
if varname == 'RH':
# relative humidity
ref_var = ref_ds['Q2']
# actual water vapor pressure (from mixing ratio)
e_vap = ref_ds['Q2'] * ref_ds['ps'] * ( 28.96 / 18.02 )
# saturation vapor pressure (for temperature T2; Magnus Formula)
e_sat = 610.8 * np.exp( 17.27 * (ref_ds['T2'] - 273.15) / (ref_ds['T2'] - 35.85) )
note = 'e_vap / e_sat (using Magnus Formula)'
nvar = e_vap / e_sat
del e_sat, e_vap
# first order PET variables
elif varname == 'Rn':
from utils.constants import sig
# net radiation
ref_var = ref_ds['DNSW']
note = '0.23*DNSW + DNLW - 0.93*s*T2**4'
nvar = (1-0.23)*ref_ds['DNSW'] + ref_ds['DNLW']- 0.93*sig*ref_ds['T2']**4
# N.B.: Albedo 0.23 and emissivity 0.93 are approximate average values...
elif varname == 'u2':
# wind speed
ref_var = ref_ds['U2']
note = 'SQRT(U2**2 + V2**2)'
nvar = np.sqrt(ref_ds['U2']**2 + ref_ds['V2']**2) # will still be delayed
elif varname == 'e_def':
# saturation deficit
ref_var = ref_ds['Q2']
# actual water vapor pressure (from mixing ratio)
e_vap = ref_ds['Q2'] * ref_ds['ps'] * ( 28.96 / 18.02 )
# saturation vapor pressure (for temperature T2; Magnus Formula)
e_sat = 610.8 * np.exp( 17.27 * (ref_ds['T2'] - 273.15) / (ref_ds['T2'] - 35.85) )
note = 'e_sat - e_vap (using Magnus Formula)'
nvar = e_sat - e_vap
del e_sat, e_vap
# PET helper variables (still first order)
elif varname == 'delta':
# slope of saturation vapor pressure (w.r.t. temperature T2; Magnus Formula)
ref_var = ref_ds['T2']
note = 'd(e_sat)/dT2 (using Magnus Formula)'
nvar = 4098 * ( 610.8 * np.exp( 17.27 * (ref_ds['T2'] - 273.15) / (ref_ds['T2'] - 35.85) ) ) / (ref_ds['T2'] - 35.85)**2
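                            # N.B.: this matches the FAO-56 saturation-slope formula with
                            # temperature in Kelvin (T2 - 35.85 corresponds to T[degC] + 237.3)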
elif varname == 'gamma':
# psychometric constant
ref_var = ref_ds['ps']
note = '665.e-6 * ps'
nvar = 665.e-6 * ref_ds['ps']
# second order PET variables (only depend on first order variables and T2)
elif varname == 'pet_dgu':
# common denominator for PET calculation
ref_var = ref_ds['delta']
note = '( D + g * (1 + 0.34 * u2) ) * 86400'
nvar = ( ref_ds['delta'] + ref_ds['gamma'] * (1 + 0.34 * ref_ds['u2']) ) * 86400
elif varname == 'pet_rad':
# radiation term for PET calculation
ref_var = ref_ds['Rn']
note = '0.0352512 * D * Rn / Dgu'
nvar = 0.0352512 * ref_ds['delta'] * ref_ds['Rn'] / ref_ds['pet_dgu']
elif varname == 'pet_wnd':
# wind/vapor deficit term for PET calculation
ref_var = ref_ds['u2']
note = 'g * u2 * (es - ea) * 0.9 / T / Dgu'
nvar = ref_ds['gamma'] * ref_ds['u2'] * ref_ds['e_def'] * 0.9 / ref_ds['T2'] / ref_ds['pet_dgu']
elif varname == 'pet':
if 'pet_rad' in ref_ds and 'pet_wnd' in ref_ds:
# full PET from individual terms
ref_var = ref_ds['pet_rad']
note = 'Penman-Monteith (pet_rad + pet_wnd)'
nvar = ref_ds['pet_rad'] + ref_ds['pet_wnd']
else:
# or PET from original derived variables (no terms)
ref_var = ref_ds['Rn']
note = 'Penman-Monteith from derived variables'
nvar = ( 0.0352512 * ref_ds['delta'] * ref_ds['Rn'] + ( ref_ds['gamma'] * ref_ds['u2'] * ref_ds['e_def'] * 0.9 / ref_ds['T2'] ) ) / ( ref_ds['delta'] + ref_ds['gamma'] * (1 + 0.34 * ref_ds['u2']) ) / 86400
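                                # N.B.: 0.0352512 = 0.408 * 0.0864, i.e. the FAO-56 radiation
                                # coefficient combined with the W/m^2 -> MJ/m^2/day conversion;
                                # the trailing /86400 then converts mm/day to kg/m^2/s.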
# fallback is to copy
if nvar is None:
if dataset in datasets:
# generic operation
ref_ds = datasets[dataset]
if varname in ref_ds:
# generic copy
ref_var = ref_ds[varname]
nvar = ref_ds[varname].copy()
else:
raise NotImplementedError("Variable '{}' not found in dataset '{}'".fomat(varname,dataset))
else:
raise NotImplementedError("No method to compute variable '{}' (dataset '{}')".format(varname,dataset))
nvar = nvar.astype(netcdf_dtype)
# assign attributes
                    nvar = nvar.rename(varname)  # rename() returns a new DataArray, so reassignment is required
nvar.attrs = ref_var.attrs.copy()
for srcname,varatts in dataset_attributes[dataset].varatts.items():
if varatts['name'] == varname: break # use these varatts
for att in ('name','units','long_name',):
nvar.attrs[att] = varatts[att]
nvar.attrs['note'] = note
#nvar.chunk(chunks=chunk_settings)
print(nvar)
# save to file
nc_filename = filename_6hourly.format(DS=dataset,VAR=varname,GRD=grid)
nds = xr.Dataset({ts_name:tsvar, varname:nvar,}, attrs=ref_ds.attrs.copy()) # new dataset
# write to NetCDF
var_enc = dict(zlib=True, complevel=1, _FillValue=np.NaN,)
# N.B.: may add chunking for larger grids
nds.to_netcdf(folder+nc_filename, mode='w', format='NETCDF4', unlimited_dims=['time'],
engine='netcdf4', encoding={varname:var_enc,}, compute=True)
del nvar, nds, ref_var
# clean up
gc.collect()
toc = time.time()
print("\n\nOverall Timing:",toc-tic)
elif mode == 'load_raw':
tic = time.time()
xds = loadCaSPAr_Raw(dataset='HRDPS',
# period=period, grid=grid,
grid='lcc_snw', #drop_variables=['confidence','test'],
period=('2019-11-11T12','2019-12-01T12'), lcheck_files=True,
# filelist='2016??????.nc',
# period=('2018-03-03T00','2019-12-30T12'), lcheck_files=True,
# period=('2017-09-11T12','2019-12-30T12'), lcheck_files=True,
# period=('2016-06-11T12','2018-03-02T18'), lcheck_files=True,
# period=('2016-06-11T12','2017-09-11T06'), lcheck_files=True,
# filelist=['2017091518.nc','2017091600.nc','2017091606.nc','2017091612.nc'])
)
toc = time.time()
print(toc-tic)
print(xds)
print('')
            dt_hours = xds['time'].diff(dim='time').values / np.timedelta64(1,'h')  # avoid shadowing the 'datetime as dt' import
            assert dt_hours.min() == dt_hours.max() == 6, (dt_hours.min(), dt_hours.max())
# xv = xds['CaPA_fine_exp_A_PR_SFC']
# xv = xv.loc['2016-06-16T06',:,:]
varname = 'time'
if varname in xds:
xv = xds[varname]
print(xv)
print("\nMean value:", xv[:].mean().values, xv.attrs['units'])
print(('Size in Memory: {:6.1f} kB'.format(xv.nbytes/1024.)))
elif mode == 'fix_dataset':
            lmissing = False  # default: persist values from the reference file (rather than NaN)
ref_delay = 1
            # dataset = 'CaPA'; lmissing = True  # precipitation can simply not happen, so NaN is acceptable
# dataset = 'CaLDAS'
dataset = 'HRDPS'; ref_delay = 4 # diurnal cycle
src_grid = 'snw_rotpol'
ds_atts = dataset_attributes[dataset]
missing_value = np.NaN
grid_mapping_list = ['rotated_pole']
ref_varlen = None; ref_size = None
damaged_folder = 'damaged_files/'
folder = raw_folder.format(DS=dataset, GRD=src_grid)
os.chdir(folder)
with open('missing_files.txt',mode='a',newline='\n') as missing_record:
# loop over dates
date_list = pd.date_range(start=ds_atts.start_date,end=ds_atts.end_date,freq=ds_atts.interval)
for i,date in enumerate(date_list):
filename = netcdf_filename.format(Y=date.year,M=date.month,D=date.day,H=date.hour)
# construct reference file
ref_date = date_list[max(0,i-ref_delay)]
reference_file = netcdf_filename.format(Y=ref_date.year,M=ref_date.month,D=ref_date.day,H=ref_date.hour)
# identify damaged files by size (will be moved)
if ref_size is None:
ref_size = os.path.getsize(filename)/2. # first file has to exist!
with nc.Dataset(filename, mode='a') as ds:
ref_varlen = len(ds.variables)
if osp.exists(filename) and os.path.getsize(filename) < ref_size:
# count variables
with nc.Dataset(filename, mode='a') as ds:
varlen = len(ds.variables)
if ref_varlen is None:
ref_varlen = varlen
elif ref_varlen < varlen:
raise ValueError("Additional variables detected in file '{}' - check reference file.".format(filename))
elif ref_varlen > varlen:
# move to separate folder
os.makedirs(damaged_folder, exist_ok=True)
print(filename,'->',damaged_folder)
shutil.move(filename, damaged_folder)
# add missing (and damaged) files
if not osp.exists(filename):
# add to record
print(filename)
missing_record.write(filename+'\n')
# handle missing
shutil.copy(reference_file,filename) # create new file
# set values to missing
with nc.Dataset(filename, mode='a') as ds:
for varname,variable in ds.variables.items():
if varname == 'time':
# set time units to hours since present
time_str = 'hours since {Y:04d}-{M:02d}-{D:02d} {H:02d}:00:00'.format(Y=date.year,M=date.month,D=date.day,H=date.hour)
variable.setncattr('units',time_str)
elif lmissing and 'time' in variable.dimensions:
# set all values to a fill value
variable[:] = missing_value
variable.setncattr('missing_value',missing_value)
# set flag to indicate dummy nature of file
if lmissing:
ds.setncattr('DUMMY',"This file was recorded missing and a dummy was generated using the file " +
reference_file + "' as reference; values have been replaced by " +
str(missing_value))
else:
ds.setncattr('DUMMY',"This file was recorded missing and a dummy was generated using the file " +
reference_file + "' as reference; values from reference persist")
# make sure CF attributes are correct
with nc.Dataset(filename, mode='a') as ds:
# rename CaPA experimental precip var
if dataset == 'CaPA':
if 'CaPA_fine_exp_A_PR_SFC' in ds.variables:
ds.renameVariable('CaPA_fine_exp_A_PR_SFC','CaPA_fine_A_PR_SFC')
if 'CaPA_fine_exp_A_CFIA_SFC' in ds.variables:
ds.renameVariable('CaPA_fine_exp_A_CFIA_SFC','CaPA_fine_A_CFIA_SFC')
                        # fix CF grid mapping attributes
grid_mapping = None
for grid_mapping in grid_mapping_list:
if grid_mapping in ds.variables: break
if grid_mapping is None:
raise NotImplementedError("No supported 'grid_mapping' detected.")
for varname,variable in ds.variables.items():
if 'rlat' in variable.dimensions and 'rlon' in variable.dimensions:
# verify or set CF projection attributes
if 'coordinates' in variable.ncattrs():
assert ( variable.getncattr('coordinates',) == 'lon lat' or
variable.getncattr('coordinates',) == 'lat lon' ), variable.getncattr('coordinates',)
else:
variable.setncattr('coordinates','lon lat')
if 'grid_mapping' in variable.ncattrs():
assert variable.getncattr('grid_mapping',) == grid_mapping, variable.getncattr('grid_mapping',)
else:
variable.setncattr('grid_mapping',grid_mapping)
elif varname == 'time':
if 'coordinates' in variable.ncattrs(): variable.delncattr('coordinates')
if 'grid_mapping' in variable.ncattrs(): variable.delncattr('grid_mapping')
## N.B.: in order to concatenate the entire time series of experimental and operational high-res CaPA data,
# we need to rename the variables in the experimental files using the following command (and a loop):
# ncrename -v .CaPA_fine_exp_A_PR_SFC,CaPA_fine_A_PR_SFC -v .CaPA_fine_exp_A_CFIA_SFC,CaPA_fine_A_CFIA_SFC $NC
elif mode == 'test_georef':
import osgeo
print(osgeo.__version__)
# from osgeo.osr import SpatialReference, CoordinateTransformation
from pyproj import Proj, transform
# load single time-step
xds = loadCaSPAr_Raw(dataset='CaPA', grid=grid, period=period[1],
lcheck_files=True, lgeoref=True)
print(xds)
# # proj4 definition for rotated pole (does not work...)
# RP = xds.rotated_pole
# o_lon_p = RP.north_pole_grid_longitude #see https://trac.osgeo.org/gdal/ticket/4285
# o_lat_p = RP.grid_north_pole_latitude
# lon_0 = 180. + RP.grid_north_pole_longitude
# # if lon_0 > 180: lon_0 = lon_0 - 360.
# R = RP.earth_radius
# proj4 = ("+proj=ob_tran +o_proj=longlat" +
# # " +to_meter=0.0174532925199 +a=1 " +
# # " +m 57.295779506" +
# " +o_lon_p={o_lon_p:f} +o_lat_p={o_lat_p:f}".format(o_lon_p=o_lon_p,o_lat_p=o_lat_p) +
# " +lon_0={lon_0:f} +R={R:f}".format(lon_0=lon_0, R=R) )
# # proj4 = " +proj=ob_tran +o_proj=longlat +o_lat_p=90 +o_lon_p=0 +lon_0=0"
# # proj4 = "+proj=longlat +lon_0=0 +lat_0=0 +ellps=WGS84 +datum=WGS84" # default
# print(proj4)
# rCSR = Proj(proj4)
# print(rCSR.definition_string())
# rCSR.ImportFromProj4(proj4)
## test projection
# CSR = SpatialReference()
default_proj4 = "+proj=longlat +lon_0=0 +lat_0=0 +ellps=WGS84 +datum=WGS84"
# CSR.ImportFromProj4(default_proj4)
CSR = Proj(default_proj4)
# coordinate indices
i,j = 27,50
y,x = xds.y.data[j],xds.x.data[i]
pCSR = Proj(xds.attrs['proj4'])
print("\nSource coordinates (y,x):\n {:8.5f}, {:8.5f}".format(y,x))
# reproject source coordinates
print("\n reprojecting...")
# transform = CoordinateTransformation(rCSR,CSR)
# slat,slon,z = transform.TransformPoint(rlat.astype(np.float64),rlon.astype(np.float64))
slon,slat = transform(pCSR, CSR, x, y, radians=False)
print("\nReprojected coordinates (lat,lon):\n {:8.5f}, {:8.5f}".format(slat,slon))
# compare against recorded coordinates
lat,lon = xds.lat.data[j,i],xds.lon.data[j,i]
print("\nActual coordinates:\n {:8.5f}, {:8.5f}".format(lat,lon))
| aerler/GeoPy | src/datasets/CaSPAr.py | Python | gpl-3.0 | 40,304 | [
"NetCDF"
] | 4f63191d4ca0107f100de3079d1dcb1dc44c491cf657d82627fdffb8dd8079e6 |
# for python 3
# You'll need to customize this according to your needs. Proper orientation of
# the kinect is vital; if participants are able to maintain their head or wrists
# continuously inside the word rects, they will repeatedly trigger the collision
# detection
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from math import ceil
import pygame
import random
import sys
TRACKING_COLOR = pygame.color.Color("purple")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
GAME_TIME = 60  # seconds
class BodyGameRuntime(object):
def __init__(self):
pygame.init()
pygame.mixer.init()
self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
self.click_sound = pygame.mixer.Sound('audio\\click.ogg')
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1,
self._infoObject.current_h >> 1),
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect Game Framework Test")
self.finished = False
self._clock = pygame.time.Clock()
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
PyKinectV2.FrameSourceTypes_Body)
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
self._kinect.color_frame_desc.Height), 0, 32)
self._bodies = None
self.score = 0
self.sentence_list = ["It is not acceptable to eat with your mouth open",
"It is acceptable to use a napkin",
"You shouldn't talk with food in your mouth",
"You shouldn't use bad words at the dinner table",
"This is a test sentence for the game",
"These sentences don't have any periods",
"Giraffes are herbivores, and don't eat elephants",
"Elvia came to visit the teacher in his office",
"My favorite fruits are rambutans and chirimoyas",
"The cat likes to eat the dog's food",
"Sometimes the dog gets angry and barks at the cat"]
self._frame_surface.fill((255, 255, 255))
def text_objects(self, text, font):
text_surface = font.render(text, True, (0, 0, 0))
return text_surface, text_surface.get_rect()
def message_display(self, text, loc_tuple, loc_int):
# loc_int: 1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right
text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, 36))
        loc_dict = {1: 'center', 2: 'topleft', 3: 'bottomleft',
                    4: 'bottomright', 5: 'topright'}
        setattr(text_rect, loc_dict[loc_int], loc_tuple)  # plain setattr instead of the fragile exec-based assignment
self._frame_surface.blit(text_surf, text_rect)
return text_rect
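    # message_display returns the blitted text's rect; the game reuses these
    # rects for joint-collision detection in draw_ind_point below.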
def fragment_sentence(self, sentence):
sentence_list = sentence.split()
sentence_word_count = len(sentence_list)
max_frag_size = ceil(sentence_word_count/3)
frag_list = []
i = 0
while i * max_frag_size <= sentence_word_count:
frag_list.append(sentence_list[i*max_frag_size:(i + 1)*max_frag_size])
i += 1
frag_list = [' '.join(words) for words in frag_list][0:3]
return frag_list
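    # Worked example: a 10-word sentence gives max_frag_size = ceil(10/3) = 4, so
    # fragment_sentence("It is not acceptable to eat with your mouth open")
    # returns ['It is not acceptable', 'to eat with your', 'mouth open'].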
def draw_ind_point(self, joints, jointPoints, color, highlight_color, rect0, rect1, rect2, joint0, frag_list):
        joint0State = joints[joint0].TrackingState
if (joint0State == PyKinectV2.TrackingState_NotTracked or
joint0State == PyKinectV2.TrackingState_Inferred):
return
center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
if rect0.collidepoint(center):
self.built_frag = self.built_frag + " " + frag_list[0]
self.click_sound.play()
frag_list[0] = ""
elif rect1.collidepoint(center):
self.built_frag = self.built_frag + " " + frag_list[1]
self.click_sound.play()
frag_list[1] = ""
elif rect2.collidepoint(center):
self.built_frag = self.built_frag + " " + frag_list[2]
self.click_sound.play()
frag_list[2] = ""
if frag_list[0] == "" and frag_list[1] == "" and frag_list[2] == "":
self.built_frag = self.built_frag[1:]
if self.built_frag == self.sentence:
self.score += 1
self.beep_sound.play()
self.end_round(frag_list)
else:
self.score -= 1
self.buzz_sound.play()
self.end_round(frag_list)
else:
try:
pygame.draw.circle(self._frame_surface, color, center, 20, 0)
except:
pass
def update_screen(self, joints, jointPoints, color, highlight_color, frag_list, seconds):
        self._frame_surface.fill(BG_COLOR)  # blank screen before drawing points
self.message_display(self.built_frag, (300, 900), 2)
rect0 = self.message_display(frag_list[0], (300, 300), 1)
rect1 = self.message_display(frag_list[1], (self._frame_surface.get_width() / 2, 100), 1)
rect2 = self.message_display(frag_list[2], (self._frame_surface.get_width() - 300, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
self.message_display(str(seconds), (self._frame_surface.get_width() - 300, 800), 1)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_Head, frag_list)
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_WristRight, frag_list)
# may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
rect1, rect2, PyKinectV2.JointType_WristLeft, frag_list)
def end_round(self, frag_list):
self._frame_surface.fill(BG_COLOR)
self.message_display(self.built_frag, (300, 900), 2)
rect0 = self.message_display(frag_list[0], (300, 300), 1)
rect1 = self.message_display(frag_list[1], (self._frame_surface.get_width() / 2, 100), 1)
rect2 = self.message_display(frag_list[2], (self._frame_surface.get_width() - 300, 300), 1)
self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
                                                 (self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
pygame.time.delay(500)
self.new_round()
def end_game(self):
self._frame_surface.fill(BG_COLOR)
self.message_display("Score: {}".format(self.score), (self._frame_surface.get_width() / 2, self._frame_surface.get_height() / 2), 1)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
                                                 (self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
pygame.time.delay(3000)
self.run()
def new_round(self):
self.sentence = random.sample(self.sentence_list, 1)[0]
self.built_frag = ""
frag_list = self.fragment_sentence(self.sentence)
random.shuffle(frag_list)
pygame.time.delay(500)
while not self.finished:
seconds = GAME_TIME - int((pygame.time.get_ticks() - self.start_ticks)/1000)
if seconds <= 0:
self.end_game()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
joint_points = self._kinect.body_joints_to_color_space(joints)
self.update_screen(joints, joint_points, TRACKING_COLOR, HIGHLIGHT_COLOR, frag_list, seconds)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface,
                                                     (self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
self._clock.tick(60)
self.end_game()
def run(self):
self.score = 0
while not self.finished:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
self.finished = True
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
self.start_ticks = pygame.time.get_ticks()
self.new_round()
self._kinect.close()
pygame.quit()
sys.exit()
if __name__ == "__main__":
game = BodyGameRuntime()
game.run()
| jgerschler/ESL-Games | Kinect/Sentence Builder/Deprecated/SentenceBuilderOriginal.py | Python | mit | 10,661 | [
"VisIt"
] | 5aeacf10ad723598cdfda9f536b6989c25386faa00203d002fcc519412c508ab |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.165467
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/web/epgsearchrss.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class epgsearchrss(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(epgsearchrss, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_82526421 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
\t<channel>
\t\t<title>''')
_v = VFFSL(SL,"title",True) # u'$title' on line 5, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$title')) # from line 5, col 10.
write(u'''</title>
\t\t<link>
\t\t\thttp://
\t\t</link>
\t\t<description>''')
_v = VFFSL(SL,"description",True) # u'$description' on line 9, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$description')) # from line 9, col 16.
write(u'''</description>
\t\t<generator>''')
_v = VFFSL(SL,"generator",True) # u'$generator' on line 10, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$generator')) # from line 10, col 14.
write(u'''</generator>
''')
for event in VFFSL(SL,"events",True): # generated from line 11, col 3
write(u'''\t\t<item>
\t\t\t<title>''')
_v = VFFSL(SL,"event.title",True) # u'$event.title' on line 13, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$event.title')) # from line 13, col 11.
write(u''' (''')
_v = VFFSL(SL,"event.date",True) # u'$event.date' on line 13, col 25
if _v is not None: write(_filter(_v, rawExpr=u'$event.date')) # from line 13, col 25.
write(u''' ''')
_v = VFFSL(SL,"event.begin",True) # u'$event.begin' on line 13, col 37
if _v is not None: write(_filter(_v, rawExpr=u'$event.begin')) # from line 13, col 37.
write(u''')</title>
\t\t\t<description>
\t\t\t\tService: ''')
_v = VFFSL(SL,"event.sname",True) # u'$event.sname' on line 15, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$event.sname')) # from line 15, col 14.
write(u'''
\t\t\t\t<br/>
\t\t\t\tStart Time: ''')
_v = VFFSL(SL,"event.date",True) # u'$event.date' on line 17, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$event.date')) # from line 17, col 17.
write(u''' ''')
_v = VFFSL(SL,"event.begin",True) # u'$event.begin' on line 17, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$event.begin')) # from line 17, col 29.
write(u'''
\t\t\t\t<br/>
\t\t\t\tDuration: ''')
_v = VFFSL(SL,"event.duration",True) # u'$event.duration' on line 19, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$event.duration')) # from line 19, col 15.
write(u''' minutes
\t\t\t\t<br/>
''')
if VFFSL(SL,"event.shortdesc",True): # generated from line 21, col 5
write(u'''\t\t\t\t''')
_v = VFFSL(SL,"event.shortdesc",True) # u'$event.shortdesc' on line 22, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$event.shortdesc')) # from line 22, col 5.
write(u'''
\t\t\t\t<br/>
''')
write(u'''\t\t\t\t<br/>
\t\t\t\t''')
_v = VFFSL(SL,"event.longdesc",True) # u'$event.longdesc' on line 26, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$event.longdesc')) # from line 26, col 5.
write(u'''
\t\t\t</description>
\t\t\t<author>''')
_v = VFFSL(SL,"event.sname",True) # u'$event.sname' on line 28, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$event.sname')) # from line 28, col 12.
write(u'''</author>
\t\t</item>
''')
write(u''' </channel>
</rss>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_82526421
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_epgsearchrss = 'respond'
## END CLASS DEFINITION
if not hasattr(epgsearchrss, '_initCheetahAttributes'):
templateAPIClass = getattr(epgsearchrss, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(epgsearchrss)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=epgsearchrss()).run()
| MOA-2011/e2openplugin-OpenWebif | plugin/controllers/views/web/epgsearchrss.py | Python | gpl-2.0 | 8,144 | [
"VisIt"
] | 32a4c5723d3ad854493a29147c7023648b3df8d6c8be65301423047bb2c89622 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Version: Sat 27 Apr 2013
# Initial build.
#
"""
Scripts for dealing with the SDSS
Catalogue Archive Server (CAS) and
the results obtained herefrom.
"""
import os
import sys
import time
# Will mechanize work with proxies?
# Maybe I should create a wrapper or use socksipy to have all connections use the proxy server.
import mechanize
# .. : ted
from .. import env
from ..time import format_HHMMSS_diff
from . import load_SNe_candidate_list
_path_data = env.paths.get('data')
_path_das = env.paths.get('das')
_path_cas = env.paths.get('cas')
_proxies = env.proxies
"""
Content
-------
__Functions:__
* get_galaxies
* create_galaxy_list
* plot_gxlist
* build_tlist
* get_fields
* get_field_result
* create_unique_field_list
* filter_invalid_from_unique_field_list
* count_field_records
* query
* field_clean_local_dir
"""
def get_galaxies():
"""
This query takes too long?
"""
ipath = env.paths.get('sql')
fn_sql_galaxies = os.path.join(ipath, 'cas', 'stripe82_galaxies.sql')
with open(fn_sql_galaxies, 'r') as fsock:
sql_galaxies = fsock.read()
print sql_galaxies
print ''
print 'Downloading galaxy objects from PhotoObjAll'
time_total_beg = time.time()
galaxies = query(sql_galaxies)
time_total_end = time.time()
print 'Query executed in {:.0f} seconds'.format(
time_total_end - time_total_beg
)
if 'error' in galaxies.lower():
print('ERROR: {}: CAS says something is wrong ...'.format(
sys._getframe().f_code.co_name)
)
print '\nContent of returned result:\n\n{}'.format(galaxies)
with open(env.files.get('galaxies'), 'w+') as fsock:
fsock.write(galaxies)
def create_galaxy_list():
"""
Create a list of galaxy coordinates `gxlist.csv` by
filtering response from SDSS Skyserver in the following way:
0. Exclude duplicate coordinates.
1. Exclude coordinates that are not within covered stripe area.
2. Exclude coordinates that are too close to any supernova coordinate.
Returns
-------
None
Side effects
------------
File `gxlist.csv` in env.paths.get('cas')
"""
from .cutouts import get_covering_fields
import numpy as np
import pandas as pd
# Manipulation and analysis of geometric objects in the Cartesian plane.
# from shapely.geometry import Polygon, Point
# from shapely.ops import cascaded_union
from IPython import embed
print 'Loading data ...'
cols = dict(usecols=['Ra', 'Dec'])
fcols = dict(usecols=['raMin', 'raMax', 'decMin', 'decMax'])
galaxies = pd.read_csv(env.files.get('galaxies'), sep=',', **cols)
snlist = pd.read_csv(env.files.get('snlist'), sep=';', **cols)
fields = pd.read_csv(env.files.get('fields'), sep=',', **fcols)
# Step 0
# ------
print 'Step 0 : Exclude duplicate coordinates ...'
gra = galaxies.Ra.values
gdec = galaxies.Dec.values
# coords = np.zeros_like(gra).astype(str)
coords = []
for i in np.arange(galaxies.shape[0]):
# coords[i] = '{},{}'.format(gra[i], gdec[i])
coords.append('{},{}'.format(gra[i], gdec[i]))
u_coords, uix = np.unique(coords, return_index=True)
print 'Reducing data for the next step ...'
u_galaxies = galaxies.iloc[uix]
u_gra = gra[uix]
u_gdec = gdec[uix]
# Step 1
# ------
print 'Step 1 : Exclude coordinates that are not within covered stripe area ...'
ipath = os.path.dirname(env.files.get('gxlist'))
ifname = os.path.join(ipath, 'cu_gxlist.csv')
if not os.path.isfile(ifname):
"""This step takes ~40 minutes on my laptop."""
ramin, ramax = fields.raMin.values, fields.raMax.values
decmin, decmax = fields.decMin.values, fields.decMax.values
N_gx = u_galaxies.shape[0]
N_fields = fields.shape[0]
# How many times can I have an array of <N_rows> rows by
N_rows = 100
N_gx_eee = N_gx // N_rows
N_gx_rem = N_gx % N_rows
cix = np.array([]).astype(bool)
# Create vectors and matrices that are repeatedly used
N_fields_ZEROS = np.zeros(N_fields)[None, :]
RAMIN = np.zeros(N_rows)[:, None] + ramin[None, :]
RAMAX = np.zeros(N_rows)[:, None] + ramax[None, :]
DECMIN = np.zeros(N_rows)[:, None] + decmin[None, :]
DECMAX = np.zeros(N_rows)[:, None] + decmax[None, :]
for n in range(N_gx_eee):
# How far are we?
beg = n * N_rows
end = (n + 1) * N_rows
print 'n = {: >4d}; {: >6d}; {: >6d};'.format(n, beg, end)
# Create matrices
GRA = u_gra[beg:end, None] + N_fields_ZEROS
GDEC = u_gdec[beg:end, None] + N_fields_ZEROS
CMP = np.ones((N_rows, N_fields)).astype(bool)
CMP &= (GRA > RAMIN)
CMP &= (GRA < RAMAX)
CMP &= (GDEC > DECMIN)
CMP &= (GDEC < DECMAX)
# Append the booleans to my master index file
cix = np.append(cix, np.any(CMP, axis=1))
# Clean up
del GRA, GDEC, CMP
if N_gx_rem > 0:
# Finally, the remaining less than <N_rows> coordinates
            beg = N_gx_eee * N_rows  # also safe when the chunk loop above never ran (N_gx < N_rows)
end = beg + N_gx_rem
# Create matrices
GRA = u_gra[beg:end, None] + N_fields_ZEROS
GDEC = u_gdec[beg:end, None] + N_fields_ZEROS
RAMIN = np.zeros(N_gx_rem)[:, None] + ramin[None, :]
RAMAX = np.zeros(N_gx_rem)[:, None] + ramax[None, :]
DECMIN = np.zeros(N_gx_rem)[:, None] + decmin[None, :]
DECMAX = np.zeros(N_gx_rem)[:, None] + decmax[None, :]
CMP = np.ones((N_gx_rem, N_fields)).astype(bool)
CMP &= (GRA > RAMIN)
CMP &= (GRA < RAMAX)
CMP &= (GDEC > DECMIN)
CMP &= (GDEC < DECMAX)
# Append the booleans to my master index file
cix = np.append(cix, np.any(CMP, axis=1))
# Check
print ''
print 'N_gx =', N_gx
print 'cix.size =', cix.size
print 'cix.dtype =', cix.dtype
# Embed so that I do not need to re-do this step again...
embed()
print 'Reducing data for the next step ...'
cu_galaxies = u_galaxies.iloc[cix]
cu_gra = u_gra[cix]
cu_gdec = u_gdec[cix]
else:
print 'Step 1. already performed. Loading result ...'
cu_galaxies = pd.read_csv(ifname, sep=',')
cu_gra = cu_galaxies.Ra.values
cu_gdec = cu_galaxies.Dec.values
# Step 2
# ------
print 'Step 2 : Exclude coordinates that are too close to any supernova coordinate ...'
# Criteria?
# Unknown, but for now it should just lie
# outside the range of the cutout extent, i.e. more than 101 px away.
# 101 px in the SDSS frames correspond to about 10 ** -2 degrees.
    criteria_distance = .001  # [deg] NB: smaller than the ~1e-2 deg (101 px) extent quoted above
# Count how many rows that are left at this step
N_gx = cu_galaxies.shape[0]
N_sn = snlist.shape[0]
# How many times can I have an array of <N_rows> rows by
N_rows = 10000
N_gx_eee = N_gx // N_rows
N_gx_rem = N_gx % N_rows
dix = np.array([]).astype(bool)
# Create repeatedly used vectors
N_sn_ZEROS = np.zeros(N_sn)[None, :]
RA_sn = np.zeros(N_rows)[:, None] + snlist.Ra.values[None, :]
DEC_sn = np.zeros(N_rows)[:, None] + snlist.Dec.values[None, :]
print 'Creating matrices that can calculate all distances simultaneously ...'
# Loop for as many times as needed
for n in range(N_gx_eee):
beg = n * N_rows
end = (n + 1) * N_rows
# How far are we?
print 'n = {: >4d}; {: >6d}; {: >6d};'.format(n, beg, end)
# Create matrices
# Broadcast shapes to get a N_gx-by-N_sn
RA_gx = cu_gra[beg:end, None] + N_sn_ZEROS
DEC_gx = cu_gdec[beg:end, None] + N_sn_ZEROS
# print 'Calculating differences for each coordinate type ...'
# Differences
dRA = RA_gx - RA_sn
dDEC = DEC_gx - DEC_sn
# print 'Calculating the distances between every possible set of coordinates ...'
# Distances from each coordinate to each supernova
dS = np.sqrt(dRA ** 2 + dDEC ** 2)
# print 'Creating boolean vector for each coordinate ...'
# Are there any SNe too close for a given coordinate?
        # Check along the columns, i.e. return a boolean vector of rows
        # (galaxies) that meet the criterion of being far enough away from
        # *every* known SDSS supernova candidate to be outside any cutout
        # which has one within it. (A sketch of this broadcasting test
        # follows after this function.)
        # distance indices -- np.all, since one distant SN is not enough
        dix = np.append(dix, np.all(dS > criteria_distance, axis=1))
if N_gx_rem > 0:
# Finally, the remaining less than <N_rows> coordinates
        beg = N_gx_eee * N_rows  # also safe when the chunk loop above never ran
end = beg + N_gx_rem
# Create matrices
RA_gx = cu_gra[beg:end, None] + N_sn_ZEROS
DEC_gx = cu_gdec[beg:end, None] + N_sn_ZEROS
RA_sn = np.zeros(N_gx_rem)[:, None] + snlist.Ra.values[None, :]
DEC_sn = np.zeros(N_gx_rem)[:, None] + snlist.Dec.values[None, :]
# print 'Calculating differences for each coordinate type ...'
# Differences
dRA = RA_gx - RA_sn
dDEC = DEC_gx - DEC_sn
# print 'Calculating the distances between every possible set of coordinates ...'
# Distances from each coordinate to each supernova
dS = np.sqrt(dRA ** 2 + dDEC ** 2)
# print 'Creating boolean vector for each coordinate ...'
# Append the booleans to my master index file
        dix = np.append(dix, np.all(dS > criteria_distance, axis=1))
# Check
print 'N_gx =', N_gx
print 'dix.size =', dix.size
print 'Reducing data for the next step ...'
dcu_galaxies = cu_galaxies.iloc[dix]
# Step 3:
# -------
print 'Step 3 : Exclude coordinates that are too close to other non-events ...'
"""
Rationale
---------
    When I discovered that I had not checked whether the galaxy coordinates
    themselves were far enough apart to stay out of each other's cutouts, I
    realised that I had never made sure that I could order them by
    observation date and then keep only the first-observed object of an
    overlapping pair (it could be the same object, but that would make no
    difference for my algorithm to begin with).
        As it is (2014-02-26), I have no means of using observation dates,
    since I do not have the luxury of starting over and beginning with
    Step 0. More importantly, I do not have time to spend on finding out
    how to obtain observation dates from the SDSS database.
        This is basically just a proof of concept: I need enough coordinates
    which, as far as the merged snlists are concerned, have no known
    transient events classified as SNe happening in them. There is thus no
    reason why I should not just choose arbitrarily between two coordinates
    whose cutouts (101x101) will overlap.
Algorithm
---------
1. I go through the list from the top.
2. For each coordinate I calculate the distance to all other coordinates.
3. I get an array of the same length, containing the distances.
4. Entries with coordinates too close to be outside the given coordinate's
cutout `view` will be removed from the list.
5. The now potentially reduced list is used in the next step. Since we
start from the top, the previous coordinate entries will all be there in
the reduced list.
6. Increase the entry index and repeat from step 3. until the end of the
final entry is reached (no more remaining possibly too close coordinates
left).
"""
i = 0
ddcu_galaxies = dcu_galaxies.copy()
while i < ddcu_galaxies.shape[0]:
if i and not i % 1000:
print 'i = {: >5d}'.format(i)
templist = ddcu_galaxies.copy()
entry = templist[i:i + 1]
dRa = entry.Ra.values[0] - templist.Ra.values
dDec = entry.Dec.values[0] - templist.Dec.values
dS = np.sqrt(dRa ** 2 + dDec ** 2)
dix = (dS > criteria_distance)
dix[i] = True
ddcu_galaxies = templist.iloc[dix].copy()
del templist
# Yikes :O !!! This turned out to be important :D
i += 1
# print 'Final size of gxlist: {:,.0f}'.format(ddcu_galaxies.shape[0])
# Step 4:
# -------
print 'Step 4 : Exclude coordinates that are covered by too few fields ...'
"""
    This was determined from the coordinates in the first tlist made before
    this step, where I discovered that some of the chosen coordinates had as
    few as 1 field covering them. With fewer covering fields, the chance of
    getting a number of cutouts in a sequence that matches the average number
    of cutouts for the coordinates in snlist is smaller. There could be a
    bias here.
"""
# Lowest frame count 69.0
# Highest frame count 162.0
MIN_NUMBER_OF_FIELDS = 69
MAX_NUMBER_OF_FIELDS = 162
# Check if enough covering fields
# If not enough, exclude the coordinate.
n_fields = np.array([])
for i in range(ddcu_galaxies.shape[0]):
print '{: >5d}'.format(i),
Ra, Dec = ddcu_galaxies.iloc[i]
N = get_covering_fields(np.array([Ra, Dec])[None, :]).shape[0]
n_fields = np.append(n_fields, N)
print '- Fields: {: >3d}'.format(N)
fix = (
(n_fields >= MIN_NUMBER_OF_FIELDS)
&
(n_fields <= MAX_NUMBER_OF_FIELDS)
)
fddcu_galaxies = ddcu_galaxies.iloc[fix]
print 'Final size of gxlist: {:,.0f}'.format(fddcu_galaxies.shape[0])
# Finalise
# --------
print 'Step finalise : save the resulting list to disk.'
fddcu_galaxies.to_csv(env.files.get('gxlist'), index=False, header=True)
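# A minimal, self-contained sketch (not used by the pipeline above) of the
# chunked-broadcasting technique used in Steps 1 and 2: broadcasting an
# (N, 1) column of coordinates against a (1, M) row yields an (N, M) matrix
# of pairwise values, and np.all(..., axis=1) keeps only the rows that
# satisfy the criterion against every column. Names and the toy numbers
# below are illustrative only.
def _example_far_from_all():
    import numpy as np
    gx = np.array([[0.0, 0.0], [1.0, 1.0]])     # two galaxy coordinates
    sn = np.array([[0.0005, 0.0], [5.0, 5.0]])  # two SN coordinates
    # (N_gx, N_sn) matrices of pairwise coordinate differences
    dra = gx[:, 0][:, None] - sn[:, 0][None, :]
    ddec = gx[:, 1][:, None] - sn[:, 1][None, :]
    ds = np.sqrt(dra ** 2 + ddec ** 2)
    # Keep a galaxy only if it is farther than the criterion from ALL SNe
    return np.all(ds > .001, axis=1)  # -> array([False,  True])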
def plot_gxlist():
"""
Generate figure showing the galaxy coordinates within the stripe
plotted over the regions covered by the fields that are available.
"""
pass
###############################################################################
def build_tlist():
"""
Build the event/non-event data set and save it as a file.
"""
import numpy as np
import pandas as pd
from ..parse import ra2deg, dec2deg
if 1:
snlist = pd.read_csv(env.files.get('snlist'), sep=';')
else:
snid_sortable = lambda SDSS_id: 'SN{:0>5d}'.format(int(SDSS_id[2:]))
s2valornan = lambda s: s or np.nan
conv = dict(SDSS_id=snid_sortable, Ra=ra2deg, Dec=dec2deg,
redshift=s2valornan, Peak_MJD=s2valornan)
lkw = dict(sep=';', converters=conv)
snlist = pd.read_csv(env.files.get('snlist_1030'), **lkw)
gxlist = pd.read_csv(env.files.get('gxlist'), sep=',')
# print gxlist.info()
# print gxlist.head(10)
print snlist.info()
print snlist.head(10)
# How many needed in total
N_sne = snlist.shape[0]
N_gx = gxlist.shape[0]
N_needed = np.round(N_sne, decimals=-2) * 2
N_gx_c = N_needed - N_sne
gx_ix = np.unique(np.random.randint(0, N_gx, size=N_gx_c))
while gx_ix.size != N_gx_c:
N_cur = gx_ix.size
N_left = N_gx_c - N_cur
gx_ix = np.unique(
np.append(
gx_ix,
np.random.randint(0, N_gx, size=N_left)
)
)
gx_chosen = gxlist.iloc[gx_ix]
# print gx_chosen.info()
# print gx_chosen.head(10)
# raise SystemExit
# Build data set
ra = np.append(snlist.Ra.values, gx_chosen.Ra.values)
dec = np.append(snlist.Dec.values, gx_chosen.Dec.values)
is_sn = np.append(np.ones(N_sne), np.zeros(N_gx_c)).astype(bool)
# Collect and shuffle the lines, so that I only need to split
# the data set N-fold, when using the data.
dataset = np.array([ra,
dec,
is_sn]).T
# Do in-place shuffle
"""
This is an in-place operation on a view of the original array.
It does not create a new, shuffled array, so there's no need
to transpose the result.
REF: https://stackoverflow.com/questions/20546419/shuffle-columns-of-an-array-with-numpy
"""
np.random.shuffle(dataset)
if 0:
coords = np.array([])
# for i in range(dataset.shape[0]):
# coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
# dataset[i, 0], dataset[i, 1])
# )
for i in range(snlist.shape[0]):
coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
snlist.Ra.values[i], snlist.Dec.values[i])
)
ucoords, indices = np.unique(coords, return_inverse=True)
print ucoords.size, np.unique(snlist.SDSS_id.values).size, np.unique(snlist.Peak_MJD.values).size
raise SystemExit
# tlist = pd.DataFrame(data=dict(Ra=ra, Dec=dec, is_sn=is_sn))
tlist = pd.DataFrame(
data=dataset,
columns=['Ra', 'Dec', 'is_sn']
)
print tlist.info()
print tlist.head(10)
tlist.to_csv(env.files.get('tlist'), index=False, header=True)
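# Note on the shuffle used above (illustrative sketch): np.random.shuffle
# permutes an array along its first axis only, so with one sample per row
# the (Ra, Dec, is_sn) triples stay intact while their order is randomised.
def _example_row_shuffle():
    import numpy as np
    data = np.array([[1, 10], [2, 20], [3, 30]])
    np.random.shuffle(data)  # in-place: rows are permuted, columns untouched
    return data              # e.g. array([[2, 20], [3, 30], [1, 10]])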
###############################################################################
def build_tlist_sample(N=5):
"""
Build *SAMPLE* event data set and save it as a file.
"""
import numpy as np
import pandas as pd
snlist = pd.read_csv(env.files.get('snlist'), sep=';')
ra = snlist.Ra.values[:N]
dec = snlist.Dec.values[:N]
is_sn = np.ones(N).astype(bool)
dataset = np.array([ra,
dec,
is_sn]).T
tlist = pd.DataFrame(
data=dataset,
columns=['Ra', 'Dec', 'is_sn']
)
print tlist.info()
print tlist.head(N)
tlist.to_csv(env.files.get('tlist'), index=False, header=True)
###############################################################################
def check_snlist():
"""
Check for duplicates in snlist_1030
"""
import numpy as np
import pandas as pd
from ..parse import ra2deg, dec2deg
# Converters
snid_sortable = lambda SDSS_id: 'SN{:0>5d}'.format(int(SDSS_id[2:]))
s2valornan = lambda s: s or np.nan
if 0:
ifname = env.files.get('snlist_1030')
conv = dict(SDSS_id=snid_sortable, Ra=ra2deg, Dec=dec2deg,
redshift=s2valornan, Peak_MJD=s2valornan)
else:
ifname = env.files.get('snlist')
conv = dict(SDSS_id=snid_sortable, redshift=s2valornan,
Peak_MJD=s2valornan)
lkw = dict(sep=';', converters=conv)
snlist = pd.read_csv(ifname, **lkw)
# Check for duplicate coordinate pairs
coords = np.array([])
for i in range(snlist.shape[0]):
coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
snlist.Ra.values[i], snlist.Dec.values[i])
)
ucoords, indices = np.unique(coords, return_inverse=True)
print 'Number of list entries: {: >4d}'.format(snlist.shape[0])
print 'Number of unique entries: {: >4d}'.format(ucoords.size)
# print 'Number of unique entry IDs: {: >4d}'.format(np.unique(snlist.SDSS_id.values).size)
duplicates = []
for ix in np.unique(indices):
if (indices == ix).sum() > 1:
duplicates.append(ix)
if duplicates:
coord_indices = []
for ix in duplicates:
print ''
for i, uc in enumerate(ucoords[indices[indices == ix]]):
if i == 0:
coord_indices.append((coords == uc).nonzero()[0])
print uc
print '\nIndices of the original list:', duplicates
print coord_indices
print ''
print 'Entries from snlist:'
for cices in coord_indices:
print
print snlist.iloc[cices]
else:
print 'No duplicates found ...'
###############################################################################
def check_tlist():
"""
For each entry in `tlist.csv`, find out how many fields cover it.
"""
from .cutouts import get_covering_fields
import numpy as np
import pandas as pd
ifname = env.files.get('tlist')
tlist = pd.read_csv(ifname)
sn_fields = []
gx_fields = []
for i in range(tlist.shape[0]):
print '{: >4d} -'.format(i),
Ra, Dec, is_sn = tlist.iloc[i]
n_fields = get_covering_fields(np.array([Ra, Dec])[None, :]).shape[0]
if is_sn:
sn_fields.append(n_fields)
print 'SN',
else:
gx_fields.append(n_fields)
print 'GX',
print '- Fields: {: >3d}'.format(n_fields)
for data, name in zip([sn_fields, gx_fields], ['SN', 'GX']):
ofname = os.path.join(
env.paths.get('data'),
'nfieldrecords_{}.csv'.format(name)
)
with open(ofname, 'w+') as fsock:
fsock.write('\n'.join(np.array(data).astype(str)))
if 0:
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
from mplconf import mplrc
from mplconf import rmath
mplrc('publish_digital')
fig, ax = plt.subplots(figsize=(12.5, 4))
ax.hist(sn_fields, bins=100)
ax.hist(gx_fields, bins=100)
ax.set_xlabel(rmath('Number of fields for a coordinate'))
ax.set_ylabel(rmath('Counts'))
fig.tight_layout()
ofname_fig = os.path.join(
env.paths.get('data'),
'tlist_nfieldrecords.pdf'
)
plt.savefig(ofname_fig)
###############################################################################
def get_fields(skip_if_exists=True):
"""For each coordinate in the SNe file, get the frames
from the Field table that cover that coordinate.
    Saves each result in a separate file named <SDSS_ID>.csv.
"""
# Clean up local cas directory
# field_clean_local_dir()
# Do it manually from the main program instead
# This way, if the program is interrupted, it does not need to begin all
# over, since the single-field-getter skips requests that have already been made.
# with open('template_fields_that_cover_SN.sql', 'r') as fsock:
# sql_fields_fstr = fsock.read()
sql_fields_fstr = """\
SELECT
fieldID,
-- skyVersion,
run, rerun, camcol, field,
-- nObjects,
-- numStars_r,
-- nCR_r,
-- nBrightObj_r,
-- nFaintObj_r,
quality, -- Quality of field in terms of acceptance
mjd_r, -- Julian Date when row 0 was read
-- Do I need Astrometric transformation constants?
-- Do I need Airmass measurements?
raMin, raMax, decMin, decMax
FROM
-- Stripe82..Field
Field
WHERE
raMin < {obj_ra} AND {obj_ra} < raMax
AND
decMin < {obj_dec} AND {obj_dec} < decMax
AND
(raMax - raMin) > {dra_min}
AND
(raMax - raMin) < {dra_max}
ORDER BY
run ASC,
rerun ASC,
camcol ASC,
field ASC,
raMin ASC,
decMin ASC
"""
opath = os.path.join(_path_cas, 'fields')
if not os.path.exists(opath):
os.makedirs(opath)
df = load_SNe_candidate_list()
SNe_len = df.count().max()
print 'Beginning search through all confirmed SNe ...'
time_total_beg = time.time()
for i, (ra, dec, SDSS_id) in enumerate(zip(df.Ra, df.Dec, df.SDSS_id)):
sql_fields = sql_fields_fstr.format(obj_ra=ra, obj_dec=dec, dra_min=.1, dra_max=1.)
# get_field_result(sql_fields, SDSS_id)
# Define output structure
ofname = os.path.join(opath, '{}.csv'.format(SDSS_id))
# Don't bother requesting anything if the file already exists
if os.path.isfile(ofname) and skip_if_exists:
            continue  # skip this SN; a `return` here would abort the whole loop
# Update progress output
s = 'Downloading: SN {: >4d} out of {: >4d}, {} ...\r'.format(
(i + 1), SNe_len, format_HHMMSS_diff(time_total_beg, time.time())
)
sys.stdout.write(s)
sys.stdout.flush()
# Request the data
fields = query(sql_fields)
if 'error' in fields.lower():
sys.exit('ERROR: {}: CAS says something is wrong ...'.format(
sys._getframe().f_code.co_name)
)
# And save it
with open(ofname, 'w+') as fsock:
fsock.write(fields)
sys.stdout.write('\n')
sys.stdout.flush()
time_total_end = time.time()
time_total = format_HHMMSS_diff(time_total_beg, time_total_end)
print 'Done downloading field catalogue ... in {}'.format(time_total)
# print 'Downloaded field catalogues for {} SNe'.format(i+1)
def get_field_result(sql_fields, SDSS_id):
"""Get query results from a *single* request for fields."""
# Define output structure
opath = os.path.join(_path_cas, 'fields')
ofname = os.path.join(opath, '{}.csv'.format(SDSS_id))
# Don't bother requesting anything if the file already exists
if os.path.isfile(ofname):
return
# If the file does not exists, make sure that the path does
if not os.path.exists(opath):
os.makedirs(opath)
# Request the data
fields = query(sql_fields)
if 'error' in fields.lower():
sys.exit('ERROR: {}: CAS says something is wrong ...'.format(
sys._getframe().f_code.co_name)
)
# And save it
with open(ofname, 'w+') as fsock:
fsock.write(fields)
def create_unique_field_list():
"""
Creates a file containing all results from *get_fields()*
and saves it in the CAS root directory.
Looks through folder `fields` in the CAS root directory and loads
all .csv files one at the time and adds the lines in each file to
a list. This list is then sorted and only unique entries are kept
before the list is saved in the CAS root directory.
"""
ipath = os.path.join(_path_cas, 'fields')
iglob = os.path.join(ipath, '*.csv')
ofname = env.files.get('fields')
tfname = os.path.join(_path_cas, 'fields.tmp')
# Clean up first, since the file is only appended to in the following
if os.path.isfile(ofname):
os.remove(ofname)
commands = [
# Build one big file with all the results
'cat {iglob} >> {t}'.format(iglob=iglob, t=tfname),
# Sort and remove duplicates
'cat {t} | sort | uniq > {o}'.format(t=tfname, o=ofname),
# Remove the temporary file
'rm {t}'.format(t=tfname),
]
for cmd in commands:
print cmd
os.system(cmd)
# Move last line (with the CSV headers) to the top
with open(ofname, 'r') as fsock:
lines = fsock.readlines()
lines = [lines[-1]] + lines[:-1]
with open(ofname, 'w') as fsock:
fsock.write(''.join(lines))
def filter_invalid_from_unique_field_list(dra_min=.1, dra_max=1.):
"""
Remove invalid entries from the unique field
list created by *create_unique_field_list()*.
Parameters
----------
dra_min : float
the minimum allowable angle separating the RA start and
end coordinate for a given field in the unique field list.
dra_max : float
the maximum allowable angle separating the RA start and
end coordinate for a given field in the unique field list.
Side effects
------------
<del>Creates a backup of the original field list.</del>
Saves the results back into the original destination.
"""
import pandas as pd
import shutil
fname = env.files.get('fields')
# fn_valid = os.path.splitext(fname)[0] + '_valid.csv'
fn_invalid = os.path.splitext(fname)[0] + '_invalid.csv'
shutil.copyfile(fname, '{}.orig'.format(fname))
df = pd.read_csv(fname, sep=',')
"""
2014-07-28
----------
There is an error in this procedure, but the result is the intended:
to remove fields for which the physical extent---as given by the
coordinates raMax, raMin, decMax, decMin---gives too small or even
negative side lengths. This problem is only observed in the RA
coordinates.
    There are five observed cases for where the RA coordinate extents land.
Case 1: raMax \in [ 0; 60] and raMin \in [ 0; 60]
Case 2: raMax \in [ 0; 60] and raMin \in [300; 360]
Case 3: raMax \in [300; 360] and raMin \in [300; 360]
Case 4: raMax \in [ 0; 60] and raMin \in [300; 360]
Case 5: raMax > 360 and raMin \in [ 0; 60]
Case 4 should not occur, since this means that the field is obtained
end-first and beginning-last. These fields are considered invalid.
    Case 5 is OK if, after subtracting 360 deg from raMax, the value is
    larger than raMin. Otherwise, the coordinate difference again produces
    a negative side length.
    It turns out that the latter is the case, so these fields are also
    invalid.
The procedure below removes all fields for which the coordinate difference
raMax - raMin < dra_min = .1 (default). Since this also removes all Case-4
and Case-5 records above, the result is what is intended; but with wrong
assumptions.
    Since this is the way that my data were processed, I leave it as is, so
    that the field list can be reproduced if needed. (A standalone
    restatement of the validity cut follows after this function.)
"""
dra = df.raMax - df.raMin
dra_too_small_ix = (dra < dra_min)
dra_too_large_ix = (dra > dra_max)
df_invalid = df.loc[dra_too_small_ix | dra_too_large_ix]
df_valid = df.loc[(~dra_too_small_ix) & (~dra_too_large_ix)]
df_valid.to_csv(fname, index=False, header=True)
df_invalid.to_csv(fn_invalid, index=False, header=True)
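# A standalone restatement (illustrative only, not used above) of the
# validity cut applied by filter_invalid_from_unique_field_list(): a field
# record is kept only when its naive RA extent raMax - raMin lies strictly
# between dra_min and dra_max. As discussed in the docstring, this pair of
# cuts also happens to discard the wrap-around Case-4/Case-5 records, whose
# naive extent is negative or several hundred degrees.
def _example_field_is_valid(ra_min, ra_max, dra_min=.1, dra_max=1.):
    dra = ra_max - ra_min
    return (dra > dra_min) and (dra < dra_max)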
def count_field_records():
"""
Count the number for field records obtained for
each SN, and save those numbers for later plotting.
"""
import glob
import pandas as pd
iglob = os.path.join(_path_cas, 'fields', '*.csv')
filenames = sorted(glob.glob(iglob))
df_fields = pd.read_csv(env.files.get('fields'), sep=',')
counts = []
beg = time.time()
for i, ifname in enumerate(filenames):
df_results = pd.read_csv(ifname, sep=',')
count = 0
for j in range(df_results.shape[0]):
count += (df_fields['fieldID'] == df_results.iloc[j]['fieldID']).sum()
step = time.time()
dt_str = format_HHMMSS_diff(beg, step)
print '{: >4d}, {: >3d}, {}'.format(i, count, dt_str)
counts.append(str(count))
# print len(filenames), len(counts)
with open(env.files.get('nrecords'), 'w+') as fsock:
fsock.write('\n'.join(counts))
def count_field_records_by_quality():
"""
Count the number for field records obtained for
each SN, and save those numbers for later plotting.
"""
import glob
import pandas as pd
iglob = os.path.join(_path_cas, 'fields', '*.csv')
filenames = sorted(glob.glob(iglob))
df_fields = pd.read_csv(env.files.get('fields'), sep=',')
print 'Number of fields:', df_fields.shape[0]
# FieldQuality Data values
# name value description
# BAD 1 Not acceptable for the survey
# ACCEPTABLE 2 Barely acceptable for the survey
# GOOD 3 Fully acceptable -- no desire for better data
# MISSING 4 No objects in the field, because data is missing.
# We accept the field into the survey as a HOLE
# HOLE 5 Data in this field is not acceptable, but we will
# put the field into the survey as a HOLE, meaning
# none of the objects in the field are part of the
# survey.
# See: http://cas.sdss.org/stripe82/en/help/browser/enum.asp?n=FieldQuality
qualities = range(1, 4)
qices = [(df_fields.quality.values == q) for q in qualities]
fdict = {q: df_fields.iloc[qix] for (q, qix) in zip(qualities, qices)}
cdict = {q: [] for q in qualities}
for i, qix in enumerate(qices):
print 'Number of fields with quality {:d}: {: >3d}'.format(
i + 1, qix.sum())
    print 'Qualities 4 and 5 were not present in my filtered dataset'
beg = time.time()
for i, ifname in enumerate(filenames):
df_results = pd.read_csv(ifname, sep=',')
counts = [0] * len(qualities)
for j in range(df_results.shape[0]):
for k, q in enumerate(qualities):
ices = (fdict[q]['fieldID'] == df_results.iloc[j]['fieldID'])
counts[k] += ices.sum()
step = time.time()
Dt = format_HHMMSS_diff(beg, step)
print '{: >4d}, {}: {: >3d}, {: >3d}, {: >3d}'.format(i, Dt, *counts)
for k, q in enumerate(qualities):
cdict[q].append(counts[k])
list_of_lists = [cdict[q] for q in qualities]
with open(env.files.get('nrecords_q'), 'w+') as fsock:
for row in zip(*list_of_lists):
fsock.write('{},{},{}\n'.format(*row))
def query(sql_raw):
"""Sends SQL query to the CAS and returns the raw text of the response."""
# form_data_receive_only_url = 'http://cas.sdss.org/astro/en/tools/search/x_sql.asp'
form_url = 'http://cas.sdss.org/stripe82/en/tools/search/sql.asp'
return_format = 'csv'
sql_filtered = ''
for line in sql_raw.split('\n'):
sql_filtered += line.split('--')[0] + ' ' + os.linesep
# Browser
br = mechanize.Browser()
# User-Agent
br.addheaders = [
(
'User-agent',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0'
)
]
br.open(form_url)
br.select_form(nr=0)
# User credentials
br.form['cmd'] = sql_filtered
br.form['format'] = [return_format]
# Search and return the content of the resulting CSV-file
return br.submit().read()
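# Illustrative sketch of the comment-stripping step inside query() above:
# everything from an inline '--' SQL comment to the end of its line is
# dropped before the form is submitted, so the annotations in the SQL
# templates never reach the CAS. The input shown is hypothetical.
def _example_strip_sql_comments(sql_raw):
    sql_filtered = ''
    for line in sql_raw.split('\n'):
        # 'SELECT fieldID -- the ID' -> 'SELECT fieldID '
        sql_filtered += line.split('--')[0] + ' ' + os.linesep
    return sql_filtered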
def field_clean_local_dir():
"""
Todo
----
Make it possible to supply paths to folders that should have stuff removed.
"""
opath = os.path.join(_path_cas, 'fields')
if not os.path.exists(opath):
return
oglob = os.path.join(opath, '*')
cmd = 'rm {}'.format(oglob)
print cmd
if not oglob == '/':
os.system(cmd)
else:
print 'Clean-up command not executed ...'
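# A hedged alternative sketch for the Todo above: removing the files with
# glob + os.remove avoids shelling out to 'rm' and the root-path footgun
# the guard above protects against. Not wired into the pipeline.
def _example_clean_dir(opath):
    import glob
    for fn in glob.glob(os.path.join(opath, '*')):
        if os.path.isfile(fn):
            os.remove(fn)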
| xidus/ted | ted/sdss/cas.py | Python | bsd-3-clause | 34,382 | [
"Galaxy"
] | 746d9829a180582f73ffd02cf45862181ca2a08a12c99a0ad32efbdebf816397 |
import sys
import urllib.parse
import pycurl
from io import StringIO,BytesIO
import re
import subprocess
from subprocess import check_output
import random
from bs4 import BeautifulSoup
import os
import os.path
import base64
try:
from headlessBrowser import BrowseUrl
except:
from headlessBrowser_webkit import BrowseUrl
import time
from base64 import b64decode
import random
import json
from player_functions import send_notification,ccurl,naturallysorted
def unshorten_url(url):
content = (ccurl(url))
#print(content
try:
html = content
ysmm = re.findall(r"var ysmm =.*\;?", html)
if len(ysmm) > 0:
str_code = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
j = 0
l = ''
r = ''
for i in str_code:
if j < len(str_code):
l = l + str_code[j]
j = j+2
j = len(str_code) - 1
for i in str_code:
if j >=0 :
r = r + str_code[j]
j = j-2
final_decode_url = b64decode(l.encode() + r.encode())[2:].decode()
if re.search(r'go\.php\?u\=', final_decode_url):
final_decode_url = b64decode(re.sub(r'(.*?)u=', '',final_decode_url)).decode()
print(final_decode_url)
return final_decode_url
else:
return url
except Exception as e:
shrink_link = url
return shrink_link
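# A compact restatement (illustrative only) of the adf.ly 'ysmm' decode
# implemented in unshorten_url() above: even-indexed characters are read
# forwards, odd-indexed characters backwards, the two halves are
# concatenated, base64-decoded, and the first two bytes dropped.
def _example_decode_ysmm(ysmm):
        left = ysmm[::2]    # indices 0, 2, 4, ...
        right = ysmm[::-2]  # indices n-1, n-3, ... (reversed odd walk)
        return b64decode((left + right).encode())[2:].decode()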
def cloudfareUrl(url,quality,c):
web = BrowseUrl(url,quality,c)
def shrink_url(url,tmp_dir):
hdr = "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0"
if "linkshrink" in url:
url = url.replace('http:','https:')
#content = subprocess.check_output(['curl','-c','/tmp/AnimeWatch/link.txt','-b','/tmp/AnimeWatch/link.txt','-I','-L',url])
#content = getContentUnicode(content)
link_txt = os.path.join(tmp_dir,'link.txt')
content = ccurl(url+'#'+'-Icb'+'#'+link_txt)
#print(content,'----linkshrink---------')
#content = subprocess.check_output(['curl','-c','/tmp/AnimeWatch/link.txt','-b','/tmp/AnimeWatch/link.txt','-L',url])
#content = getContentUnicode(content)
content = ccurl(url+'#'+'-bc'+'#'+link_txt)
print(content,'----linkshrink---------')
soup = BeautifulSoup(content,'lxml')
shrink = soup.find('a',{'class':'bt'})
shrink_link = shrink['href']
shrink_link = shrink_link.replace('http:','https:')
f = open(link_txt,'a')
f.write('\nlinkshrink.net FALSE / FALSE 0 s32 1')
f.close()
content = ccurl(shrink_link+'#'+'-Ieb'+'#'+link_txt+'#'+url)
m = re.findall('Location: [^\n]*', content)
#print(m
if m:
#print(m
final1 = m[0]
final1 = re.sub('Location: |\r', '', final1)
shrink_link = final1
else:
shrink_link = ""
elif "sh.st" in url:
shrink_link = url
elif "adf.ly" in url:
url = re.sub('http:','https:',url)
shrink_link=unshorten_url(url)
print(shrink_link)
elif "linkbucks" in url or "bc.vc" in url:
shrink_link = url
elif "adf.acb.im" in url:
#shrink_link=str(cloudfare(url))
shrink_link=str(unshorten_url(url))
elif "mt0.org" in url:
try:
#content = (subprocess.check_output(['curl','-A',hdr,url]))
#content = getContentUnicode(content)
content = ccurl(url+'#'+'-L')
print(content)
url = (re.search('http[^"]*index.php[^"]*',str(content))).group()
#content = (subprocess.check_output(['curl','-A',hdr,url]))
#content = getContentUnicode(content)
content = ccurl(url+'#'+'-L')
print(content)
url = (re.search('http[^"]*index.php[^"]*',str(content))).group()
except:
pass
#content = (subprocess.check_output(['curl','-A',hdr,'-I','-L',url]))
#content = getContentUnicode(content)
content = ccurl(url+'#'+'-I')
m = re.findall('Location: [^\n]*', content)
#print(m
if m:
#print(m
final1 = m[0]
final1 = re.sub('Location: |\r', '', final1)
shrink_link = final1
else:
shrink_link = ""
else:
url = re.sub('http:','https:',url)
shrink_link = unshorten_url(url)
print(shrink_link)
return shrink_link
def mp4starUrl(content,site):
global qualityVideo
soup = BeautifulSoup(content,'lxml')
#m = soup.findAll('script,{"type":"text/javascript"}')
#if not m:
m = soup.findAll('script')
for i in m:
if site == 'videowing':
if 'eval(' in i.text and 'videofun' in i.text and ('https' in i.text or 'http' in i.text) :
print(i.text)
content = i.text
break
elif site == 'easyvideo':
if 'eval(' in i.text and ('Easyvideo' in i.text or 'videozoo' in i.text or 'easyvideo' in i.text) and ('https' in i.text or 'http' in i.text) :
print(i.text)
content = i.text
break
elif site == 'tusfiles':
if 'eval(function' in i.text and ('https' in i.text or 'http' in i.text):
print(i.text)
content = i.text
break
elif site == 'myvidstream':
if 'eval(' in i.text and 'myvidstream' in i.text and ('https' in i.text or 'http' in i.text):
print(i.text)
content = i.text
break
else:
if 'eval(' in i.text and ('https' in i.text or 'http' in i.text):
print(i.text)
content = i.text
break
print("-------------------------------------------")
#print(content)
print("-------------------------------------------")
m = re.findall("'[^']*",content)
#print(m)
for i in m:
if '|' in i and ('https' in i or 'http' in i):
i = i.replace("'",'')
print(i)
t = i
print('\n****')
m = t.split('|')
#print(m)
j = 0
k = 'a'
l = 'A'
print(chr(ord(k)+1))
arr = ['0','1','2','3','4','5','6','7','8','9']
for i in range(26):
arr.append(chr(ord(k)))
k = chr(ord(k)+1)
if site != 'tusfiles':
for i in range(26):
arr.append(chr(ord(l)))
l = chr(ord(l)+1)
print(arr)
length = len(arr)
k = arr[0]
l = arr[0]
j = 0
n = 0
p = 0
d = []
k = 100
d1 = []
for i in range(len(m)):
if not(m[i]):
k = k+1
if i%length == 0 and i:
p = p+1
n = 0
j = p
if p == 0:
if not m[i]:
r = (k,arr[j])
r1 = (arr[j],k)
else:
r = (m[i],arr[j])
r1 = (arr[j],m[i])
j = j+1
else:
if not m[i]:
r = (k,arr[j])
r1 = (arr[j],k)
else:
r = (m[i],arr[j]+arr[n])
r1 = (arr[j]+arr[n],m[i])
n = n+1
d.append(r)
d1.append(r1)
m = dict(d)
di = dict(d1)
print(di)
#print(di)
if site == 'mp4star':
try:
n = m['https']
except:
n = 'nothong'
v = m['file']
try:
n1 = m['http']
except:
n1 = 'nothing'
o = re.findall(v+"[^:]*:[^']"+n1+"[^']*",content)
print(o)
if o:
if len(o) == 1:
u1 = o[0]
else:
if qualityVideo == 'sd':
u1 = o[0]
else:
u1 = o[-1]
print(o)
u = re.sub(v+'[^:]*:','',u1)
u = u.replace("'",'')
u = u.replace('"','')
else:
print(v,n)
o = re.findall(v+"[^:]*:[^']'"+n+"[^']*",content)
print(o)
if o:
if len(o) == 1:
u1 = o[0]
else:
if qualityVideo == 'sd':
u1 = o[0]
else:
u1 = o[-1]
u = re.sub(v+"[^']*",'',u1)
u = u.replace("'",'')
u = u.replace('"','')
elif site == 'myvidstream':
v = m['file']
n1 = m['http']
o = re.findall("'"+v+'[^)]*',content)
print(o)
if o:
print(o)
u = re.sub("'"+v+'[^,]*','',o[0])
u = u.replace("'",'')
u = u.replace(",",'')
u = u.replace('"','')
elif site == 'tusfiles':
v = '"'+m['src']+'"'
v1 = m['value']
v2 = m['https']
n = v+v1+'="'+v2
print(n)
o = re.findall(n+'[^"]*',content)
print(o)
if o:
print(o)
if len(o) > 1:
o1 = o[-1]
else:
o1 = o[0]
u = re.sub(v+v1+'="','',o1)
u = u.replace("'",'')
u = u.replace(",",'')
u = u.replace('"','')
elif site == 'videowing':
try:
n = m['https']
except:
n = 'nothing'
v = m['url']
try:
n1 = m['http']
except:
n1 = 'nothing'
o = re.findall('"'+v+'[^:]*:'+'"'+n+'[^"]*',content)
if o:
print(o)
if len(o) == 1:
u1 = o[0]
else:
if qualityVideo == 'sd':
u1 = o[0]
else:
u1 = o[-1]
print(o)
u = re.sub('"'+v+'[^:]*:','',u1)
u = u.replace("'",'')
u = u.replace('"','')
else:
o = re.findall('"'+v+'[^:]*:'+'"'+n1+'[^"]*',content)
if o:
print(o)
if len(o) == 1:
u1 = o[0]
else:
if qualityVideo == 'sd':
u1 = o[0]
else:
u1 = o[-1]
print(o)
u = re.sub('"'+v+'[^:]*:','',u1)
u = u.replace("'",'')
u = u.replace('"','')
elif site == 'easyvideo':
try:
n = m['https']
except:
n = 'nothing'
v = m['url']
try:
n1 = m['http']
except:
n1 = 'nothing'
try:
w = m['src']
print(w,'----m[src]---')
except:
w = 'nothing'
o = re.findall(v+'[:]'+'"'+n+'[^"]*',content)
if o:
print(o)
if len(o) == 1:
u1 = o[0]
else:
if qualityVideo == 'sd':
u1 = o[0]
else:
u1 = o[-1]
print(o)
u = re.sub(v+'[:]','',u1)
u = u.replace("'",'')
u = u.replace('"','')
else:
o = re.findall(v+'[:]'+'"'+n1+'[^"]*',content)
if o:
print(o)
u = re.sub(v+'[:]','',o[0])
u = u.replace("'",'')
u = u.replace('"','')
u = u.replace('\\','')
#u = re.sub('["?"]|"','',u)
print(u)
r = re.findall('[0-9a-zA-Z][^\.|\%|\/|\-|\=|\:|\?|\&]*',u)
print(r)
url = ""
token = ''
found = False
special_arr = ['.','%','-','=','/','?',':','&']
i = 0
token_index = 0
l = 0
print(di['c'])
while (i < len(u)):
#print(i)
token = ""
found = False
#print(url)
if u[i] in special_arr:
url = url+u[i]
else:
j = i
while(j < len(u)):
token = token + u[j]
if token in r:
#print(token)
found = True
try:
url = url+di[token]
except:
url = url+token
token_index = j+1
break
j = j+1
if found:
i = token_index
else:
i = i+1
l = l+1
if l > 200:
break
print(l)
print(url)
url = re.sub('"','',url)
url = re.sub("'",'',url)
u = urllib.parse.unquote(url)
print(u)
return(u)
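# A minimal sketch (illustrative only) of the idea behind mp4starUrl():
# JavaScript packed with the common p,a,c,k,e,d scheme keeps its words in a
# '|'-separated list and refers to them by short base-N tokens ('0'-'9',
# 'a'-'z', ...). Decoding is a plain token -> word lookup. The alphabet
# matches the single-character part of the table built above; real packed
# payloads may also use two-character tokens. The input shown is hypothetical.
def _example_unpack_tokens(template, words):
        import string
        alphabet = string.digits + string.ascii_lowercase + string.ascii_uppercase
        table = {alphabet[k]: w for k, w in enumerate(words.split('|')) if w}
        # _example_unpack_tokens('0://1.2/3.4', 'http|example|com|video|mp4')
        # -> 'http://example.com/video.mp4'
        return ''.join(table.get(ch, ch) for ch in template)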
def findurl(i):
hdr = "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0"
found = ""
print(i)
global qualityVideo
if "solidfiles" in i:
#content = (subprocess.check_output(['curl','-L','-A',hdr,i]))
content = ccurl(i)
#content = getContentUnicode(content)
link1 = re.search('download_url":"https[^"]*',content)
link = link1.group()
found1 = re.search('https[^"]*',link)
found = found1.group()
return found
elif "mediafire" in i:
#content = (subprocess.check_output(['curl','-L','-A',hdr,i]))
#content = getContentUnicode(content)
content = ccurl(i)
print(content)
final1 = re.findall('kNO = "[^"]*',content)
if final1:
found = re.sub('kNO = "',"",final1[0])
return found
elif "tusfiles" in i:
found = ''
content = ccurl(i)
found = mp4starUrl(content,'tusfiles')
return found
elif "embedupload" in i:
content = ccurl(i)
m = re.findall('http://www.embedupload.com/\?MF=[^"]*',content)
if m:
content = ccurl(m[0])
n = re.findall('http://www.mediafire.com/\?[^<]*',content)
if n:
#print(n
final1 = re.sub('\t','',n[0])
print(final1)
plugin_path = os.path.expanduser('~')+"/.config/AnimeWatch/src/Plugins/phantom1.js"
#content = (subprocess.check_output(['phantomjs',plugin_path,final1]))
#content = getContentUnicode(content)
#print(content
content = "<html></html>"
final2 = re.findall('kNO = "[^"]*',content)
if final2:
found = re.sub('kNO = "',"",final2[0])
return found
elif "mirrorcreator" in i:
#content = (subprocess.check_output(['plowlist',i]))
#content = getContentUnicode(content)
content = "<html></html>"
url = re.findall('http[^"]*solidfiles[^\n]*',content)
if url:
#content = (subprocess.check_output(['curl','-L','-A',hdr,url[0]]))
#content = getContentUnicode(content)
content = ccurl(url[0])
soup = BeautifulSoup(content,'lxml')
link = soup.find('div',{'class':'btns'})
#print(link
link1 = link.find('a')
found = link1['href']
return found
elif "videoweed" in i:
found = ""
return found
elif "videowing" in i or "easyvideo" in i:
content = ccurl(i)
if 'videowing' in i:
found = mp4starUrl(content,'videowing')
else:
found = mp4starUrl(content,'easyvideo')
if found.startswith('https:'):
pass
elif found.startswith('http:'):
content = ccurl(found+'#'+'-I')
if "Location:" in content:
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[-1])
else:
found = url
return found
elif "myvidstream" in i:
packed = ''
final = ""
content = ccurl(i)
final = mp4starUrl(content,'myvidstream')
return final
elif "mp4upload" in i:
content = ccurl(i)
m = re.findall("'file': 'http://[^']*mp4upload.com[^']*video.mp4",content)
print(m)
if m:
url = re.sub("'file': '","",m[0])
else:
m = re.findall('"file": "http://[^"]*mp4upload.com[^"]*video.mp4',content)
if m:
url = re.sub('"file": "',"",m[0])
else:
url = ""
print("File Does Not exist")
print(url)
return url
elif "uploadcrazy" in i or "vidcrazy" in i:
content = ccurl(i)
m = re.findall('file: "http[^"]*uploadcrazy.net[^"]*mp4[^"]*',content)
if m:
url = re.sub('file: "','',m[0])
else:
url = ""
return url
elif "yourupload" in i:
#content = subprocess.check_output(["curl","-L","-A",hdr,i])
content = ccurl(i)
m = re.findall("file: 'http://[^']*video.mp4",content)
print(m)
if m:
url = re.sub("file: '","",m[0])
else:
url = ""
print("File Does Not exist")
print(url)
#content = (subprocess.check_output(["curl","-L","-I","-A",hdr,"-e",i,url]))
#content = getContentUnicode(content)
content = ccurl(url+'#'+'-Ie'+'#'+i)
if "Location:" in content:
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[-1])
print(found)
url = found
return url
elif "mp4star" in i or "justmp4" in i:
#content = (subprocess.check_output(["curl","-L","-I","-A",hdr,i]))
#content = getContentUnicode(content)
content = ccurl(i+'#'+'-I')
found = ""
if "Location:" in content:
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[-1])
print(found)
if found:
content = ccurl(found)
print(content)
url1 = mp4starUrl(content,'mp4star')
print(url1,'**********')
content = ccurl(url1+'#'+'-I')
if "Location:" in content:
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[-1])
print(found)
else:
found = url1
url = str(urllib.parse.unquote(found))
return url
elif "vidkai" in i:
#print("*********vid********kai"
content = ccurl(i)
#print(content
soup = BeautifulSoup(content,'lxml')
src = soup.find('source')['src']
#content = (subprocess.check_output(['curl','-I','-L','-A',hdr,src]))
#content = getContentUnicode(content)
content = ccurl(src+'#'+'-I')
#print(content
if "Location:" in content:
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[-1])
print(found)
return found
else:
return ""
elif "arkvid" in i:
content = ccurl(i)
soup = BeautifulSoup(content,'lxml')
src = soup.find('source')['src']
if 'http' not in src:
src = "http:"+src
print(src)
return src
elif "videonest" in i:
content = ccurl(i)
                a1 = re.findall('file:"http[^"]*.mp4',content)
if a1:
src = re.sub('file:"','',a1[0])
else:
return ""
#content = (subprocess.check_output(['curl','-L','-I','-A',hdr,src]))
#content = getContentUnicode(content)
content = ccurl(src+'#'+'-I')
if "Location:" in content:
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[0])
return found
elif ("playbb" in i) or ("playpanda" in i) or ("video44" in i):
content = ccurl(i)
m = re.findall("url: 'http[^']*",content)
n = re.sub("url: '",'',m[0])
if m:
#n = re.sub("url: '",'',m[0])
#print(n
found = str(urllib.parse.unquote(n))
print(found)
#content1 = (subprocess.check_output(['curl','-L','-I','-A',hdr,found]))
#content1 = getContentUnicode(content1)
content1 = ccurl(found+'#'+'-I')
if "Location:" in content1:
m = re.findall('Location: [^\n]*',content1)
found = re.sub('Location: |\r','',m[0])
print(found)
return found
else:
m1 = re.findall('_url = "http[^"]*',content)
if m1:
n1 = re.sub('_url = "','',m1[0])
found = str(urllib.parse.unquote(n1))
print(found)
#content2 = (subprocess.check_output(['curl','-L','-I','-A',hdr,found]))
#content2 = getContentUnicode(content2)
content2 = ccurl(found+'#'+'-I')
if "Location:" in content2:
m = re.findall('Location: [^\n]*',content2)
found = re.sub('Location: |\r','',m[-1])
print(found)
return found
elif 'googleusercontent' in i or 'bp.blogspot' in i or 'google' in i:
content1 = ccurl(i+'#'+'-I')
if "Location:" in content1:
m = re.findall('Location: [^\n]*',content1)
found = re.sub('Location: |\r','',m[0])
print(found)
return found
else:
content = ccurl(i)
m = re.findall('["]http://[^"]*.mp4[^"]*|["]http://[^"]*.flv[^"]*|["]https://redirector[^"]*|["]https://[^"]*.mp4', content)
m1 = re.findall("[']http://[^']*.mp4[^']*|[']http://[^']*.flv[^']*|[']https://redirector[^']*|[']https://[^']*.mp4", content)
print(m)
if m:
found = m[0]
#found = found[1:]
found = str(urllib.parse.unquote(found))
elif m1:
found = m1[0]
#found = found[1:]
found = str(urllib.parse.unquote(found))
else:
found = ""
return found
found = found.replace('"','')
found = found.replace("'",'')
print(found)
try:
if type(found) is list:
found1 = found[0]
else:
found1 = found
#content = (subprocess.check_output(['curl','-I','-A',hdr,found1]))
#content = getContentUnicode(content)
content = ccurl(found1+'#'+'-I')
if ('video/mp4' in content) or ('video/x-flv' in content):
return found1
else:
#content = (subprocess.check_output(['curl','-I','-A',hdr,found1]))
#content = getContentUnicode(content)
content = ccurl(found1+'#'+'-I')
m = re.findall('Location: [^\n]*',content)
found = re.sub('Location: |\r','',m[0])
#content = (subprocess.check_output(['curl','-I','-A',hdr,found]))
#content = getContentUnicode(content)
content = ccurl(found+'#'+'-I')
if ('video/mp4' in content) or ('video/x-flv' in content):
return found
else:
found =""
return found
except:
return found
class SubbedAnime():
def __init__(self,tmp):
self.hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
self.tmp_dir = tmp
self.cookie_file = ''
def getOptions(self):
criteria = ['Anime1','Anime44','AnimePlus','AnimeWow','Animehere','GoodAnime','AnimeNet','AnimeStream','Animefun','Animegalaxy','Animebox','Anime-Freak','AnimeBaka','AnimeHQ','AnimeSquare','Animeget','AnimeMax','AnimeAll','AnimeMix']
return criteria
def ccurlN(self,url,siteName,cookie=None,post=None):
if siteName == "AnimePlus" or siteName == 'AnimeSquare' or siteName == 'AnimeHQ' or siteName=='Animeget':
if not os.path.exists(self.cookie_file):
content = ccurl(url+'#-c#'+self.cookie_file)
else:
content = ccurl(url+'#-b#'+self.cookie_file)
if siteName == 'AnimeSquare' or siteName == 'AnimeHQ' or siteName=='Animeget':
if 'checking_browser' in content:
cloudfareUrl(url,'',self.cookie_file)
if post:
                                post_dict = urllib.parse.urlencode(post)
content = ccurl(url+'#-d#'+post_dict,self.cookie_file)
else:
content = ccurl(url+'#-b#'+self.cookie_file)
elif siteName == "AnimeNet":
if '#' in url:
url1,url2 = url.split('#')
content1 = ccurl(url1)
content2 = ccurl(url2)
content = content1+content2
else:
content = ccurl(url)
else:
content = ccurl(url)
return content
def getCompleteList(self,siteName,category,opt):
if siteName == "Anime44":
if opt == "Search":
url = "http://www.animenova.org/anime/search?key=" + name
else:
if category == "Movies":
url = "http://www.animenova.org/category/anime-movies"
else:
url = "http://www.animenova.org/anime-list"
elif siteName == "Animegalaxy":
url = "http://www.chia-anime.tv/index/"
elif siteName == "Animeget":
url = "http://www.animeget.io/full-anime-list/"
self.cookie_file = os.path.join(self.tmp_dir,'animeget.txt')
elif siteName == "Animehere":
if category == "Movies":
url = "http://www.animehere.com/anime-movie.html"
else:
url = "http://www.animehere.com/anime-all.html"
elif siteName == "AnimePlus":
url1 = "http://www.animeplus.tv/anime-movies"
if category == "Movies":
url = "http://www.animeplus.tv/anime-movies"
else:
url = "http://www.animeplus.tv/anime-show-list"
self.cookie_file = os.path.join(self.tmp_dir,'cookie_plus.txt')
#tmp_content = ccurl(url1+'#-c#'+self.cookie_file)
elif siteName == "AnimeWow":
if category == "Movies":
url = "http://www.animewow.org/movies"
else:
url = "http://www.animewow.org/anime"
elif siteName == "Animebox":
url = "http://www.animebox.tv/category"
elif siteName == "AnimeHQ":
url = "http://moetube.net/explore"
self.cookie_file = os.path.join(self.tmp_dir,'animehq.txt')
elif siteName == "GoodAnime":
url = "http://www.goodanime.net/new-anime-list"
elif siteName == "Anime-Freak":
url = "http://www.anime-freak.org/anime-list/"
elif siteName == "AnimeBaka":
url = "http://animebaka.tv/browse/shows"
elif siteName == "Animefun":
url = "http://animeonline.one/category/anime-list/"
elif siteName == "AnimeNet":
url = "http://www.watch-anime.net/anime-list-all/"
urlM = "http://www.watch-anime.net/anime-movies/"
url = url+'#'+urlM
elif siteName == "AnimeMax":
url = "http://gogocartoon.us/anime-list.html"
elif siteName == "AnimeStream":
url = "http://www.ryuanime.com/animelist.php"
elif siteName == "AnimeMix":
url = "http://www.animechiby.com/index/"
elif siteName == "AnimeSquare":
url = "http://www.masterani.me/"
#url = "http://www.masterani.me/api/anime-all"
url = 'http://www.masterani.me/api/anime/filter?order=score_desc&page=1'
self.cookie_file = os.path.join(self.tmp_dir,'animeSquare.txt')
elif siteName == "Anime1":
url = "http://www.anime1.com/content/list/"
elif siteName == "AnimeAll":
if category == "Movies":
url = "http://www.watchanimeshows.tv/movies-list/"
else:
url = "http://www.watchanimeshows.tv/anime-shows/"
site_nm = siteName.lower()+'list.txt'
title_file_list = os.path.join(self.tmp_dir,site_nm)
if os.path.exists(title_file_list):
m = []
f =open(title_file_list,'r')
lines = f.readlines()
f.close()
for i in lines:
i = i.strip()
m.append(i)
return m
print(url)
content = self.ccurlN(url,siteName)
if siteName == "Anime44":
m = []
soup = BeautifulSoup(content,'lxml')
if category == "Movies":
link = soup.findAll('div',{'id':'videos'})
else:
link = soup.findAll('table',{'id':'series_grid'})
for i in link:
a = i.findAll('a')
for j in a:
if 'href' in str(j):
k = (j['href']).split('/')
m.append(k[-1])
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "Animeget" or siteName == "Animegalaxy":
if siteName == "Animeget":
m = re.findall('/anime/[^"]*',content)
else:
m = re.findall('http://www.chia-anime.tv/episode/[^"]*/',content)
#print(m
#del m[0:50]
#m = list(set(m))
m.sort()
j = 0
for i in m:
if siteName == "Animeget":
i = re.sub('/anime/',"",i)
else:
i = re.sub('http://www.chia-anime.tv/episode/',"",i)
i = re.sub('/',"",i)
m[j] = i
j = j + 1
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "Animehere":
m = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('dl')
for i in link:
j = i.findAll('a')
for k in j:
if 'href' in str(k) and "#" not in str(k):
l = (k['href']).split('/')
p = l[-1].split('.')
m.append(p[0])
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "AnimePlus":
m = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('table',{'class':'series_index'})
for i in link:
a = i.findAll('a')
for j in a:
if 'href' in str(j):
k = (j['href']).split('/')
m.append(k[-1])
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "AnimeWow":
m = re.findall('http://www.animewow.org/watch-[^"]*',content)
#print(m
#del m[0:50]
m = list(set(m))
m.sort()
j = 0
for i in m:
i = re.sub('http://www.animewow.org/watch-',"",i)
#i = re.sub('',"",i)
m[j] = i
j = j + 1
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "Animebox":
m = re.findall('http://www.animebox.tv/videos/category/[^"]*',content)
#print(m
#del m[0:50]
m = list(set(m))
m.sort()
j = 0
for i in m:
i = re.sub('http://www.animebox.tv/videos/category/',"",i)
i = re.sub('/',"",i)
#i = re.sub('',"",i)
i = str(i)
m[j] = i
j = j + 1
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "AnimeHQ":
m = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'class':'serieslisted'})
for i in link:
j = i.find('a')['href']
j = j.replace('/anime/','')
k = j.split('/')
m.append(k[1]+'-'+k[0])
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "GoodAnime":
m = re.findall('category/[^"]*', content)
m = list(set(m))
m.sort()
j=0
for i in m:
i = re.sub("category/","",i)
m[j] = i
j = j+1
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "Anime-Freak":
m = re.findall('http://www.anime-freak.org/anime-stream/[^/]*', content)
m = list(set(m))
m.sort()
j=0
for i in m:
i = re.sub("http://www.anime-freak.org/anime-stream/","",i)
m[j] = i
j = j+1
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "AnimeBaka":
m = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'id':'list'})
for i in link:
a = i.findAll('a')
for j in a:
if 'href' in str(j):
k = (j['href']).split('/')
m.append(k[-1])
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "Animefun":
m = []
arr = re.findall('a href="/category/anime-list/[^"]*',content)
for i in arr:
if '[?]' in i or 'html' in i:
pass
else:
tmp = re.sub('a href="/category/anime-list/','',i)
if tmp:
tmp =tmp.replace('/','')
m.append(tmp)
m = random.sample(m, len(m))
elif siteName == "AnimeNet":
m = []
                        # ccurlN() already returns the show-list and movie-list pages
                        # concatenated for AnimeNet, so parse the combined content once
                        # (content1/content2 were locals of ccurlN and are undefined here).
                        soup = BeautifulSoup(content,'lxml')
                        link = soup.findAll('li')
                        for i in link:
                                j = i.findAll('a')
                                for k in j:
                                        tmp = k['href'].split('/')[-2]
                                        if tmp:
                                                m.append(tmp)
m = random.sample(m, len(m))
elif siteName == "AnimeMax":
m = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'class':'box-content list'})
#print(link
for i in link:
j = i.findAll('a')
for k in j:
tmp = k['href'].split('/')[-1]
if tmp :
m.append(tmp)
m = random.sample(m, len(m))
elif siteName == "AnimeStream":
m = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'id':'animeList'})
print(link)
for i in link:
j = i.findAll('a')
for k in j:
if 'href' in str(k):
tmp = k['href'].split('/')[-2]
if tmp :
m.append(tmp)
m = random.sample(m, len(m))
elif siteName == "AnimeSquare":
l = json.loads(content)
n=l['data']
last_page = int(l['last_page'])
index = 2
print(n)
m = []
for i in n:
title = i['title']
nm = i['slug']
ep_cnt = i['episode_count']
nm_app = str(ep_cnt) +','+str(nm)
m.append(nm_app)
for pg in range(index,last_page+1):
url = 'http://www.masterani.me/api/anime/filter?order=score_desc&page='+str(pg)
content = self.ccurlN(url,siteName)
l = json.loads(content)
n = l['data']
for i in n:
title = i['title']
nm = i['slug']
ep_cnt = i['episode_count']
nm_app = str(ep_cnt) +','+str(nm)
m.append(nm_app)
time.sleep(0.2)
print(pg)
elif siteName == "AnimeMix":
m = []
#content = open('1.txt','r').read()
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('ul',{'class':'links'})
index = 0
for i in link :
j = i.findAll('a')
for k in j:
if 'href' in str(k):
l = k['href'].split('/')[-2]
m.append(l)
#print(l + " :index "+str(index)
index = index + 1
m = list(set(m))
m.sort()
#m = random.sample(m, len(m))
elif siteName == "Anime1":
m = []
#content = open('1.txt','r').read()
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('ul',{'class':'anime-list'})
index = 0
for i in link :
j = i.findAll('a')
for k in j:
if 'href' in str(k):
l = k['href'].split('/')[-1]
m.append(l)
#print(l + " :index "+str(index)
index = index + 1
m = list(set(m))
#m.sort()
if opt == "Random":
m = random.sample(m, len(m))
elif siteName == "AnimeAll":
m = []
#content = open('1.txt','r').read()
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('ul',{'class':'animelist'})
#print(link
index = 0
for i in link :
j = i.findAll('li')
for k in j:
l = k.findAll('a')
for r in l:
if 'href' in str(r):
t = r['href'].split('/')[-2]
m.append(t)
if opt == "Random":
m = random.sample(m, len(m))
if not os.path.exists(title_file_list):
f = open(title_file_list,'w')
for i in range(len(m)):
if i == 0:
f.write(m[i])
else:
f.write('\n'+m[i])
f.close()
m.sort()
return m
def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
url = ""
depth_list = 1
display_list = True
embed = depth_list
if siteName == "Anime44":
url = "http://www.animenova.org/category/" + name
base = "http://www.animenova.org/"
elif siteName == "Animegalaxy":
url = "http://www.chia-anime.tv/episode/" + name + '/'
base = "http://www.chia-anime.tv/"
elif siteName == "Animeget":
url = "http://www.animeget.io/anime/" + name + '/'
base = "/watch/"
self.cookie_file = os.path.join(self.tmp_dir,'animeget.txt')
elif siteName == "Animehere":
url = "http://www.animehere.com/anime/" + name + ".html"
base = "http://www.animehere.com/"
elif siteName == "AnimePlus":
url = "http://www.animeplus.tv/" + name
base = "http://www.animeplus.tv/"
self.cookie_file = os.path.join(self.tmp_dir,'cookie_plus.txt')
elif siteName == "AnimeWow":
url = "http://www.animewow.org/watch-" + name
base = "http://www.animewow.org/"
elif siteName == "Animebox":
url = "http://www.animebox.tv/videos/category/" + name
base = "http://www.animebox.tv/video/"
elif siteName == "AnimeHQ":
new_name = name.rsplit('-',1)[0]
new_c = name.split('-')[-1]
url = "http://moetube.net/anime/" + new_c + '/' + new_name
base = "http://moetube.net/watch/"
self.cookie_file = os.path.join(self.tmp_dir,'animehq.txt')
elif siteName == "GoodAnime":
url = "http://www.goodanime.net/category/" + name
base = "http://www.goodanime.net/"
elif siteName == "Anime-Freak":
url = "http://www.anime-freak.org/anime-stream/" + name + '/'
base = "http://www.anime-freak.org/anime-stream/"
elif siteName == "AnimeBaka":
url = "http://animebaka.tv/anime/" + name + '/'
base = "http://animebaka.tv/anime/"
elif siteName == "Animefun":
url = "http://animeonline.one/category/anime-list/" + name+'/'
base = "http://animeonline.one/"
elif siteName == "AnimeNet":
url = "http://www.watch-anime.net/" + name + "/"
base = "http://www.watch-anime.net/"
elif siteName == "AnimeMax":
url = "http://gogocartoon.us/category-anime/" + name
base = "http://gogocartoon.us/"
elif siteName == "AnimeStream":
url = "http://www.ryuanime.com/watch-anime/" + name + '/'
base = "http://www.ryuanime.com/"
elif siteName == "AnimeSquare":
name1 = name.split(',',1)[1]
epncnt = name.split(',',1)[0]
url = "http://www.masterani.me/anime/info/" + name1
base = "http://www.masterani.me/anime/"
self.cookie_file = os.path.join(self.tmp_dir,'animeSquare.txt')
print(url)
elif siteName == "AnimeMix":
if embed == 0:
url = "http://www.animechiby.com/tag/" + name + '/'
elif embed == 1:
url = "http://www.animechiby.com/" + name + '/'
base = "http://www.animechiby.com/"
elif siteName == "Anime1":
url = "http://www.anime1.com/watch/" + name
base = "http://www.anime1.com/watch/"
elif siteName == "AnimeAll":
if category == "Movies":
url = "http://www.watchanimeshows.tv/watch-movie/" + name+'/'
base = "http://www.watchanimeshows.tv/watch-movie/"
else:
url = "http://www.watchanimeshows.tv/watch-anime/" + name+'/'
base = "http://www.watchanimeshows.tv/watch-anime/"
if siteName == "AnimeMix" and embed == 2:
content = "<html>Hello World</html>"
else:
content = self.ccurlN(url,siteName)
#content = ccurl(url)
soup = BeautifulSoup(content,'lxml')
summary = ""
#print(link
print(url)
if (siteName == "Anime44") or (siteName == "AnimePlus") or (siteName == "AnimeWow"):
link = soup.findAll('div', { "id" : 'series_details' })
for i in link:
summary = i.text
#summary = re.sub('\n\n','\n',summary)
#summary = re.sub('\n\n','',summary)
summary = re.sub(' | | | | | | ',"",summary)
summary = re.sub('\n\n\n',"\n",summary)
summary = re.sub('\n\n',"\n",summary)
summary = re.sub(':\n'," : ",summary)
summary = re.sub(':[^"]\n'," : ",summary)
summary = re.sub(' \n'," ",summary)
summary = re.sub('[^.]*Category :',"\nCategory :",summary)
summary = summary[1:]
elif siteName == "Animehere":
link = soup.findAll('section', { "class" : 'info' })
for i in link:
summary = i.text
#summary = re.sub('\n\n','\n',summary)
#summary = re.sub('\n\n','',summary)
summary = re.sub('"',"",summary)
summary = re.sub('var[^"]*;',"",summary)
summary = re.sub('\n\n\n',"\n",summary)
summary = re.sub('\n\n',"\n",summary)
summary = re.sub('\n'," ",summary)
summary = re.sub(' ',"",summary)
summary = re.sub(':\n',":",summary)
summary = re.sub('\n',"",summary)
summary = re.sub('Summary',"\nSummary",summary)
summary = re.sub('Genre',"\nGenre",summary)
#summary = re.sub('\n[^"]*[a-zA-Z0-9]'," ",summary)
#summary = re.sub(' ',"",summary)
summary=summary[1:]
elif siteName == "Animefun":
img = []
link = soup.findAll('div', { "class" : 'box-info-summary' })
for i in link:
summary = i.text
#summary = re.sub('\n\n','\n',summary)
#summary = re.sub('\n\n','',summary)
#summary = re.sub('"',"",summary)
#summary = re.sub('var[^"]*;',"",summary)
summary = summary[1:]
summary = re.sub('\n\n\n',"\n",summary)
summary = re.sub('\n\n',"\n",summary)
#summary = re.sub('Summary',"\nSummary",summary)
#summary = re.sub('Genre',"\nGenre",summary)
#summary = re.sub('\n[^"]*[a-zA-Z0-9]'," ",summary)
#summary = re.sub(' ',"",summary)
#summary=summary[1:]
try:
link = soup.find('div',{ 'class':'box-info-cover'})
img_src = link.find('img')['src']
print(img_src+'***********')
if ' ' in img_src:
img_src = re.sub(" ","%20",img_src)
print(img_src)
if img_src:
img.append(img_src)
print(img)
except:
img[:]=[]
elif siteName == "Animegalaxy":
genre = ""
summary = ""
link = soup.findAll('span', { "class" : "info" })
for i in link:
summary = i.text
link = soup.findAll('div', { "class" : "dm" } )
for i in link:
if "Genres:" in i.text:
genre = i.text
genre = genre[:-1]
summary = name + "\n" + summary + "\n" + genre
elif siteName == "Animeget":
genre = ""
summary = ""
link = soup.find('div', { "class" : "details" })
#for i in link:
summary = link.text
elif siteName == "Animebox":
link = soup.find('div',{'id':'main-content'})
links = link.findAll('p')
img_links = link.find('img')
summary = ""
j = 1
for i in links:
summary = summary + i.text + '\n'
j = j+1
if j == 4:
break
if img_links:
img1 = "http://animebox.tv"+img_links['src']
elif siteName == "AnimeNet":
link = soup.find('div',{'class':'det'})
link1 = link.findAll('p')
for i in link1:
summary = summary + i.text
#print(summary)
elif siteName == "Anime1":
summary = ""
img = []
link = soup.find('div',{'class':'detail-left'})
k = 0
if link:
link1 = link.findAll('span')
for i in link1:
summary = summary + i.text + '\n'
k = k+1
if k == 4:
break
link = soup.find('div',{'class':'detail-cover'})
link1 = link.find('a')
if link1:
i = link1.find('img')
img.append(i['src'])
#print(summary)
elif siteName == "AnimeAll":
summary = ""
img = []
m = []
n = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'class':'row'})
for i in link:
j = i.findAll('p')
l = i.findAll('img')
for k in j:
m.append(k.text)
for p in l:
n.append(p['src'])
if m and len(m) > 1:
m = m[:-1]
m = list(set(m))
print(len(m))
if len(m) == 2:
print(m[1])
summary = m[1]
if n:
n = list(set(n))
img.append(n[0])
elif siteName == "AnimeBaka":
link = soup.findAll('span',{'itemprop':"description"})
for i in link:
summary = i.text
summary = re.sub('\n\n','\n',summary)
summary = re.sub('\n\n','',summary)
#summary = summary[1:]
#print(summary)
elif siteName == "AnimeHQ":
img_hq = ''
link = soup.findAll('div', {'id' : 'desc'})
if link:
summary = link[0].text
"""
summary = re.sub('\n\n','\n',summary)
summary = re.sub('\n\n','',summary)
summary = re.sub('Genres:','\nGenres:',summary)
#summary = re.sub('Synopsis:\n','Synopsis:',summary)
#summary = re.sub('Synopsis:[^\n]*\n','Synopsis:',summary)
summary = re.sub('[^\n]*Synopsis:[^a-zA-Z0-9]*','Synopsis : ',summary)
#summary = re.sub('\n',' ',summary)
summary=summary[1:]
"""
img_l = soup.find('div',{'id':'img'})
if img_l:
img_hq = img_l.find('img')['src']
elif siteName == "Anime-Freak":
l = re.findall('http://www.animeboy.info/anime-info/[^"]*',content)
if l:
content1 = ccurl(l[0])
soup = BeautifulSoup(content1,'lxml')
m =[]
link = soup.findAll('body')
for i in link:
k = i.text
m = re.findall('Genre[^$]*Anime Info',k)
if m:
genre = re.sub('Anime Info','',m[0])
else:
genre = ""
n = re.findall('Plot Summary[^$]*Screen shots',k)
if not n:
n = re.findall('Plot Summary[^$]*var[ ]',k)
if n:
info = re.sub('Screen shots|var[ ]','',n[0])
else:
info = ""
summary = name + '\n' + genre + '\n' + info
#print(summary)
elif siteName == "GoodAnime":
summary = ""
link = soup.findAll('div',{ 'class':'catdescription'})
img = []
print(link)
if link:
img1 = link[0].find('img')
if img1:
img.append(img1['src'])
j = 0
for i in link:
summary = summary + link[j].text
j = j+1
elif siteName == "AnimeMax":
summary = ""
link = soup.find('div',{ 'class':'description'})
img = []
summary = link.text
elif siteName == "AnimeStream":
summary = ""
link = soup.find('div',{ 'class':'postbg'})
img = []
summary = link.text
elif siteName == "AnimeMix":
summary = ""
img = []
if embed == 1:
link = soup.find('div',{ 'id':'content'})
summary = link.text
summary = re.sub('var cpmstar[^#]*','',summary)
link1 = link.find('img')
if link1:
image = link1['src']
img.append(image)
print(img,'---------------img-------------')
#picn = "/tmp/AnimeWatch/" + name + ".jpg"
picn = os.path.join(self.tmp_dir,name+'.jpg')
if not img:
link = soup.findAll('meta')
print(link,'-------------LinkMeta-----------')
for i in link:
if 'meta content=' in str(i):
k = i['content']
if k:
k = re.sub(' ','',k)
img.append(k)
print(img)
if not img:
img.append("No.jpg")
picn = "No.jpg"
print(picn)
if img and '#' not in picn:
print(img[0])
if img[0] != 'No.jpg':
ccurl(img[0]+'#'+'-o'+'#'+picn)
elif embed == 0:
img.append("No.jpg")
elif embed == 2:
img.append("No.jpg")
elif siteName == "AnimeSquare":
try:
link = soup.find('div',{ 'class':'info'})
link1 = link.findAll('p')
emb = ""
for i in link1:
if 'Episodes' in i.text:
emb = i.text
break
if emb:
emb = emb.replace('Episodes','')
emb = emb.replace(' ','')
embed = int(emb)
summary = ""
link = soup.find('div',{ 'class':'synopsis'})
img = []
summary = link.text
link = soup.find('div',{ 'class':'title'})
link1 = link.find('h1')
title = link1.text
desc = link.find('div',{ 'class':'description'})
descr = desc.text
summary = title + " (" + descr + ")\n" + summary
except:
pass
if not summary:
summary = "Summary Not Available"
try:
if (siteName == "Anime44") or (siteName == "AnimePlus") or (siteName == "AnimeWow"):
img = re.findall(base+'images/series/big/[^"]*.jpg',content)
elif siteName == "Animehere":
img = re.findall('/res/covers/[^"]*.jpg[^"]*|/images/[^"]*.jpg',content)
img[0] = "http://www.animehere.com" + img[0]
elif siteName == "Animegalaxy":
img = re.findall('http[^"]*.jpg|http[^"]*jpeg',content)
elif siteName == "Animeget":
img = re.findall('/cover-anime/[^"]*.jpg[^"]*',content)
if img:
img[0]='http://www.animeget.io'+img[0]
elif siteName == "Animebox":
img =[]
img.append(img1)
elif siteName == "AnimeHQ":
img = []
if img_hq:
img.append(img_hq)
else:
img = re.findall('http[^"]*.jpg|http[^"]*jpeg|http[^"]*png',content)
print(img[0])
elif siteName == "AnimeStream":
img = re.findall('/[^"]*.jpg',content)
img[0] = "http://www.ryuanime.com" + img[0]
elif siteName == "AnimeSquare":
img = re.findall('http[^"]*.jpg[^"]*',content)
print(img)
elif siteName == "AnimeNet":
img = []
link = soup.find('div',{ 'class':'anm_ifo'})
print(link)
img_src = link.find('img')['src']
if ' ' in img_src:
img_src = re.sub(" ","%20",img_src)
print(img_src)
if img_src:
img.append(img_src)
elif siteName == "GoodAnime":
#img = re.findall('images/[^"]*.jpg',content)
#img[0] = "http://www.goodanime.net/"+img[2]
if img:
if not 'http://' in img[0]:
img1 = re.findall('images/[^"]*.jpg',img[0])
if img1:
img[0] = base+img1[0]
print(img)
elif siteName == "Anime-Freak" or siteName == "AnimeBaka":
img = re.findall('http[^"]*.jpg|http[^"]*jpeg',content)
if not img:
img = re.findall('//[^"]*.jpg',content)
img[0] = "http:" + img[0]
elif siteName == "AnimeMax":
img = []
link = soup.find('div',{ 'class':'box-content'})
img1_src = link.find('div',{ 'class':'img'})
img_src = link.find('img')['src']
if ' ' in img_src:
img_src = re.sub(" ","%20",img_src)
print(img_src)
if img_src:
img.append(img_src)
print(img)
if not img:
link = soup.findAll('meta')
for i in link:
if 'meta content=' in str(i):
k = i['content']
if k:
k = re.sub(' ','',k)
img.append(k)
print(img)
#print(img
#if img[0] != "No.jpg":
#jpgn = img[0].split("/")[-1]
#print("Pic Name=" + jpgn
#picn = "/tmp/AnimeWatch/" + name + ".jpg"
picn = os.path.join(self.tmp_dir,name+'.jpg')
if not img:
img[0] = "No.jpg"
picn = "No.jpg"
print(picn)
if not os.path.isfile(picn) and '#' not in picn:
print(img[0])
#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img[0]])
if siteName == "Animeget" or siteName == 'AnimePlus':
ccurl(img[0]+'#'+'-o'+'#'+picn,self.cookie_file)
else:
ccurl(img[0]+'#'+'-o'+'#'+picn)
else:
print("No Image")
except:
picn = "No.jpg"
j=0
if (siteName == "Anime-Freak"):
m = re.findall(base+'[^/]*[^"]*', content)
elif (siteName == "Anime1"):
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('ul',{'class':'anime-list'})
m = []
k = 0
for i in link:
link1 = i.findAll('a')
for j in link1:
if 'href' in str(j):
final = j['href'].split('/')[-1]
print(final + " :index "+str(k))
m.append(final)
k = k+1
elif (siteName == "AnimeSquare"):
m = []
epn_n = ""
epn_n = int(epncnt)
if not epn_n:
epn_n = 0
i = 1
while(i <= epn_n):
m.append(str(i))
i = i+1
elif (siteName == "AnimeAll"):
m1 =[]
m2 =[]
m=[]
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'class':'toggles'})
for i in link:
j = i.findAll('a')
for k in j:
if 'href' in str(k) and '#' not in str(k):
l = k['href'].split('/')
p = l[-2]+'-'+l[-3]
dub_sub = l[-3].lower()
if 'sub' in dub_sub:
m1.append(p)
else:
m2.append(p)
m1 = naturallysorted(m1)
m2 = naturallysorted(m2)
m[:]=[]
m = m1+m2
elif (siteName == "AnimeMix"):
m = []
if embed == 0:
#content = (subprocess.check_output(['curl','-L','-A',self.hdr,url]))
#content = getContentUnicode(content)
content = ccurl(url)
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('div',{'class':'post_content'})
m = []
k = 0
for i in link:
link1 = i.find('a')
final = link1['href'].split('/')[-2]
print(final + " :index "+str(k))
m.append(final)
k = k+1
m.insert(0,"LINK:")
elif embed == 1:
print("hello")
m =[]
m[:]=[]
#content = (subprocess.check_output(['curl','-L','-A',self.hdr,str(url)]))
#content = getContentUnicode(content)
content = ccurl(str(url))
soup = BeautifulSoup(content,'lxml')
link = []
link = soup.findAll('div',{'class':'su-spoiler-title'})
if link:
for i in link:
tmp = str(i.text)
s_text = re.search('[a-zA-Z0-9][^"]*',tmp)
if s_text:
m.append(s_text.group())
else:
m.append(tmp)
#j = i.findNext('input',{'class':'button-auto'})
r = i.findNext('p')
if r:
q = r.findAll('input',{'class':'button-auto'})
if not q:
q = r.findAll('input',{'type':'button'})
for j in q:
if 'value' in str(j) and 'onclick' in str(j):
val = j['onclick']
m.append(j['value']+'#'+(re.search("http[^']*",val)).group())
#k = j.findNextSiblings()
#if k:
# for l in k:
# if 'value' in str(l) and 'onclick' in str(l):
# val = l['onclick']
# m.append(l['value']+'#'+(re.search("http[^']*",val)).group())
t_links = len(m) - len(link)
linkC = soup.findAll('p')
print(linkC)
n=[]
n[:]=[]
if linkC:
for t in linkC:
linkB = t.findAll('input')
for j in linkB:
if 'value' in str(j) and 'onclick' in str(j):
q = j.findPrevious('p')
q_text = str(q.text)
if not q_text:
q = q.findPrevious('p')
q_text = str(q.text)
s = re.search('[a-zA-Z0-9][^"]*',q_text)
if s:
print("hello")
n.append(q_text)
if not s:
q = j.findPrevious('div',{'class':'su-spoiler-title'})
if q:
n.append(str(q.text))
val = j['onclick']
n.append(j['value']+'#'+(re.search("http[^']*",val)).group())
if m:
if not m[0]:
m[0] = name
if not m:
m.append(name)
#if len(n) > t_links:
m.append('Links')
m=m+n
m.insert(0,"LINK:INFO")
elif "#" in name:
#print(mir_output
#output = subprocess.check_output(["bash","-c","./mirror.sh"])
#forward = str(raw_input("Enter Next Link Index\n"))
#print(forward
#forward_link = str(mir[int(forward)-1])
#individual_epn = "False"
#for i in epn_v:
# if "Episode" in i:
# individual_epn = "True"
# break
output = re.sub('\n','',name)
forward_link = output.split('#')[1]
output1 = output.split('#')[0]
output1 = output1.replace(' ','-')
print(forward_link,'----')
if forward_link.endswith('.jpg'):
forward_link = forward_link.replace('.jpg','')
print(forward_link,'.jpg removed')
if '=http' in forward_link:
forward_link = forward_link.split('=')[-1]
print(forward_link,'--split--')
if "linkbucks" in forward_link or "bc.vc" in forward_link or "qqc.co" in forward_link or "urlbeat.net" in forward_link:
if "linkbucks" in forward_link or "qqc.co" in forward_link or "urlbeat.net" in forward_link:
#content = (subprocess.check_output(['curl','-I','-L',forward_link]))
#content = getContentUnicode(content)
content = ccurl(forward_link+'#'+'-I')
m = re.findall('Location: [^\n]*', content)
#print(m
if m:
#print(m
final1 = m[0]
final1 = re.sub('Location: |\r', '', final1)
print(final1)
else:
final1 = forward_link
profile = os.path.expanduser('~')+'/.mozilla/firefox/webdriver'
ff_profile = webdriver.FirefoxProfile(profile)
browser = webdriver.Firefox(ff_profile)
browser.get(final1)
time.sleep(15)
content = str(browser.page_source)
#print(content
browser.close()
else:
final1 = ''
if "adf.ly" in forward_link:
forward_link = re.sub('http:','https:',forward_link)
final1= unshorten_url(forward_link)
elif "q.gs" in forward_link:
#final1 = cloudfare(forward_link)
final1=unshorten_url(forward_link)
print(final1)
elif "adf.acb.im" in forward_link:
#final1 = cloudfare(forward_link)
final1= unshorten_url(forward_link)
print(final1)
elif "lnk.acb.im" in forward_link:
final1= forward_link
print(final1)
elif "mt0.org" in forward_link:
final1= shrink_url(forward_link,self.tmp_dir)
print(final1)
else:
#content = (subprocess.check_output(['curl','-I','-L',forward_link]))
#content = getContentUnicode(content)
content = ccurl(forward_link+'#'+'-IA')
print(content)
m = re.findall('Location: [^\n]*', content)
#print(m
if m:
#print(m
final1 = m[0]
final1 = re.sub('Location: |\r', '', final1)
print(final1)
if not final1:
final1 = forward_link
#content = open('1.txt','r').read()
hdr = "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0"
#content = (subprocess.check_output(['curl','-L','-A',self.hdr,final1]))
#content = getContentUnicode(content)
content = ccurl(final1)
m = []
m[:]=[]
soup = BeautifulSoup(content,'lxml')
link = soup.find('table')
table1 = str(link)
t_n = re.findall('<div class="title"[^#]*</table>',table1)
k =''
for i in t_n:
l = re.sub('\t|\r|-|\n','',i)
j = re.sub('</div><br />|</div><br/>','</div><p>',l)
k = re.sub('</a>','</a></p>',j)
k = re.sub('</a></p><br />|</a></p><br/>','</a></p><p>',k)
#k = re.sub('\n','',k)
#print(k
if k:
soup = BeautifulSoup(k,'lxml')
link = soup.findAll('p')
for i in link:
if 'href' in str(i):
l=i.text.split('=')[0]
l = re.sub(' ','-',l)
j = i.find('a')
k = j['href']
print(l + ' '+ k )
m.append(l+' '+k)
else:
m.append(output1+' '+forward_link)
m.insert(0,"LINK:FINAL")
elif (siteName == "AnimeMax"):
m = []
link = soup.find('ul',{'id':'episode_related'})
"""
epstart = int(link.find('a')['ep_start'])
epend = int(link.find('a')['ep_end'])
if not epstart:
epstart = 0
if not epend:
epend = 0
i = epstart
while(i<=epend):
ep_n = name+"-episode-"+str(i)+"-anime"
m.append(ep_n)
i = i+1
"""
j = link.findAll('a')
for k in j:
tmp = k['href'].split('/')[-1]
m.append(tmp)
elif (siteName == "AnimeNet"):
m = []
link = soup.findAll('li')
for i in link:
j = i.findAll('a')
for k in j:
tmp = k['href'].split('/')[-2]
if tmp and (tmp != "anime-list-all" and tmp != "anime-movies" and tmp != "most-popular"):
m.append(tmp)
elif (siteName == "AnimeStream"):
m = []
link = soup.find('ul',{ "id":"anime-episode-list-sub"})
if link:
j = link.findAll('a')
for k in j:
tmp = k['href'].split('/')[-1]
if tmp:
m.append("Subbed-"+tmp)
link = soup.find('ul',{ "id":"anime-episode-list-dub"})
if link:
j = link.findAll('a')
for k in j:
tmp = k['href'].split('/')[-1]
if tmp:
m.append("Dubbed-"+tmp)
elif (siteName == "Animefun"):
m = []
mydivs = soup.findAll("ul", { "class" : "list-episode" })
for i in mydivs:
j = i.findAll('a')
for k in j:
links = k['href'].split('/')[-2]
m.append(links)
print(m)
"""
t = int(m[0].split('-')[-1])
m[:] = []
while(t > 0):
epn = "episode-"+str(t)
m.append(epn)
t = t -1
"""
elif (siteName == "AnimeBaka"):
m = []
link = soup.findAll('tbody')
for i in link:
a = i.findAll('a')
for j in a:
if 'href' in str(j):
k = (j['href']).split('/')
m.append(k[-1])
print(m)
print(len(m))
elif (siteName == "AnimeWow") or (siteName == "AnimePlus") or (siteName == "Anime44") or (siteName == "Animegalaxy") or (siteName == "Animehere") or (siteName == "GoodAnime") or (siteName == "Animeget"):
m=[]
if category == "Movies" and siteName == "Anime44":
m.append(name)
if siteName == "Animehere":
link = soup.findAll('section',{ 'class':'date-list'})
elif siteName == "GoodAnime":
link = soup.findAll('div',{ 'class':'postlist'})
elif siteName == "Animeget":
#link = soup.findAll('div',{ 'class':'abso'})
#print(content)
li = re.findall("/ep.php[^']id[^']*",content)
if li:
li1 = 'http://www.animeget.io'+li[0]
print('---',li1,'-----')
#content1 = ccurlGet(li1,self.cookie_file)
content1 = self.ccurlN(li1,siteName)
soup1 = BeautifulSoup(content1,'lxml')
link = soup1.findAll('a')
elif siteName == "Animegalaxy":
#link = soup.findAll('div',{ 'class':'post'})
link = soup.findAll('a',{ 'itemprop':'url'})
else:
link = soup.findAll('div',{ 'id':'videos'})
if siteName == "Animegalaxy" or siteName == "Animeget":
for i in link:
if 'href' in str(i):
m.append(i['href'])
else:
for i in link:
j = i.findAll('a')
for k in j:
m.append(k['href'])
print(k['href'])
elif (siteName == "Animebox"):
m = re.findall(base + '[^"]*', content)
if not m:
m = re.findall(base +'[^"]*ova[^"]*', content)
if not m:
m = re.findall(base + '[^"]*episode[^"]*', content)
elif (siteName == "AnimeHQ"):
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('li')
m = []
print(link,'---------animehq---epn')
for i in link:
j = i.find('a')
if j and 'href' in str(j):
k = j.text
if 'Episode' in k:
print(k)
l = re.findall('[0-9][^ ]*',k)
if l and len(l) == 2:
m.append(l[0])
m.append(l[1])
print(m)
try:
lower = int(m[0])
upper = int(m[-1])
m[:]=[]
for i in range(lower,upper+1):
m.append(str(i))
except:
print('No Episode Found')
print(m)
n = []
nxt = ""
for i in m:
if siteName != "AnimeHQ" and siteName != "AnimeHQ" and siteName != "Anime-Freak" and siteName != "AnimeBaka" and siteName != "Animeget" and siteName != "Animegalaxy" and siteName != "AnimeMix" and siteName != "GoodAnime" and siteName != "Animebox" and siteName != "AnimeAll":
i = re.sub(base,"",i)
i = re.sub("/","",i)
i = re.sub(".html","",i)
prev = i
if ('share=' not in i) and (prev != nxt) :
n.append(i)
nxt = i
elif siteName == "AnimeHQ":
i = re.sub("/watch/","",i)
n.append(i)
elif siteName == "AnimeWow":
i = re.sub(base,"",i)
n.append(i)
elif siteName == "GoodAnime":
i = i.split('/')[-1]
n.append(i)
elif siteName == "Anime-Freak":
k = i.split('/')
i = k[-1]
i = re.sub(".html","",i)
if i:
n.append(i)
elif siteName == "Animeget":
i = re.sub(base,"",i)
i = re.sub("/","",i)
n.append(i)
elif siteName == "Animegalaxy":
k = i.split('/')
print(k)
if len(k) >3:
j = k[-2]
if "chia-anime" not in j:
n.append(j)
elif siteName == "Animebox":
k = i.split('/')
i = k[-1]+'-'+k[-2]
if i:
n.append(i)
if siteName != "AnimeBaka" and siteName != "AnimeMix" and siteName != "AnimeAll":
m = n
if siteName != "AnimeMix" and siteName != "AnimeAll":
m = list(set(m))
#m.sort()
m=naturallysorted(m)
if m and (siteName == "Anime44" or siteName == "Animehere" or siteName == "AnimePlus" or siteName == "AnimeWow" or siteName == "GoodAnime") and category!="Movies":
if siteName == "AnimePlus":
tmp = m[0].split('-')[-2]
else:
tmp = m[0].split('-')[-1]
try:
num = int(tmp)
except:
num = 0
if num > 1:
arr = m[0].split('-')
if siteName == "AnimePlus":
arr.pop()
try:
t_name = arr[0]
except:
t_name = ""
i = 1
while(i<len(arr)-1):
t_name = t_name+'-'+arr[i]
i = i+1
i = 1
new_arr=[]
while(i<num):
if siteName == "AnimePlus":
new_arr.append(t_name+'-'+str(i)+'-online')
else:
new_arr.append(t_name+'-'+str(i))
i = i+1
m[:0]=new_arr
#m.append(picn)
#m.append(summary)
record_history = True
display_list = True
return (m,summary,picn,record_history,depth_list)
def getNextPage(self,opt_val,pgn,genre_num,search_term):
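# Fetches page `pgn` of a paginated site listing and returns the list of
# entry slugs; only Anime44 and AnimeSquare support pagination here.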
if (pgn >= 1):
pgnum = str(pgn)
if opt_val.lower() == 'anime44':
if pgn == 1:
# 'search_term' is assumed here; the original referenced an undefined 'name'
url = "http://www.animenova.org/category/" + search_term
else:
url = "http://www.animenova.org/category/" + search_term + '/page/' + pgnum
print(url)
content = ccurl(url)
m = re.findall('http://www.animenova.org/[^"]*episode[^"]*', content)
m = list(set(m))
m.sort()
j=0
for i in m:
i = re.sub("http://www.animenova.org/","",i)
m[j] = i
j = j+1
elif opt_val.lower() == 'animesquare':
url = 'http://www.masterani.me/api/anime/filter?order=score_desc&page='+str(pgn)
content = self.ccurlN(url,opt_val)
l = json.loads(content)
n=l['data']
print(n)
m = []
for i in n:
title = i['title']
print(title)
nm = i['slug']
print(nm)
ep_cnt = i['episode_count']
print(ep_cnt)
nm_app = str(ep_cnt) +','+str(nm)
m.append(nm_app)
return m
def getFinalUrl(self,siteName,name,epn,mirrorNo,category,quality):
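# Resolves episode `epn` of `name` to a direct (or embeddable) video URL:
# builds the site-specific episode page URL, scrapes the mirror/iframe
# links, and picks a mirror according to `mirrorNo` and `quality`.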
global qualityVideo
qualityVideo = quality
if siteName == "Anime44":
url = "http://www.animenova.org/" + epn
elif siteName == "Animegalaxy":
url = "http://www.chia-anime.tv/" + epn + '/'
elif siteName == "Animeget":
url = "http://www.animeget.io/watch/" + epn
self.cookie_file = os.path.join(self.tmp_dir,'animeget.txt')
elif siteName == "Animehere":
url = "http://www.animehere.com/" + epn + ".html"
elif siteName == "AnimePlus":
url = "http://www.animeplus.tv/" + epn
self.cookie_file = os.path.join(self.tmp_dir,'cookie_plus.txt')
elif siteName == "AnimeWow":
url = "http://www.animewow.org/" + epn
elif siteName == "Animebox":
epn = (epn)
new_epn = epn.rsplit('-',1)[0]
epn_c = epn.split('-')[-1]
url = "http://www.animebox.tv/video/" + epn_c+'/'+new_epn + "/"
print(url)
elif siteName == "AnimeHQ":
new_name = name.rsplit('-',1)[0]
new_c = name.split('-')[-1]
url = "http://moetube.net/watch/" + new_c+'/'+new_name + "/" + epn
print('url=',url)
self.cookie_file = os.path.join(self.tmp_dir,'animehq.txt')
elif siteName == "GoodAnime":
url = "http://www.goodanime.net/" + epn
elif siteName == "Anime-Freak":
url = "http://www.anime-freak.org/anime-stream/" + name + '/' + epn + '.html'
elif siteName == "AnimeBaka":
url = "http://animebaka.tv/watch/" +name+'/'+ epn
elif siteName == "Animefun":
url = "http://animeonline.one/" + epn +'/'
elif siteName == "AnimeNet":
url = "http://www.watch-anime.net/" +name+'/'+ epn +'/'
elif siteName == "AnimeMax":
url = "http://gogocartoon.us/" + epn
elif siteName == "AnimeStream":
if "Subbed" in epn:
epn = re.sub('Subbed-',"",epn)
url = "http://www.ryuanime.com/watch/subbed/episode/"+epn
elif "Dubbed" in epn:
epn = re.sub('Dubbed-',"",epn)
url = "http://www.ryuanime.com/watch/dubbed/episode/"+epn
elif siteName == "AnimeSquare":
name1 = name.split(',',1)[1]
epncnt = name.split(',',1)[0]
url = "http://www.masterani.me/anime/watch/" + name1+"/" + epn
self.cookie_file = os.path.join(self.tmp_dir,'animeSquare.txt')
elif siteName == "Anime1":
url = "http://www.anime1.com/watch/"+name+"/" + epn
elif siteName == "AnimeAll":
epn1=epn.rsplit('-',1)[0]
epn2=epn.rsplit('-',1)[1]
url = "http://www.watchanimeshows.tv/"+epn2+'/'+epn1+'/'
print(url)
elif siteName == "AnimeMix":
url = epn.split(' ')[1]
print(url)
content = self.ccurlN(url,siteName)
if siteName == "Animegalaxy":
final = ""
soup = BeautifulSoup(content,'lxml')
if siteName == "Animeget":
link = soup.findAll('iframe')
else:
link = soup.findAll('a',{'id':'download'})
link1 = soup.findAll('div',{'id':'video44'})
for i in link1:
a = i.findAll('a')
for j in a:
k = j['href']
t = k.split('/')
num = t[-1]
if siteName == "Animeget":
final1 = "http://www.animeget.io/watch/" + t[-3] +'/' + num + '/'
else:
final1 = "http://www.chia-anime.tv/view/" + t[-3] +'/' + num + '/'
print(final1)
print(len(link))
j = 0
m = []
if link:
for i in link:
if siteName == "Animeget":
print(i['src'])
m.append(i['src'])
else:
print(i['href'])
m.append(i['href'])
if m:
print(m[0])
content = ccurl(m[0])
links = re.findall('http[^"]*.mp4',content)
if links:
final = links[0]
print(final)
if not final:
content = ccurl(final1)
final2 = re.findall('http://[^"]*video44[^"]*',content)
if final2:
final = findurl(final2[0])
elif siteName == "Animeget":
m = re.findall('/download[^"]id[^"]*',content)
print(m)
if m:
urlN = "http://www.animeget.io"+m[0]
content = ccurl(urlN+'#'+'-Ib'+'#'+self.cookie_file)
n = re.findall('Location: [^\n]*', content)
print(n)
if n:
#print(m
final1 = n[-1]
final = re.sub('Location: |\r', '', final1)
elif siteName == "Animebox":
m = re.findall('http[^"]*hash[^"]*',content)
print(m)
if m:
content = ccurl(m[0])
n = re.findall('http[^"]*hash[^"]*mp4[^"]*',content)
print(n)
if n:
final = n[0]
elif siteName == "AnimeAll":
m =[]
n = []
o = []
final = ''
mirrorNo = mirrorNo - 1
soup = BeautifulSoup(content,'lxml')
#link = soup.findAll('iframe',{'id':'embed'})
link = soup.findAll('iframe',{'class':'iframe-embed'})
if not link:
url_v =''
n_v = ''
id_v = ''
link1 = soup.findAll('script',{'type':'text/javascript'})
for i in link1:
j = i.text
if 'var datas' in j:
var_datas = j
break
m_v = re.findall("n:'[^']*|id:'[^']*|url:[^,]*",var_datas)
print(m_v)
for i in m_v:
if 'n:' in i:
n_v = re.sub("n:|'",'',i)
elif 'id:' in i:
id_v = re.sub("id:|'",'',i)
elif 'url: ' in i:
url_v = re.sub('url:|"','',i)
if not url_v:
url_v = "http://www.watchanimeshows.tv/vload.php"
url_n = url_v+'?n='+n_v+'&id='+id_v
print (url_n)
if url_n:
content = ccurl(url_n)
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('iframe',{'class':'iframe-embed'})
for i in link:
try:
j = i['data-lazy-src']
except:
j = i['src']
if "vidcrazy" in j or "uploadcrazy" in j or "auengine" in j:
m.append(j)
elif "videoweed" in j:
print("")
elif "mp4upload" in j:
o.append(j)
else:
n.append(j)
if qualityVideo == "hd":
m = o+m+n
else:
m = m+o+n
print(m)
if mirrorNo == 0:
for i in range(len(m)):
msg = "Total " + str(len(m)) + " Mirrors, Selecting Mirror "+str(mirrorNo + 1)
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
final = findurl(str(m[i]))
print(i)
print(final)
if final:
print(final)
break
else:
msg = "Total " + str(len(m)) + " Mirrors, Selecting Mirror "+str(mirrorNo + 1)
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
final = findurl(str(m[mirrorNo]))
elif siteName == "AnimeSquare":
soup = BeautifulSoup(content,'lxml')
content1 = soup.findAll('script',{'type':'text/javascript'})
final = ""
for i in content1:
#print (i.text)
if 'mirrors:' in i.text:
content11 = (i.text)
m = re.findall('"quality"[^,]*|"embed_id"[^,]*|"embed_prefix"[^,]*',content11)
#print(m)
for i in range(len(m)):
m[i] = re.sub('"|embed_id":|embed_prefix":|quality":','',m[i])
m[i] = re.sub("'",'',m[i])
m[i] = m[i].replace('[\\]','')
#print(m)
n = []
for i in m:
if '/' in i:
#print (i)
j = i.split('\\')
#print(j)
nm = j[0]
for k in range(len(j)-1):
nm = nm + j[k+1]
#print(nm)
n.append(nm)
else:
#print(i)
n.append(i)
sd_arr =[]
hd_arr =[]
for i in range(0,len(n),3):
if '480' in n[i+1]:
sd_arr.append(n[i+2]+n[i])
elif '720' in n[i+1]:
hd_arr.append(n[i+2]+n[i])
print(sd_arr)
print(hd_arr)
total_cnt = 0
final_sd_hd_arr =[]
if quality == 'sd' and sd_arr:
url = sd_arr[mirrorNo-1]
total_cnt = len(sd_arr)
final_sd_hd_arr = sd_arr
elif quality == 'hd' and hd_arr:
url = hd_arr[mirrorNo-1]
total_cnt = len(hd_arr)
final_sd_hd_arr = hd_arr
elif quality == 'hd' and not hd_arr:
url = sd_arr[mirrorNo-1]
total_cnt = len(sd_arr)
final_sd_hd_arr = sd_arr
quality = 'sd'
print(url)
msg = "Total " + str(len(sd_arr)) + " SD Mirrors And \n"+ str(len(hd_arr)) + " HD Mirrors+\n"+'Selecting '+str(quality) + " Mirror No. " + str(mirrorNo)
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
if mirrorNo == 1:
for i in range(len(final_sd_hd_arr)):
msg = 'Selecting '+str(quality) + " Mirror No. " + str(i+1)
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
url = final_sd_hd_arr[i]
if 'mp4upload' in url and not url.endswith('.html'):
url = url+'.html'
if url.startswith('null'):
url=url.replace('null','')
final = findurl(url)
if final:
break
else:
url = final_sd_hd_arr[mirrorNo-1]
if 'mp4upload' in url and not url.endswith('.html'):
url = url+'.html'
if url.startswith('null'):
url=url.replace('null','')
final = findurl(url)
elif siteName == "AnimeMix":
shrink_link = ''
if "adf.acb.im" in url:
#shrink_link=str(cloudfare(url))
shrink_link=str(unshorten_url(url))
elif "q.gs" in url:
shrink_link=str(unshorten_url(url))
else:
shrink_link = shrink_url(str(url),self.tmp_dir)
if 'linkshrink' in shrink_link:
shrink_link = shrink_url(str(url),self.tmp_dir)
if not shrink_link:
shrink_link = url
if "mediafire" in shrink_link or "embedupload" in shrink_link or "solidfiles" in shrink_link or "mirrorcreator" in shrink_link or "tusfiles" in shrink_link:
final = findurl(shrink_link)
else:
#content = (subprocess.check_output(['curl','-I','-L',shrink_link]))
#content = getContentUnicode(content)
#print(content
content = ccurl(shrink_link+'#'+'-IA')
m = []
m[:] = []
m = re.findall('Location: [^\n]*', content)
print(m)
if m:
#print(m
final1 = m[0]
final1 = re.sub('Location: |\r', '', final1)
print(final1)
final = findurl(final1)
elif siteName == "AnimeStream":
soup = BeautifulSoup(content,'lxml')
link = soup.find('div',{'id':'content'})
print(link)
final1 = link.find('iframe')['src']
if not final1:
final1 = link.find('IFRAME')['SRC']
final = findurl(final1)
elif siteName == "Anime1":
m = re.findall('file: "[^"]*',content)
if m:
final = re.sub('file: "','',m[0])
final = re.sub(' ','%20',final)
else:
"No Url"
elif siteName == "AnimeMax":
final = ''
soup = BeautifulSoup(content,'lxml')
#link = soup.find('div',{'class':'anime_video_body_watch'})
link = soup.find('div',{'class':'main-video'})
sd = ''
hd = ''
sd480 = ''
if link:
link2 = link.find('iframe')
if link2:
if 'src' in str(link2):
link1 = link2['src']
print(link1,'---')
if link1:
content1 = ccurl(link1)
soup = BeautifulSoup(content1,'lxml')
links = soup.findAll('source')
for i in links:
if 'src' in str(i):
j = i['src']
if 'itag=22' in j:
hd = j
elif 'itag=18' in j:
sd = j
elif 'itag=59' in j:
sd480 = j
elif 'itag=43' in j:
sd = j
print(sd)
print(sd480)
print(hd)
if not sd and not hd and not sd480:
soup = BeautifulSoup(content,'lxml')
link = soup.find('select',{'id':'selectQuality'})
if link:
link1 = link.findAll('option')
for i in link1:
j = i['value']
if 'itag=18' in j:
sd = j
elif 'itag=22' in j:
hd = j
elif 'itag=37' in j:
full_hd = j
elif '=m18' in j:
sd = j
elif '=m22' in j:
hd = j
final_cnt = 0
final_quality = ''
if sd:
final_cnt = final_cnt+1
final_quality = final_quality + 'SD '
if sd480:
final_cnt = final_cnt+1
final_quality = final_quality + '480P '
if hd:
final_cnt = final_cnt+1
final_quality = final_quality + 'HD '
msg = "Total " + str(final_cnt) + " Quality Video Available "+final_quality+" Selecting "+str(quality) + " Quality"
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
if quality == "sd":
final_q = sd
elif quality == 'sd480p':
final_q = sd480
elif quality == 'hd':
final_q = hd
if not final_q and sd:
final_q = sd
print(final_q)
if final_q:
#content = (subprocess.check_output(['curl','-L','-I','-A',self.hdr,final_q]))
#content = getContentUnicode(content)
content = ccurl(final_q+'#'+'-I')
print(content)
m = re.findall('Location: https[^\n]*', content)
#print(m
if m:
#print(m
final = m[0]
final = re.sub('Location: |\r', '', final)
else:
final = ''
elif siteName == "AnimeNet":
finalArr = []
soup = BeautifulSoup(content,'lxml')
link = soup.findAll('iframe')
print(link)
for i in link:
j = i['src']
if "video44" in j or "playpanda" in j or "easyvideo" in j or "yourupload" in j or "playbb" in j or "auengine" in j or "play44" in j:
j = re.sub('amp;','',j)
finalArr.append(j)
print(finalArr)
if mirrorNo == 1:
for i in finalArr:
final1 = findurl(i)
print(final1)
if final1:
final = final1
break
else:
final1 = findurl(finalArr[mirrorNo-1])
if final1:
final = final1
elif siteName == "Animefun":
arr =[]
final =""
m = []
link2 =[]
soup = BeautifulSoup(content,'lxml')
link = soup.find('div',{'id':'video_inner'})
if link:
link1 = link.findAll('iframe')
if link1:
for i in link1:
link2.append(i['src'])
link4 = re.findall('src="http[^"]*mp4upload[^"]*',content)
for i in link4:
i = re.sub('src="',"",i)
link2.append(i)
else:
link1 = ""
link2 = ""
if link2:
print(link2)
mirrorNo = mirrorNo - 1
if mirrorNo < len(link2):
link3 = link2[mirrorNo]
else:
link3 = link2[0]
if "mp4upload" in link3:
final = findurl(link3)
else:
content = ccurl(link3)
soup = BeautifulSoup(content,'lxml')
j = soup.find('source')
if j:
tmp = j['src']
arr.append(tmp)
final1 = arr[0]
print(final1)
#content = (subprocess.check_output(["curl","-A",self.hdr,"-I",final1]))
#content = getContentUnicode(content)
content = ccurl(final1+'#'+'-I')
print(content)
m[:]=[]
m = re.findall('https[^\n]*', content)
#print(m
if m:
#print(m
final = m[0]
final = re.sub('\r', '', final)
elif siteName == "AnimeBaka":
soup = BeautifulSoup(content,'lxml')
final1 = re.findall('data-src="[^"]*',content)
final2 = re.sub('data-src="','',final1[0])
print(final2)
code = final2.split('/')[-1]
final3 = "https://bakavideo.tv/get/files.embed?f="+code
print(final3)
content = ccurl(final3)
m = re.findall('"content":"[^"]*',content)
content = re.sub('"content":"','',m[0])
#content = content.decode("base64")
content = str(base64.b64decode(content).decode('utf-8'))
print(content)
soup = BeautifulSoup(content,'lxml')
final = soup.find('source')['src']
print("Beautifulsoup="+final)
elif siteName == "Anime-Freak":
m = []
soup = BeautifulSoup(content,'lxml')
link1 = soup.find('div',{'id':'play_options'})
link = link1.findAll('a')
j = 0
for i in link:
k = i['href']
if not "gogoupload" in str(k) and not "javascript" in str(k):
m.append(k.replace(' ','+'))
j = j+1
if j == 2:
break
#print(i.text + "----" + urllib.unquote(k).decode('utf8')
print(m)
if not m:
n = re.findall("http://www.anime-freak.org/anime_player.php[^']*",content)
if n:
tmp = re.sub('[ ]','+',n[0])
m.append(tmp)
print(m)
print(len(m))
final=[]
i = 0
for i in m:
content = ccurl(i)
#server = i.split('/')[2]
#print(server
n = re.findall('http://[^"]*',content)
if n:
lnk = n[0]
print(lnk)
k = lnk.split('/')
server = k[2]
content = ccurl(lnk)
arr = re.findall("[']fname=[^']*",content)
if arr:
url1 = arr[0]
print(url1)
url2 = re.findall('ddata[^"]*&uid',url1)
url3 = re.sub('&uid','',url2[0])
final1 = "http://" + server + "/" + url3
final1 = str(urllib.parse.unquote(final1))
url4 = re.sub('[+]','%20',final1)
final1 = url4
print(final1)
final.append(final1)
elif siteName == "AnimeHQ":
#content = ccurl(url)
#soup = BeautifulSoup(content,'lxml')
##content = ccurl(url)
#cloudfareUrl(url,'')
#content = open('/tmp/AnimeWatch/moetube.txt').read()
#print(content)
#soup = BeautifulSoup(content,'lxml')
post_dict = {'id':new_c,'ep':epn,'chk':'2'}
#content = ccurlHQ('http://www.moetube.net/rui.php',self.cookie_file,post)
#print(content)
content = self.ccurlN('http://www.moetube.net/rui.php',cookie=self.cookie_file,post=post_dict)
print(content)
#final = content
glink = content.split('/')[-1]
"""
#link = soup.find('div',{'id':'vidholder'})
link = soup.find('div',{'id':'moaroptions'})
#link1 = link.find('source')['src']
link2 = link.findAll('a')
for i in link2:
if 'href' in str(i):
k = i['href']
link1 = 'http://moetube.net'+k
if 'download' in k:
break
print(link1)
"""
#glink1 = re.findall("var glink = '[^']*",content)
#print(glink1)
#glink = re.sub("var glink = '",'',glink1[0])
print(glink)
url1 = urllib.parse.quote(url)
link1 = "https://docs.google.com/get_video_info?eurl="+url1+"&authuser=&docid="+glink
print(link1)
#content = (subprocess.check_output(['curl','-L','-A',self.hdr,link1]))
#content = getContentUnicode(content)
content = ccurl(link1)
content = urllib.parse.unquote(content)
#print(content)
cont1 = content.split('|')
#print(cont1)
sd =""
hd =""
sd480 =""
sd44 =""
for i in range(len(cont1)):
if 'itag=18' in cont1[i]:
sd = cont1[i]
break
for i in range(len(cont1)):
if 'itag=22' in cont1[i]:
hd = cont1[i]
break
for i in range(len(cont1)):
if 'itag=35' in cont1[i]:
sd480 = cont1[i]
break
for i in range(len(cont1)):
if 'itag=44' in cont1[i]:
sd44 = cont1[i]
break
print(sd)
print(hd)
print(sd480)
print(sd44)
final_cnt = 0
final_quality = ''
if sd:
final_cnt = final_cnt+1
final_quality = final_quality + 'SD '
if sd44:
final_cnt = final_cnt+1
final_quality = final_quality + 'SD '
if sd480:
final_cnt = final_cnt+1
final_quality = final_quality + '480P '
if hd:
final_cnt = final_cnt+1
final_quality = final_quality + 'HD '
msg = "Total " + str(final_cnt) + " Quality Video Available "+final_quality+" Selecting "+str(quality) + " Quality"
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
if quality == 'sd':
link1 = sd
elif quality == "sd480p":
if sd480:
link1 = sd480
elif sd:
link1 = sd
elif quality == 'hd':
if hd:
link1 = hd
elif sd480:
link1 = sd480
elif sd:
link1 = sd
#content = (subprocess.check_output(['curl','-I','-L','-A',self.hdr,link1]))
#content = getContentUnicode(content)
content = ccurl(link1+'#'+'-I')
m = re.findall('Location: [^\n]*',content)
if m:
final = re.sub('Location: |\r','',m[-1])
else:
final = link1
print(final)
elif (siteName == "AnimeWow") or (siteName == "AnimePlus") or (siteName == "Anime44") or (siteName == "Animegalaxy") or (siteName == "Animehere") or (siteName == "GoodAnime"):
print(epn)
#print("Pre_Opt="+pre_opt
#print("Opt="+opt
opt = category
if (siteName == "AnimeWow" and opt == "History" and category == "Movies"):
opt = "Movies"
if (siteName == "Anime44" or siteName == "AnimePlus" or siteName == "Animegalaxy" or siteName == "Animehere" or siteName == "AnimeWow") and category == "Movies":
opt = "Movies"
if opt == "Movies":
soup = BeautifulSoup(content,'lxml')
if siteName != "Animehere":
link = soup.findAll('ul',{ 'class':'ver_list'})
else:
link = soup.findAll('ul',{ 'class':'version cfix'})
mir =[]
mirror_n =""
for i in link:
a = i.findAll('a')
for k in a:
if siteName != "Animehere":
mir.append(k['href'])
else:
mir.append("http://www.animehere.com"+k['href'])
print(mir)
if not mir or len(mir) == 1:
mirror = mirrorNo
mirror_n= "NO"
else:
mirror = mirrorNo
if mirror == 1:
if siteName != "Animehere":
link = soup.findAll('div',{ 'class':'vmargin'})
if not link:
link = soup.findAll('div',{ 'id':'streams'})
else:
link = soup.findAll('div',{ 'id':'playbox'})
l = 0
final = []
m =[]
for i in link:
a = i.findAll('iframe')
for k in a:
m.append(k['src'])
l = l + 1
print(m)
if mirror_n == "NO":
return m
if m:
final1 = findurl(m[0])
if not final1:
if mir:
url = mir[1]
final = []
print(url)
#if siteName != "AnimePlus":
# content = ccurl(url)
#else:
# content = ccurl_cookie(url,self.cookie_file)
content = self.ccurl(url,siteName)
soup = BeautifulSoup(content,'lxml')
if siteName != "Animehere":
link = soup.findAll('div',{ 'class':'vmargin'})
if not link:
link = soup.findAll('div',{ 'id':'streams'})
else:
link = soup.findAll('div',{ 'id':'playbox'})
l = 0
n = []
for i in link:
a = i.findAll('iframe')
for k in a:
n.append(k['src'])
print(n)
return n
else:
return m
else:
if mir:
url = mir[mirrorNo-1]
final = []
print(url)
#if siteName != "AnimePlus":
# content = ccurl(url)
#else:
# content = ccurl_cookie(url,self.cookie_file)
content = self.ccurl(url,siteName)
soup = BeautifulSoup(content,'lxml')
if siteName != "Animehere":
link = soup.findAll('div',{ 'class':'vmargin'})
if not link:
link = soup.findAll('div',{ 'id':'streams'})
else:
link = soup.findAll('div',{ 'id':'playbox'})
l = 0
n = []
for i in link:
a = i.findAll('iframe')
for k in a:
n.append(k['src'])
print(n)
return n
else:
#if siteName != "AnimePlus":
# content = ccurl(url)
#else:
# content = ccurl_cookie(url,self.cookie_file)
soup = BeautifulSoup(content,'lxml')
if siteName == "Animehere":
link = soup.findAll('div',{'id':'playbox'})
elif siteName == "GoodAnime":
link = soup.findAll('div',{'class':'postcontent'})
else:
link = soup.findAll('div',{'id':'streams'})
print(len(link))
j = 0
arr =[]
for i in link:
a = i.findAll('iframe')
for k in a:
arr.append(k['src'])
j = 1
length = len(arr)
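# keep trying mirrors until findurl() resolves one: first the requested
# mirror, then the remaining ones in order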
while (j <= length):
mirrorNo = mirrorNo - 1
print(arr)
msg = "Total " + str(len(arr)) + " Mirrors, Selecting Mirror "+str(mirrorNo + 1)
#subprocess.Popen(["notify-send",msg])
send_notification(msg)
final = findurl(arr[mirrorNo])
if final:
break
j = j + 1
mirrorNo = j
print(final)
print(mirrorNo)
return final
def urlResolve(self,url):
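# Thin wrapper around findurl(); returns the resolved URL or 0 on failure.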
final = findurl(url)
if final:
return final
else:
return 0
| kanishka-linux/AnimeWatch | Python-SETUP/AnimeWatch_PyQt5/Plugins/SubbedAnime.py | Python | gpl-3.0 | 82,738 | [
"ADF"
] | e882ec5bc144155ff9083e894b3ef478695df33e3eeed50587810634cbbdf35d |
#! /usr/bin/env python
"""
This script performs event averaging for particle
spectra and anisotropic flow coefficients calculated
from event-by-event simulations
v_n is analyzed up to n = 6
Format for particle_XXX_vndata.dat file:
n_order real_part real_part_err imag_part imag_part_err
Format for particle_XXX_vndata_diff.dat file:
pT(GeV) pT_err(GeV) dN/(2pi dy pT dpT)(GeV^-2) dN/(2pi dy pT dpT)_err(GeV^-2)
vn_real vn_real_err vn_imag vn_imag_err
All the errors are only statistic errors
"""
from sys import argv, exit
from os import path, mkdir
from glob import glob
from numpy import *
import shutil
# define colors
purple = "\033[95m"
green = "\033[92m"
blue = "\033[94m"
yellow = "\033[93m"
red = "\033[91m"
normal = "\033[0m"
try:
working_folder = path.abspath(argv[1])
avg_folder = path.join(path.abspath(argv[2]),
working_folder.split('/')[-1])
print("output folder: %s" % avg_folder)
if(path.isdir(avg_folder)):
print("folder %s already exists!" % avg_folder)
var = input("do you want to delete it? [y/N]")
if 'y' in var:
shutil.rmtree(avg_folder)
else:
print("please choose another folder path~")
exit(0)
mkdir(avg_folder)
except IndexError:
print("Usage: average_event_spvn.py working_folder results_folder")
exit(1)
particle_list = ['9999', '211', '321', '2212', '-211', '-321', '-2212',
'3122', '-3122', '3312', '-3312', '3334', '-3334',
'333']
particle_name_list = ['charged_hadron', 'pion_p', 'kaon_p', 'proton',
'pion_m', 'kaon_m', 'anti_proton',
'Lambda', 'anti_Lambda', 'Xi_m', 'anti_Xi_p',
'Omega', 'anti_Omega', 'phi']
nonlinear_reponse_correlator_name_list = [
'v4_L', 'v4(Psi2)', 'rho_422', 'chi_422',
'v5_L', 'v5(Psi23)', 'rho_523', 'chi_523',
'v6_L', 'v6(Psi2)', 'v6(Psi3)',
'rho_6222', 'rho_633', 'chi_6222', 'chi_633']
symmetric_cumulant_name_list = ['SC_32', 'SC_42']
n_order = 7
def calcualte_inte_vn(pT_low, pT_high, data):
"""
this function calculates the pT-integrated vn in a
given pT range (pT_low, pT_high) for every event in the data
"""
npT = 50
pT_inte_array = linspace(pT_low, pT_high, npT)
dpT = pT_inte_array[1] - pT_inte_array[0]
dN_event = data[:, 2]
pT_event = data[:, 0]
dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))
N_event = data[:, -1]
N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))
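# the factor 1/0.1 presumably undoes the 0.1 GeV bin width of the input
# spectra so that N counts particles (assumption based on the file format)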
N = sum(N_interp)*dpT/0.1
temp_vn_array = [N,]
for iorder in range(1, n_order):
vn_real_event = data[:, 4*iorder]
vn_imag_event = data[:, 4*iorder+2]
vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)
vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)
vn_real_inte = (
sum(vn_real_interp*dN_interp*pT_inte_array)
/sum(dN_interp*pT_inte_array))
vn_imag_inte = (
sum(vn_imag_interp*dN_interp*pT_inte_array)
/sum(dN_interp*pT_inte_array))
vn_inte = vn_real_inte + 1j*vn_imag_inte
temp_vn_array.append(vn_inte)
return(temp_vn_array)
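# A minimal, self-contained sketch of the input calcualte_inte_vn expects;
# every number below is made up for illustration, and this helper is not
# part of the original analysis and is never called by it.
def _example_inte_vn():
    pT = linspace(0.05, 3.95, 40)
    data = zeros((40, 4*n_order + 1))
    data[:, 0] = pT                        # pT (GeV)
    data[:, 2] = 100.*exp(-2.*pT)          # dN/(2pi dy pT dpT)
    data[:, 8] = 0.1                       # constant Re(v2) for illustration
    data[:, -1] = data[:, 2]*pT*0.1*2.*pi  # rough particle yield per pT bin
    # returns [N, v1, ..., v6] with v2 ~ 0.1 and the other vn ~ 0
    return calcualte_inte_vn(0.2, 3.0, data)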
def calculate_chi_422(vn_array):
"""
chi_422 = <Re(V4*conj(V2)**2.)>/<abs(V2)**4.>
"""
dN = real(vn_array[:, 0])
Q2 = dN*vn_array[:, 2]
Q4 = dN*vn_array[:, 4]
nev = len(dN)
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))
- 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.
+ 2*dN*(dN - 3.))
N3_weight = dN*(dN - 1.)*(dN - 2.)
chi_422_num = Q4*conj(Q2)*conj(Q2) - 2.*Q2*conj(Q2) - Q4*conj(Q4) + 2.*dN
chi_422_JK = zeros(nev)
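# jackknife: recompute the ratio leaving one event out at a time; the
# spread of the leave-one-out estimates gives the statistical error below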
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
chi_422_JK[iev] = (
mean(real(chi_422_num[array_idx])/N3_weight[array_idx])
/(mean(real(Q2_4[array_idx])/N4_weight[array_idx])))
chi_422_mean = mean(chi_422_JK)
chi_422_err = sqrt((nev - 1.)/nev*sum((chi_422_JK - chi_422_mean)**2.))
return(chi_422_mean, chi_422_err)
def calculate_chi_523(vn_array):
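"""
chi_523 = <Re(V5*conj(V2)*conj(V3))>/<abs(V2)**2.*abs(V3)**2.>
(inferred from the Q-vector algebra below, by analogy with chi_422)
"""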
dN = real(vn_array[:, 0])
Q1 = dN*vn_array[:, 1]
Q2 = dN*vn_array[:, 2]
Q3 = dN*vn_array[:, 3]
Q5 = dN*vn_array[:, 5]
nev = len(dN)
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q_32 = ((abs(Q2)**2.)*(abs(Q3)**2.) - 2.*real(Q5*conj(Q2)*conj(Q3))
- 2.*real(Q3*conj(Q1)*conj(Q2)) + abs(Q5)**2. + abs(Q1)**2.
- (dN - 4.)*(abs(Q2)**2. + abs(Q3)**2.) + dN*(dN - 6.)
)
N3_weight = dN*(dN - 1.)*(dN - 2.)
chi_523_num = (Q5*conj(Q2)*conj(Q3) - Q3*conj(Q3) - Q2*conj(Q2)
- Q5*conj(Q5) + 2.*dN)
chi_523_JK = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
chi_523_JK[iev] = (
mean(real(chi_523_num[array_idx])/N3_weight[array_idx])
/(mean(real(Q_32[array_idx])/N4_weight[array_idx])))
chi_523_mean = mean(chi_523_JK)
chi_523_err = sqrt((nev - 1.)/nev*sum((chi_523_JK - chi_523_mean)**2.))
return(chi_523_mean, chi_523_err)
def calculate_chi_6222(vn_array):
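"""
chi_6222 = <Re(V6*conj(V2)**3.)>/<abs(V2)**6.>
(inferred from the Q-vector algebra below, by analogy with chi_422)
"""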
dN = real(vn_array[:, 0])
Q2 = dN*vn_array[:, 2]
Q4 = dN*vn_array[:, 4]
Q6 = dN*vn_array[:, 6]
nev = len(dN)
N6_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)*(dN - 4.)*(dN - 5.)
Q2_6 = (abs(Q2)**6. + 9*(abs(Q4)**2.)*(abs(Q2)**2.)
- 6.*real(Q4*Q2*conj(Q2)*conj(Q2)*conj(Q2))
+ 4.*real(Q6*conj(Q2)*conj(Q2)*conj(Q2))
- 12.*real(Q6*conj(Q4)*conj(Q2))
+ 18.*(dN - 4.)*real(Q4*conj(Q2)*conj(Q2))
+ 4.*(abs(Q6)**2.)
- 9.*(dN - 4.)*((abs(Q2)**4.) + (abs(Q4)**2.))
+ 18.*(dN - 5.)*(dN - 2.)*(abs(Q2)**2.)
- 6.*dN*(dN - 4.)*(dN - 5.))
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
chi_6222_num = (Q6*conj(Q2)*conj(Q2)*conj(Q2) - 3.*Q6*conj(Q4)*conj(Q2)
- 3.*Q4*conj(Q2)*conj(Q2) + 2.*Q6*conj(Q6) + 6.*Q2*conj(Q2)
+ 3.*Q4*conj(Q4) - 6.*dN)
chi_6222_JK = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
chi_6222_JK[iev] = (
mean(real(chi_6222_num[array_idx])/N4_weight[array_idx])
/(mean(real(Q2_6[array_idx])/N6_weight[array_idx])))
chi_6222_mean = mean(chi_6222_JK)
chi_6222_err = sqrt((nev - 1.)/nev*sum((chi_6222_JK - chi_6222_mean)**2.))
return(chi_6222_mean, chi_6222_err)
def calculate_chi_633(vn_array):
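"""
chi_633 = <Re(V6*conj(V3)**2.)>/<abs(V3)**4.>
(inferred from the Q-vector algebra below, by analogy with chi_422)
"""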
dN = real(vn_array[:, 0])
Q3 = dN*vn_array[:, 3]
Q6 = dN*vn_array[:, 6]
nev = len(dN)
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q3_4 = ((abs(Q3)**4.) - 2.*real(Q6*conj(Q3)*conj(Q3))
- 4.*(dN - 2.)*(abs(Q3)**2.) + abs(Q6)**2.
+ 2*dN*(dN - 3.))
N3_weight = dN*(dN - 1.)*(dN - 2.)
chi_633_num = Q6*conj(Q3)*conj(Q3) - 2.*Q3*conj(Q3) - Q6*conj(Q6) + 2.*dN
chi_633_JK = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
chi_633_JK[iev] = (
mean(real(chi_633_num[array_idx])/N3_weight[array_idx])
/(mean(real(Q3_4[array_idx])/N4_weight[array_idx])))
chi_633_mean = mean(chi_633_JK)
chi_633_err = sqrt((nev - 1.)/nev*sum((chi_633_JK - chi_633_mean)**2.))
return(chi_633_mean, chi_633_err)
def calculate_v4_Psi2(chi_422, chi_422_err, vn_array):
"""
v4(Psi2) = chi_422*sqrt(<abs(V2)**4>)
"""
dN = real(vn_array[:, 0])
Q2 = dN*vn_array[:, 2]
Q4 = dN*vn_array[:, 4]
nev = len(dN)
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))
- 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.
+ 2*dN*(dN - 3.))
v2_factor = sqrt(mean(Q2_4/N4_weight))
v2_factor_err = std(Q2_4/N4_weight)/(2.*v2_factor)/sqrt(nev)
v4_Psi2 = chi_422*v2_factor
v4_Psi2_err = sqrt((chi_422_err*v2_factor)**2.
+ (chi_422*v2_factor_err)**2.)
return(v4_Psi2, v4_Psi2_err)
def calculate_v5_Psi23(chi_523, chi_523_err, vn_array):
"""
v5(Psi23) = chi_523*sqrt(<abs(V2)**2*abs(V3)**2>)
"""
dN = real(vn_array[:, 0])
Q1 = dN*vn_array[:, 1]
Q2 = dN*vn_array[:, 2]
Q3 = dN*vn_array[:, 3]
Q5 = dN*vn_array[:, 5]
nev = len(dN)
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q_32 = ((abs(Q2)**2.)*(abs(Q3)**2.) - 2.*real(Q5*conj(Q2)*conj(Q3))
- 2.*real(Q3*conj(Q1)*conj(Q2)) + abs(Q5)**2. + abs(Q1)**2.
- (dN - 4.)*(abs(Q2)**2. + abs(Q3)**2.) + dN*(dN - 6.)
)
v23_factor = sqrt(mean(Q_32/N4_weight))
v23_factor_err = std(Q_32/N4_weight)/(2.*v23_factor)/sqrt(nev)
v5_Psi23 = chi_523*v23_factor
v5_Psi23_err = sqrt((chi_523_err*v23_factor)**2.
+ (chi_523*v23_factor_err)**2.)
return(v5_Psi23, v5_Psi23_err)
def calculate_v6_Psi2(chi_6222, chi_6222_err, vn_array):
"""
v6(Psi2) = chi_6222*sqrt(<abs(V2)**6>)
"""
dN = real(vn_array[:, 0])
Q2 = dN*vn_array[:, 2]
Q4 = dN*vn_array[:, 4]
Q6 = dN*vn_array[:, 6]
nev = len(dN)
N6_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)*(dN - 4.)*(dN - 5.)
Q2_6 = (abs(Q2)**6. + 9*(abs(Q4)**2.)*(abs(Q2)**2.)
- 6.*real(Q4*Q2*conj(Q2)*conj(Q2)*conj(Q2))
+ 4.*real(Q6*conj(Q2)*conj(Q2)*conj(Q2))
- 12.*real(Q6*conj(Q4)*conj(Q2))
+ 18.*(dN - 4.)*real(Q4*conj(Q2)*conj(Q2))
+ 4.*(abs(Q6)**2.)
- 9.*(dN - 4.)*((abs(Q2)**4.) + (abs(Q4)**2.))
+ 18.*(dN - 5.)*(dN - 2.)*(abs(Q2)**2.)
- 6.*dN*(dN - 4.)*(dN - 5.))
v2_factor = sqrt(mean(Q2_6/N6_weight))
v2_factor_err = std(Q2_6/N6_weight)/(2.*v2_factor)/sqrt(nev)
v6_Psi2 = chi_6222*v2_factor
v6_Psi2_err = sqrt((chi_6222_err*v2_factor)**2.
+ (chi_6222*v2_factor_err)**2.)
return(v6_Psi2, v6_Psi2_err)
def calculate_v6_Psi3(chi_633, chi_633_err, vn_array):
"""
v6(Psi3) = chi_633*sqrt(<abs(V3)**4>)
"""
dN = real(vn_array[:, 0])
Q3 = dN*vn_array[:, 3]
Q6 = dN*vn_array[:, 6]
nev = len(dN)
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q3_4 = ((abs(Q3)**4.) - 2.*real(Q6*conj(Q3)*conj(Q3))
- 4.*(dN - 2.)*(abs(Q3)**2.) + abs(Q6)**2.
+ 2*dN*(dN - 3.))
v3_factor = sqrt(mean(Q3_4/N4_weight))
v3_factor_err = std(Q3_4/N4_weight)/(2.*v3_factor)/sqrt(nev)
v6_Psi3 = chi_633*v3_factor
v6_Psi3_err = sqrt((chi_633_err*v3_factor)**2.
+ (chi_633*v3_factor_err)**2.)
return(v6_Psi3, v6_Psi3_err)
def calculate_rho_422(v4_Psi2, v4_Psi2_err, vn_array):
"""
rho_422 = v4(Psi2)/v4(Psi4)
"""
dN = real(vn_array[:, 0])
Q4 = dN*vn_array[:, 4]
nev = len(dN)
N2_weight = dN*(dN - 1.)
Q4_2 = abs(Q4)**2. - dN
v4_Psi4 = sqrt(mean(Q4_2/N2_weight))
v4_Psi4_err = std(Q4_2/N2_weight)/(2.*v4_Psi4)/sqrt(nev)
rho_422 = v4_Psi2/v4_Psi4
rho_422_err = sqrt((v4_Psi2_err/v4_Psi4)**2.
+ (v4_Psi2*v4_Psi4_err/v4_Psi4**2.)**2.)
return(rho_422, rho_422_err)
def calculate_rho_523(v5_Psi23, v5_Psi23_err, vn_array):
"""
rho_523 = v5(Psi23)/v5(Psi5)
"""
dN = real(vn_array[:, 0])
Q5 = dN*vn_array[:, 5]
nev = len(dN)
N2_weight = dN*(dN - 1.)
Q5_2 = abs(Q5)**2. - dN
v5_Psi5 = sqrt(mean(Q5_2/N2_weight))
v5_Psi5_err = std(Q5_2/N2_weight)/(2.*v5_Psi5)/sqrt(nev)
rho_523 = v5_Psi23/v5_Psi5
rho_523_err = sqrt((v5_Psi23_err/v5_Psi5)**2.
+ (v5_Psi23*v5_Psi5_err/v5_Psi5**2.)**2.)
return(rho_523, rho_523_err)
def calculate_rho_6222(v6_Psi2, v6_Psi2_err, vn_array):
"""
rho_6222 = v6(Psi2)/v6(Psi6)
"""
dN = real(vn_array[:, 0])
Q6 = dN*vn_array[:, 6]
nev = len(dN)
N2_weight = dN*(dN - 1.)
Q6_2 = abs(Q6)**2. - dN
v6_Psi6 = sqrt(mean(Q6_2/N2_weight))
v6_Psi6_err = std(Q6_2/N2_weight)/(2.*v6_Psi6)/sqrt(nev)
rho_6222 = v6_Psi2/v6_Psi6
rho_6222_err = sqrt((v6_Psi2_err/v6_Psi6)**2.
+ (v6_Psi2*v6_Psi6_err/v6_Psi6**2.)**2.)
return(rho_6222, rho_6222_err)
def calculate_rho_633(v6_Psi3, v6_Psi3_err, vn_array):
"""
rho_633 = v6(Psi3)/v6(Psi6)
"""
dN = real(vn_array[:, 0])
Q6 = dN*vn_array[:, 6]
nev = len(dN)
N2_weight = dN*(dN - 1.)
Q6_2 = abs(Q6)**2. - dN
v6_Psi6 = sqrt(mean(Q6_2/N2_weight))
v6_Psi6_err = std(Q6_2/N2_weight)/(2.*v6_Psi6)/sqrt(nev)
rho_633 = v6_Psi3/v6_Psi6
rho_633_err = sqrt((v6_Psi3_err/v6_Psi6)**2.
+ (v6_Psi3*v6_Psi6_err/v6_Psi6**2.)**2.)
return(rho_633, rho_633_err)
def calculate_v4_L(v4_Psi2, v4_Psi2_err, vn_array):
"""
v4_L = sqrt(v4(Psi4)^2 - v4(Psi2)^2)
"""
dN = real(vn_array[:, 0])
Q4 = dN*vn_array[:, 4]
nev = len(dN)
N2_weight = dN*(dN - 1.)
Q4_2 = abs(Q4)**2. - dN
v4_Psi4_sq = mean(Q4_2/N2_weight)
v4_Psi4_sq_err = std(Q4_2/N2_weight)/sqrt(nev)
v4_L = sqrt(v4_Psi4_sq - v4_Psi2**2.)
v4_L_err = (sqrt(v4_Psi4_sq_err**2. + (2.*v4_Psi2*v4_Psi2_err)**2.)
/(2.*v4_L))
return(v4_L, v4_L_err)
def calculate_v5_L(v5_Psi23, v5_Psi23_err, vn_array):
"""
v5_L = sqrt(v5(Psi5)^2 - v5(Psi23)^2)
"""
dN = real(vn_array[:, 0])
Q5 = dN*vn_array[:, 5]
nev = len(dN)
N2_weight = dN*(dN - 1.)
Q5_2 = abs(Q5)**2. - dN
v5_Psi5_sq = mean(Q5_2/N2_weight)
v5_Psi5_sq_err = std(Q5_2/N2_weight)/sqrt(nev)
v5_L = sqrt(v5_Psi5_sq - v5_Psi23**2.)
v5_L_err = (sqrt(v5_Psi5_sq_err**2. + (2.*v5_Psi23*v5_Psi23_err)**2.)
/(2.*v5_L))
return(v5_L, v5_L_err)
def calculate_v6_L(chi_6222, chi_6222_err, chi_633, chi_633_err, vn_array):
"""
v6_L^2 = v6(Psi6)^2 - chi_6222^2 v2^6
- chi_633^2 v3^4 - 2 Re(chi_6222*chi_633*v2^3 v3^{2*})
note: as implemented, the value returned is the squared quantity
v6_L^2 (and its error); no square root is taken below
"""
dN = real(vn_array[:, 0])
v2_array = vn_array[:, 2]
v3_array = vn_array[:, 3]
v6_array = vn_array[:, 6]
nev = len(dN)
v6_Psi6_sq = mean(abs(v6_array)**2.)
v6_Psi6_sq_err = std(abs(v6_array)**2.)/sqrt(nev)
v2_6 = mean(abs(v2_array)**6.)
v2_6_err = std(abs(v2_array)**6.)/sqrt(nev)
v3_4 = mean(abs(v3_array)**4.)
v3_4_err = std(abs(v3_array)**4.)/sqrt(nev)
v23 = real(mean(v2_array**3.*conj(v3_array)**2.))
v23_err = real(std(v2_array**3.*conj(v3_array)**2.))/sqrt(nev)
v6_L = (v6_Psi6_sq - chi_6222**2.*v2_6 - chi_633**2.*v3_4
- 2.*chi_6222*chi_633*v23)
v6_L_err = sqrt(
v6_Psi6_sq_err**2.
+ (2.*chi_6222*chi_6222_err*v2_6)**2. + (chi_6222**2.*v2_6_err)**2.
+ (2.*chi_633*chi_633_err*v3_4)**2. + (chi_633**2.*v3_4_err)**2.
+ (2.*chi_6222_err*chi_633*v23)**2.
+ (2.*chi_6222*chi_633_err*v23)**2.
+ (2.*chi_6222*chi_633*v23_err)**2.)
return(v6_L, v6_L_err)
def calculate_nonlinear_reponse(vn_array):
"""
this function computes all the nonlinear response coefficients
proposed in the paper arXiv: 1502.02502 up to v6
"""
chi_422, chi_422_err = calculate_chi_422(vn_array)
v4_Psi2, v4_Psi2_err = calculate_v4_Psi2(chi_422, chi_422_err, vn_array)
rho_422, rho_422_err = calculate_rho_422(v4_Psi2, v4_Psi2_err, vn_array)
v4_L, v4_L_err = calculate_v4_L(v4_Psi2, v4_Psi2_err, vn_array)
chi_523, chi_523_err = calculate_chi_523(vn_array)
v5_Psi23, v5_Psi23_err = calculate_v5_Psi23(chi_523, chi_523_err, vn_array)
rho_523, rho_523_err = calculate_rho_523(v5_Psi23, v5_Psi23_err, vn_array)
v5_L, v5_L_err = calculate_v5_L(v5_Psi23, v5_Psi23_err, vn_array)
chi_6222, chi_6222_err = calculate_chi_6222(vn_array)
v6_Psi2, v6_Psi2_err = calculate_v6_Psi2(chi_6222, chi_6222_err, vn_array)
rho_6222, rho_6222_err = calculate_rho_6222(v6_Psi2, v6_Psi2_err, vn_array)
chi_633, chi_633_err = calculate_chi_633(vn_array)
v6_Psi3, v6_Psi3_err = calculate_v6_Psi3(chi_633, chi_633_err, vn_array)
rho_633, rho_633_err = calculate_rho_633(v6_Psi3, v6_Psi3_err, vn_array)
v6_L, v6_L_err = calculate_v6_L(chi_6222, chi_6222_err,
chi_633, chi_633_err, vn_array)
results = [v4_L, v4_L_err, v4_Psi2, v4_Psi2_err, rho_422, rho_422_err,
chi_422, chi_422_err,
v5_L, v5_L_err, v5_Psi23, v5_Psi23_err, rho_523, rho_523_err,
chi_523, chi_523_err,
v6_L, v6_L_err, v6_Psi2, v6_Psi2_err, v6_Psi3, v6_Psi3_err,
rho_6222, rho_6222_err, rho_633, rho_633_err,
chi_6222, chi_6222_err, chi_633, chi_633_err]
return(results)
def calculate_vn_2(vn_data_array):
    """
    this function computes vn{2} and its statistical error;
    self-correlations are subtracted
    """
vn_data_array = array(vn_data_array)
nev = len(vn_data_array[:, 0])
dN = real(vn_data_array[:, 0])
dN = dN.reshape(len(dN), 1)
Qn_array = dN*vn_data_array[:, 1:]
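    # Two-particle correlation with unit weights: for each event,
    # (|Q_n|^2 - M)/(M*(M - 1)) estimates <cos(n(phi_1 - phi_2))> = <v_n^2>
    # with the M self-correlation pairs removed (here Q_n = M*v_n).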
corr = 1./(dN*(dN - 1.))*(Qn_array*conj(Qn_array) - dN)
vn_2 = sqrt(real(mean(corr, 0))) + 1e-30
vn_2_err = std(real(corr), 0)/sqrt(nev)/2./vn_2
return(nan_to_num(vn_2), nan_to_num(vn_2_err))
def calculate_diff_vn_single_event(pT_ref_low, pT_ref_high, data, data_ref):
npT = 50
pT_inte_array = linspace(pT_ref_low, pT_ref_high, npT)
dpT = pT_inte_array[1] - pT_inte_array[0]
dN_event = data[:, -1]
dN_ref_event = data_ref[:, -1]
pT_ref_event = data_ref[:, 0]
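    # Interpolate the reference spectrum in log space so the interpolated
    # yields stay positive, then integrate it over the reference pT window.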
dN_ref_interp = exp(interp(pT_inte_array, pT_ref_event,
log(dN_ref_event + 1e-30)))
dN_ref = sum(dN_ref_interp)*dpT
temp_vn_real_array = []
temp_vn_imag_array = []
temp_vn_denorm_array1 = []
temp_vn_denorm_array2 = []
for iorder in range(1, n_order):
vn_real_event = data[:, 4*iorder]
vn_imag_event = data[:, 4*iorder+2]
vn_ref_real_event = data_ref[:, 4*iorder]
vn_ref_imag_event = data_ref[:, 4*iorder+2]
vn_ref_real_interp = interp(pT_inte_array, pT_ref_event,
vn_ref_real_event)
vn_ref_imag_interp = interp(pT_inte_array, pT_ref_event,
vn_ref_imag_event)
vn_ref_real_inte = (
sum(vn_ref_real_interp*dN_ref_interp)/sum(dN_ref_interp))
vn_ref_imag_inte = (
sum(vn_ref_imag_interp*dN_ref_interp)/sum(dN_ref_interp))
vn_ref = vn_ref_real_inte + 1j*vn_ref_imag_inte
vn_pt = vn_real_event + 1j*vn_imag_event
numerator_real = real(dN_event*vn_pt*dN_ref*conj(vn_ref))
numerator_imag = imag(dN_event*vn_pt*dN_ref*conj(vn_ref))
denorm1 = dN_event
denorm2 = real(dN_ref*vn_ref*dN_ref*conj(vn_ref))
temp_vn_real_array.append(numerator_real)
temp_vn_imag_array.append(numerator_imag)
temp_vn_denorm_array2.append(denorm2)
temp_vn_denorm_array1.append(denorm1)
return(temp_vn_real_array, temp_vn_imag_array,
temp_vn_denorm_array1, temp_vn_denorm_array2)
def get_vn_diff_2PC_from_single_event(data):
dN_event = data[:, -1]
temp_vn_real_array = []
temp_vn_imag_array = []
temp_vn_denorm_array = []
for iorder in range(1, n_order):
vn_real_event = data[:, 4*iorder]
vn_imag_event = data[:, 4*iorder+2]
vn_pt = vn_real_event + 1j*vn_imag_event
numerator_real = real(dN_event*vn_pt)
numerator_imag = imag(dN_event*vn_pt)
denorm = dN_event
temp_vn_real_array.append(numerator_real)
temp_vn_imag_array.append(numerator_imag)
temp_vn_denorm_array.append(denorm)
return(temp_vn_real_array, temp_vn_imag_array, temp_vn_denorm_array)
def calculate_vn_diff_SP(vn_diff_real, vn_diff_imag,
vn_diff_denorm1, vn_diff_denorm2):
"""
    this function calculates the scalar-product vn
assumption: no overlap between particles of interest
and reference flow Qn vectors
"""
vn_diff_real = array(vn_diff_real)
vn_diff_imag = array(vn_diff_imag)
vn_diff_denorm1 = array(vn_diff_denorm1) + 1e-30
vn_diff_denorm2 = array(vn_diff_denorm2)
nev = len(vn_diff_denorm1[:, 0])
#vn_denorm = vn_2.reshape(len(vn_2), 1)
#vn_denorm_err = vn_2_err.reshape(len(vn_2_err), 1)
dn_diff_denorm = mean(vn_diff_denorm1, 0)
dn_diff_denorm_err = std(vn_diff_denorm1, 0)/sqrt(nev)
vn_denorm = sqrt(mean(vn_diff_denorm2, 0))
vn_denorm_err = (std(vn_diff_denorm2, 0)/sqrt(nev)/2./vn_denorm)
vn_denorm = vn_denorm.reshape(len(vn_denorm), 1)
vn_denorm_err = vn_denorm_err.reshape(len(vn_denorm), 1)
vn_diff_SP = mean(vn_diff_real, 0)/dn_diff_denorm/vn_denorm
vn_diff_SP_err = sqrt(
(std(vn_diff_real, 0)/sqrt(nev)/dn_diff_denorm/vn_denorm)**2.
+ (vn_diff_SP*dn_diff_denorm_err/dn_diff_denorm)**2.
+ (vn_diff_SP*vn_denorm_err/vn_denorm)**2.)
return(vn_diff_SP, vn_diff_SP_err)
def calculate_vn_diff_2PC(vn_diff_real, vn_diff_imag, vn_diff_denorm):
"""
    this function calculates the rms vn[2](pT)
"""
vn_diff_real = array(vn_diff_real)
vn_diff_imag = array(vn_diff_imag)
vn_diff_denorm = array(vn_diff_denorm)
nev = len(vn_diff_denorm[:, 0])
vn_diff_2PC = sqrt(
mean((vn_diff_real**2. + vn_diff_imag**2. - vn_diff_denorm)
/(vn_diff_denorm**2. - vn_diff_denorm + 1e-15), 0))
vn_diff_2PC_err = (
std((vn_diff_real**2. + vn_diff_imag**2. - vn_diff_denorm)
/(vn_diff_denorm**2. - vn_diff_denorm + 1e-15), 0)
/sqrt(nev)/(2.*vn_diff_2PC + 1e-15))
return(nan_to_num(vn_diff_2PC), nan_to_num(vn_diff_2PC_err))
def calculate_vn_distribution(vn_array):
nbin = 20
vn_array = array(vn_array)
vn_dim = len(vn_array[0, :])
output = []
for vn_order in range(vn_dim):
vn_mag_array = abs(vn_array[:, vn_order])
vn_min = min(vn_mag_array)
vn_max = max(vn_mag_array)*1.0001
bin_boundaries = linspace(vn_min, vn_max, nbin+1)
bin_width = bin_boundaries[1] - bin_boundaries[0]
bin_center = zeros([nbin])
bin_value = zeros([nbin])
for vn_elem in vn_mag_array:
vn_idx = int(floor((vn_elem - vn_min)/bin_width))
            if vn_idx == nbin:
                # guard against floating-point round-off at the upper edge
                vn_idx = nbin - 1
bin_value[vn_idx] += 1.
bin_center[vn_idx] += vn_elem
bin_center = bin_center/(bin_value + 1e-15)
bin_value = bin_value/len(vn_array)
bin_value_err = sqrt(bin_value/len(vn_array))
bin_value = bin_value/bin_width
bin_value_err = bin_value_err/bin_width
for i in range(nbin):
if abs(bin_center[i]) < 1e-15:
bin_center[i] = (bin_boundaries[i] + bin_boundaries[i+1])/2.
output.append(bin_center)
output.append(bin_value)
output.append(bin_value_err)
output = array(output)
return(output.transpose())
def calculate_event_plane_correlations(vn_array):
    """
    this function computes the scalar-product event-plane correlations;
    vn_array is a matrix [event_idx, vn_order]
"""
vn_array = array(vn_array)
nev = len(vn_array[:, 0])
v2_array = vn_array[:, 2]
v3_array = vn_array[:, 3]
v4_array = vn_array[:, 4]
v5_array = vn_array[:, 5]
v6_array = vn_array[:, 6]
corr_224_JK = zeros(nev)
corr_22233_JK = zeros(nev)
corr_2226_JK = zeros(nev)
corr_336_JK = zeros(nev)
corr_235_JK = zeros(nev)
corr_246_JK = zeros(nev)
corr_234_JK = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
v2_2 = mean(abs(v2_array[array_idx])**2.)
v3_2 = mean(abs(v3_array[array_idx])**2.)
v4_2 = mean(abs(v4_array[array_idx])**2.)
v5_2 = mean(abs(v5_array[array_idx])**2.)
v6_2 = mean(abs(v6_array[array_idx])**2.)
# cos(4(Psi_2 - Psi_4))
corr_224_num = mean(real((v2_array[array_idx]**2.)
*conj(v4_array[array_idx])))
corr_224_JK[iev] = corr_224_num/sqrt(v2_2*v2_2*v4_2)
# cos(6(Psi_2 - Psi_3))
corr_22233_num = mean(real((v2_array[array_idx]**3.)
*conj(v3_array[array_idx])**2.))
corr_22233_JK[iev] = corr_22233_num/sqrt(v2_2**3.*v3_2**2.)
# cos(6(Psi_2 - Psi_6))
corr_2226_num = mean(real(v2_array[array_idx]**3.
*conj(v6_array[array_idx])))
corr_2226_JK[iev] = corr_2226_num/sqrt((v2_2**3.)*v6_2)
# cos(6(Psi_3 - Psi_6))
corr_336_num = mean(real((v3_array[array_idx]**2.)
*conj(v6_array[array_idx])))
corr_336_JK[iev] = corr_336_num/sqrt((v3_2**2.)*v6_2)
# cos(2Psi_2 + 3Psi_3 - 5Psi_5)
corr_235_num = mean(real(v2_array[array_idx]*v3_array[array_idx]
*conj(v5_array[array_idx])))
corr_235_JK[iev] = corr_235_num/sqrt(v2_2*v3_2*v5_2)
# cos(2Psi_2 + 4Psi_4 - 6Psi_6)
corr_246_num = mean(real(v2_array[array_idx]*v4_array[array_idx]
*conj(v6_array[array_idx])))
corr_246_JK[iev] = corr_246_num/sqrt(v2_2*v4_2*v6_2)
# cos(2Psi_2 - 6Psi_3 + 4Psi_4)
corr_234_num = mean(real(v2_array[array_idx]
*(conj(v3_array[array_idx])**2.)
*v4_array[array_idx]))
corr_234_JK[iev] = corr_234_num/sqrt(v2_2*(v3_2**2.)*v4_2)
corr_224 = mean(corr_224_JK)
corr_224_err = sqrt((nev - 1.)/nev*sum((corr_224_JK - corr_224)**2.))
corr_22233 = mean(corr_22233_JK)
corr_22233_err = sqrt((nev - 1.)/nev*sum((corr_22233_JK - corr_22233)**2.))
corr_2226 = mean(corr_2226_JK)
corr_2226_err = sqrt((nev - 1.)/nev*sum((corr_2226_JK - corr_2226)**2.))
corr_336 = mean(corr_336_JK)
corr_336_err = sqrt((nev - 1.)/nev*sum((corr_336_JK - corr_336)**2.))
corr_235 = mean(corr_235_JK)
corr_235_err = sqrt((nev - 1.)/nev*sum((corr_235_JK - corr_235)**2.))
corr_246 = mean(corr_246_JK)
corr_246_err = sqrt((nev - 1.)/nev*sum((corr_246_JK - corr_246)**2.))
corr_234 = mean(corr_234_JK)
corr_234_err = sqrt((nev - 1.)/nev*sum((corr_234_JK - corr_234)**2.))
results = [corr_224, corr_22233, corr_2226, corr_336,
corr_235, corr_246, corr_234]
results_err = [corr_224_err, corr_22233_err, corr_2226_err, corr_336_err,
corr_235_err, corr_246_err, corr_234_err]
return(results, results_err)
def calculate_vn_arrays_for_rn_ratios(data):
    # this function computes the complex pT-integrated Vn vector
# in different pT ranges for a single event
# it returns a 2d matrix vn_arrays[pT_idx, n_order_idx]
pT_boundaries = [0.3, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0]
npT = 50
vn_arrays = []
for ipT in range(len(pT_boundaries)-1):
pT_low = pT_boundaries[ipT]
pT_high = pT_boundaries[ipT + 1]
pT_mid = (pT_low + pT_high)/2.
vn_array = calcualte_inte_vn(pT_low, pT_high, data)
vn_array.insert(0, pT_mid)
vn_arrays.append(vn_array)
return(vn_arrays)
def calculate_rn_ratios(vn_event_arrays):
    # this function computes rn ratios in different pT bins
# according to the CMS measurements
# it reads in a 3d data cube
# vn_event_arrays[event_idx, pT_idx, n_order_idx]
# it returns rn_arrays[iorder, pT_idx, 3]
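    # r_n compares flow vectors in two different pT bins,
    # r_n = <Q_n(pT_asso) conj(Q_n(pT_trig))>
    #       /sqrt(<|Q_n(pT_asso)|^2><|Q_n(pT_trig)|^2>),
    # with self-correlations removed when the two bins coincide;
    # r_n < 1 signals pT-dependent factorization breaking.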
vn_event_arrays = array(vn_event_arrays)
rn_arrays = []
for iorder in range(3, 6):
# compute r2, r3, r4
rn_array = []
for itrig in range(3, len(vn_event_arrays[0, :, 0])):
pT_trig = real(vn_event_arrays[0, itrig, 0])
dN_trig = real(vn_event_arrays[:, itrig, 1])
Qn_trig_array = dN_trig*vn_event_arrays[:, itrig, iorder]
nev = len(Qn_trig_array)
denorm2_dN = dN_trig*(dN_trig - 1.)
denorm2_array = abs(Qn_trig_array)**2. - dN_trig
for iasso in range(0, itrig+1):
pT_asso = real(vn_event_arrays[0, iasso, 0])
dN_asso = real(vn_event_arrays[:, iasso, 1])
Qn_asso_array = dN_asso*vn_event_arrays[:, iasso, iorder]
num_dN = dN_trig*dN_asso
num_array = real(Qn_asso_array*conj(Qn_trig_array))
if iasso == itrig:
num_dN -= dN_asso
num_array = (real(Qn_asso_array*conj(Qn_trig_array))
- dN_asso)
denorm1_dN = dN_asso*(dN_asso - 1.)
denorm1_array = abs(Qn_asso_array)**2. - dN_asso
rn_jackknife = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
num = mean(num_array[array_idx]/num_dN[array_idx])
denorm1 = mean(denorm1_array[array_idx]
/denorm1_dN[array_idx])
denorm2 = mean(denorm2_array[array_idx]
/denorm2_dN[array_idx])
if denorm1 > 0. and denorm2 > 0.:
rn_jackknife[iev] = num/sqrt(denorm1*denorm2)
rn_mean = mean(rn_jackknife)
rn_err = sqrt((nev - 1.)/nev*sum((rn_jackknife - rn_mean)**2.))
rn_array.append([pT_trig - pT_asso, rn_mean, rn_err])
rn_arrays.append(rn_array)
rn_arrays = array(rn_arrays)
return(rn_arrays)
def calculate_symmetric_cumulant(vn_data_array):
"""
    this function computes the symmetric cumulants
    SC(m,n) = <v_m*conj(v_m)*v_n*conj(v_n)>
              - <v_m*conj(v_m)>*<v_n*conj(v_n)>
    we use the jackknife resampling method to estimate the statistical error
"""
vn_data_array = array(vn_data_array)
nev = len(vn_data_array[:, 0])
dN = real(vn_data_array[:, 0])
Q1 = dN*vn_data_array[:, 1]
Q2 = dN*vn_data_array[:, 2]
Q3 = dN*vn_data_array[:, 3]
Q4 = dN*vn_data_array[:, 4]
Q5 = dN*vn_data_array[:, 5]
Q6 = dN*vn_data_array[:, 6]
# two-particle correlation
N2_weight = dN*(dN - 1.)
Q2_2 = abs(Q2)**2. - dN
Q3_2 = abs(Q3)**2. - dN
Q4_2 = abs(Q4)**2. - dN
# four-particle correlation
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
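    # Q_32 and Q_42 are the four-particle correlators
    # <cos(m*phi_1 + n*phi_2 - m*phi_3 - n*phi_4)> for (m,n) = (3,2) and
    # (4,2), written in Q-vectors with all self-correlation terms removed
    # (each still needs to be divided by N4_weight).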
Q_32 = ((abs(Q2)**2.)*(abs(Q3)**2.) - 2.*real(Q5*conj(Q2)*conj(Q3))
- 2.*real(Q3*conj(Q1)*conj(Q2)) + abs(Q5)**2. + abs(Q1)**2.
- (dN - 4.)*(abs(Q2)**2. + abs(Q3)**2.) + dN*(dN - 6.)
)
Q_42 = ((abs(Q2)**2.)*(abs(Q4)**2.) - 2.*real(Q6*conj(Q2)*conj(Q4))
- 2.*real(Q4*conj(Q2)*conj(Q2)) + abs(Q6)**2. + abs(Q2)**2.
- (dN - 4.)*(abs(Q2)**2. + abs(Q4)**2.) + dN*(dN - 6.)
)
    # calculate observables with the jackknife resampling method
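    # Leave-one-out resampling: recompute SC(m,n) with one event removed at
    # a time; the jackknife error is sqrt((nev-1)/nev*sum_i(x_i - <x>)^2).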
SC32_array = zeros(nev)
SC42_array = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
# SC(3,2)
SC32_array[iev] = (mean(Q_32[array_idx]/N4_weight[array_idx])
- mean(Q3_2[array_idx]/N2_weight[array_idx])
*mean(Q2_2[array_idx]/N2_weight[array_idx]))
# SC(4,2)
SC42_array[iev] = (mean(Q_42[array_idx]/N4_weight[array_idx])
- mean(Q4_2[array_idx]/N2_weight[array_idx])
*mean(Q2_2[array_idx]/N2_weight[array_idx]))
SC32_mean = mean(SC32_array)
SC32_err = sqrt((nev - 1.)/nev*sum((SC32_array - SC32_mean)**2.))
SC42_mean = mean(SC42_array)
SC42_err = sqrt((nev - 1.)/nev*sum((SC42_array - SC42_mean)**2.))
results = [SC32_mean, SC32_err, SC42_mean, SC42_err]
return(results)
def calculate_vn4(vn_data_array):
"""
    this function computes the 4-particle cumulant vn{4}
vn{4} = (2 <v_n*conj(v_n)>**2 - <(v_n*conj(v_n))**2.>)**(1/4)
"""
vn_data_array = array(vn_data_array)
nev = len(vn_data_array[:, 0])
dN = real(vn_data_array[:, 0])
Q1 = dN*vn_data_array[:, 1]
Q2 = dN*vn_data_array[:, 2]
Q3 = dN*vn_data_array[:, 3]
Q4 = dN*vn_data_array[:, 4]
Q5 = dN*vn_data_array[:, 5]
Q6 = dN*vn_data_array[:, 6]
# two-particle correlation
N2_weight = dN*(dN - 1.)
Q1_2 = abs(Q1)**2. - dN
Q2_2 = abs(Q2)**2. - dN
Q3_2 = abs(Q3)**2. - dN
# four-particle correlation
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
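    # Qn_4 is the numerator of the four-particle correlation <<4>>_n =
    # <cos(n(phi_1 + phi_2 - phi_3 - phi_4))> in Q-vector form with
    # self-correlations removed (divide by N4_weight to obtain <<4>>_n).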
Q1_4 = ((abs(Q1)**4.) - 2.*real(Q2*conj(Q1)*conj(Q1))
- 4.*(dN - 2.)*(abs(Q1)**2.) + abs(Q2)**2.
+ 2*dN*(dN - 3.))
Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))
- 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.
+ 2*dN*(dN - 3.))
Q3_4 = ((abs(Q3)**4.) - 2.*real(Q6*conj(Q3)*conj(Q3))
- 4.*(dN - 2.)*(abs(Q3)**2.) + abs(Q6)**2.
+ 2*dN*(dN - 3.))
# C_n{4}
C_1_4 = mean(Q1_4/N4_weight) - 2.*((mean(Q1_2/N2_weight))**2.)
stat_err_1 = std(Q1_4/N4_weight)/sqrt(nev)
stat_err_2 = std(Q1_2/N2_weight)/sqrt(nev)
C_1_4_err = sqrt(stat_err_1**2.
+ (4.*(mean(Q1_2/N2_weight))*stat_err_2)**2.)
v1_4 = 0.0
v1_4_err = 0.0
if C_1_4 < 0:
v1_4 = (-C_1_4)**0.25
v1_4_err = 0.25*((-C_1_4)**(-0.75))*C_1_4_err
C_2_4 = mean(Q2_4/N4_weight) - 2.*((mean(Q2_2/N2_weight))**2.)
stat_err_1 = std(Q2_4/N4_weight)/sqrt(nev)
stat_err_2 = std(Q2_2/N2_weight)/sqrt(nev)
C_2_4_err = sqrt(stat_err_1**2.
                     + (4.*(mean(Q2_2/N2_weight))*stat_err_2)**2.)
v2_4 = 0.0
v2_4_err = 0.0
if C_2_4 < 0:
v2_4 = (-C_2_4)**0.25
v2_4_err = 0.25*((-C_2_4)**(-0.75))*C_2_4_err
C_3_4 = mean(Q3_4/N4_weight) - 2.*((mean(Q3_2/N2_weight))**2.)
stat_err_1 = std(Q3_4/N4_weight)/sqrt(nev)
stat_err_2 = std(Q3_2/N2_weight)/sqrt(nev)
C_3_4_err = sqrt(stat_err_1**2.
+ (4.*(mean(Q3_2/N2_weight))*stat_err_2)**2.)
v3_4 = 0.0
v3_4_err = 0.0
if C_3_4 < 0:
v3_4 = (-C_3_4)**0.25
v3_4_err = 0.25*((-C_3_4)**(-0.75))*C_3_4_err
results = [v1_4, v1_4_err, C_1_4, C_1_4_err,
v2_4, v2_4_err, C_2_4, C_2_4_err,
v3_4, v3_4_err, C_3_4, C_3_4_err,]
return(results)
def calculate_vn4_over_vn2(vn_data_array):
"""
    this function computes the ratio of
the 4-particle cumulant vn{4} over the 2-particle cumulant vn{2}
and Fn = sqrt((vn{2}^2 - vn{4}^2)/(vn{2}^2 + vn{4}^2))
vn{4} = (2 <v_n*conj(v_n)>**2 - <(v_n*conj(v_n))**2.>)**(1/4)
vn{2} = (<v_n*conj(v_n)>)**(1/2)
    we use the jackknife resampling method to estimate
the statistical error
"""
vn_data_array = array(vn_data_array)
nev = len(vn_data_array[:, 0])
dN = real(vn_data_array[:, 0])
Q1 = dN*vn_data_array[:, 1]
Q2 = dN*vn_data_array[:, 2]
Q3 = dN*vn_data_array[:, 3]
Q4 = dN*vn_data_array[:, 4]
Q5 = dN*vn_data_array[:, 5]
Q6 = dN*vn_data_array[:, 6]
# two-particle correlation
N2_weight = dN*(dN - 1.)
Q1_2 = abs(Q1)**2. - dN
Q2_2 = abs(Q2)**2. - dN
Q3_2 = abs(Q3)**2. - dN
# four-particle correlation
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q1_4 = ((abs(Q1)**4.) - 2.*real(Q2*conj(Q1)*conj(Q1))
- 4.*(dN - 2.)*(abs(Q1)**2.) + abs(Q2)**2.
+ 2*dN*(dN - 3.))
Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))
- 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.
+ 2*dN*(dN - 3.))
Q3_4 = ((abs(Q3)**4.) - 2.*real(Q6*conj(Q3)*conj(Q3))
- 4.*(dN - 2.)*(abs(Q3)**2.) + abs(Q6)**2.
+ 2*dN*(dN - 3.))
    # calculate observables with the jackknife resampling method
r1_array = zeros(nev)
r2_array = zeros(nev)
r3_array = zeros(nev)
F1_array = zeros(nev)
F2_array = zeros(nev)
F3_array = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
# C_n{4}
C_1_4 = (mean(Q1_4[array_idx]/N4_weight[array_idx])
- 2.*((mean(Q1_2[array_idx]/N2_weight[array_idx]))**2.))
C_1_2 = mean(Q1_2[array_idx]/N2_weight[array_idx])
if C_1_4 < 0. and C_1_2 > 0.:
v1_4 = (-C_1_4)**0.25
v1_2 = sqrt(C_1_2)
r1_array[iev] = v1_4/(v1_2 + 1e-15)
F1_array[iev] = sqrt((v1_2**2. - v1_4**2.)
/(v1_2**2. + v1_4**2. + 1e-15))
C_2_4 = (mean(Q2_4[array_idx]/N4_weight[array_idx])
- 2.*((mean(Q2_2[array_idx]/N2_weight[array_idx]))**2.))
C_2_2 = mean(Q2_2[array_idx]/N2_weight[array_idx])
if C_2_4 < 0. and C_2_2 > 0.:
v2_4 = (-C_2_4)**0.25
v2_2 = sqrt(C_2_2)
r2_array[iev] = v2_4/v2_2
F2_array[iev] = sqrt((v2_2**2. - v2_4**2.)
/(v2_2**2. + v2_4**2. + 1e-15))
C_3_4 = (mean(Q3_4[array_idx]/N4_weight[array_idx])
- 2.*((mean(Q3_2[array_idx]/N2_weight[array_idx]))**2.))
C_3_2 = mean(Q3_2[array_idx]/N2_weight[array_idx])
if C_3_4 < 0. and C_3_2 > 0.:
v3_4 = (-C_3_4)**0.25
v3_2 = sqrt(C_3_2)
r3_array[iev] = v3_4/v3_2
F3_array[iev] = sqrt((v3_2**2. - v3_4**2.)
/(v3_2**2. + v3_4**2. + 1e-15))
r1_mean = mean(r1_array)
r1_err = sqrt((nev - 1.)/nev*sum((r1_array - r1_mean)**2.))
r2_mean = mean(r2_array)
r2_err = sqrt((nev - 1.)/nev*sum((r2_array - r2_mean)**2.))
r3_mean = mean(r3_array)
r3_err = sqrt((nev - 1.)/nev*sum((r3_array - r3_mean)**2.))
F1_mean = mean(F1_array)
F1_err = sqrt((nev - 1.)/nev*sum((F1_array - F1_mean)**2.))
F2_mean = mean(F2_array)
F2_err = sqrt((nev - 1.)/nev*sum((F2_array - F2_mean)**2.))
F3_mean = mean(F3_array)
F3_err = sqrt((nev - 1.)/nev*sum((F3_array - F3_mean)**2.))
results = [r1_mean, r1_err, F1_mean, F1_err,
r2_mean, r2_err, F2_mean, F2_err,
r3_mean, r3_err, F3_mean, F3_err]
return(results)
def calculate_vn6_over_vn4(vn_data_array):
"""
    this function computes the ratio of
    the 6-particle cumulant vn{6} over the 4-particle cumulant vn{4}:
    cn{6} = <<6>> - 9<<2>><<4>> + 12<<2>>^3
    vn{6} = (cn{6}/4)**(1/6)
    vn{4} = (2 <v_n*conj(v_n)>**2 - <(v_n*conj(v_n))**2.>)**(1/4)
    and computes the skewness estimator gamma_1,
    gamma_1 = -6*sqrt(2)*vn{4}^2*(vn{4} - vn{6})
              /(vn{2}^2 - vn{4}^2)^(3/2)
    we use the jackknife resampling method to estimate
    the statistical error
"""
vn_data_array = array(vn_data_array)
nev = len(vn_data_array[:, 0])
dN = real(vn_data_array[:, 0])
Q1 = dN*vn_data_array[:, 1]
Q2 = dN*vn_data_array[:, 2]
Q3 = dN*vn_data_array[:, 3]
Q4 = dN*vn_data_array[:, 4]
Q5 = dN*vn_data_array[:, 5]
Q6 = dN*vn_data_array[:, 6]
# two-particle correlation
N2_weight = dN*(dN - 1.)
Q2_2 = abs(Q2)**2. - dN
# four-particle correlation
N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)
Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))
- 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.
+ 2*dN*(dN - 3.))
# six-particle correlation
N6_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)*(dN - 4.)*(dN - 5.)
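    # Q2_6 is the numerator of the six-particle correlation <<6>>_2 =
    # <cos(2(phi_1 + phi_2 + phi_3 - phi_4 - phi_5 - phi_6))> in Q-vector
    # form with self-correlations removed (divide by N6_weight).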
Q2_6 = (abs(Q2)**6. + 9*(abs(Q4)**2.)*(abs(Q2)**2.)
- 6.*real(Q4*Q2*conj(Q2)*conj(Q2)*conj(Q2))
+ 4.*real(Q6*conj(Q2)*conj(Q2)*conj(Q2))
- 12.*real(Q6*conj(Q4)*conj(Q2))
+ 18.*(dN - 4.)*real(Q4*conj(Q2)*conj(Q2))
+ 4.*(abs(Q6)**2.)
- 9.*(dN - 4.)*((abs(Q2)**4.) + (abs(Q4)**2.))
+ 18.*(dN - 5.)*(dN - 2.)*(abs(Q2)**2.)
- 6.*dN*(dN - 4.)*(dN - 5.))
    # calculate observables with the jackknife resampling method
r2_array = zeros(nev)
gamma1_array = zeros(nev)
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
# C_n{4}
C_2_2 = mean(Q2_2[array_idx]/N2_weight[array_idx])
C_2_4 = (mean(Q2_4[array_idx]/N4_weight[array_idx])
- 2.*(C_2_2**2.))
C_2_6 = (mean(Q2_6[array_idx]/N6_weight[array_idx])
- 9.*C_2_2*mean(Q2_4[array_idx]/N4_weight[array_idx])
+ 12.*(C_2_2**3.))
if C_2_6 > 0. and C_2_4 < 0. and C_2_2 > 0.:
v2_2 = sqrt(C_2_2)
v2_6 = (C_2_6/4.)**(1./6.)
v2_4 = (-C_2_4)**(1./4.)
r2_array[iev] = v2_6/v2_4
gamma1_array[iev] = (-6.*sqrt(2)*(v2_4**2.)*(v2_4 - v2_6)
/(v2_2**2. - v2_4**2.)**(1.5))
r2_mean = mean(r2_array)
r2_err = sqrt((nev - 1.)/nev*sum((r2_array - r2_mean)**2.))
gamma1_mean = mean(gamma1_array)
gamma1_err = sqrt((nev - 1.)/nev*sum((gamma1_array - gamma1_mean)**2.))
results = [r2_mean, r2_err, gamma1_mean, gamma1_err]
return(results)
def calculate_vn_eta(dN_array, vn_array):
nev, neta = dN_array.shape
dN_array = dN_array.reshape((nev, 1, neta))
vn_ref = sum(dN_array*vn_array, axis=2)/(sum(dN_array, axis=2) + 1e-15)
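    # vn_ref is the multiplicity-weighted, eta-integrated flow vector of
    # each event; it serves as the reference in the scalar product below.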
vnshape = vn_ref.shape
nvn = vnshape[1]
vn_ref = vn_ref.reshape((vnshape[0], vnshape[1], 1))
vn_SP_ev = real(vn_array*conj(vn_ref))
vn_SP_array = zeros([nev, nvn, neta])
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
vn_den = mean((absolute(vn_ref[array_idx, :, :]))**2., axis=0)
vn_SP = mean(vn_SP_ev[array_idx, :, :], axis=0)/sqrt(vn_den)
vn_SP_array[iev, :, :] = vn_SP
vn_SP_mean = mean(vn_SP_array, axis=0)
vn_SP_err = sqrt((nev - 1.)/nev*sum((vn_SP_array - vn_SP_mean)**2., axis=0))
return([vn_SP_mean, vn_SP_err])
def calculate_rn_eta(eta_array, dN_array, vn_array):
nev, neta = dN_array.shape
dN_array = dN_array.reshape((nev, 1, neta))
Qn_array = vn_array
Qnshape = Qn_array.shape
nQn = Qnshape[1]
# calculate the reference flow vector for every event
eta_b_min = 2.5
eta_b_max = 4.0
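    # Forward/backward reference windows at 2.5 < |eta| < 4.0, presumably
    # chosen to mimic the forward-detector acceptance used in r_n(eta)
    # longitudinal-decorrelation measurements.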
eta_ref1_tmp = linspace(eta_b_min, eta_b_max, 16)
eta_ref2_tmp = linspace(-eta_b_max, -eta_b_min, 16)
Qn_ref1 = []
Qn_ref2 = []
for iev in range(nev):
dN1_interp = interp(eta_ref1_tmp, eta_array, dN_array[iev, 0, :])
dN2_interp = interp(eta_ref2_tmp, eta_array, dN_array[iev, 0, :])
Qn_ref1_vec = []
Qn_ref2_vec = []
for iorder in range(nQn):
Qn1_interp = interp(eta_ref1_tmp, eta_array, Qn_array[iev, iorder, :])
Qn2_interp = interp(eta_ref2_tmp, eta_array, Qn_array[iev, iorder, :])
Qn_ref1_vec.append(sum(dN1_interp*Qn1_interp)/(sum(dN1_interp) + 1e-15))
Qn_ref2_vec.append(sum(dN2_interp*Qn2_interp)/(sum(dN2_interp) + 1e-15))
Qn_ref1.append(Qn_ref1_vec)
Qn_ref2.append(Qn_ref2_vec)
Qn_ref1 = array(Qn_ref1).reshape((nev, nQn, 1))
Qn_ref2 = array(Qn_ref2).reshape((nev, nQn, 1))
rn_num = real(Qn_array[:, :, ::-1]*conj(Qn_ref1))
rn_den = real(Qn_array*conj(Qn_ref1))
rnn_num = real((Qn_ref2*conj(Qn_array))
*(Qn_array[:, :, ::-1]*conj(Qn_ref1)))
rnn_den = real((Qn_ref2*conj(Qn_array[:, :, ::-1]))
*(Qn_array*conj(Qn_ref1)))
rn_array = zeros([nev, nQn, neta])
rnn_array = zeros([nev, nQn, neta])
for iev in range(nev):
array_idx = [True]*nev
array_idx[iev] = False
array_idx = array(array_idx)
rn_ev = (mean(rn_num[array_idx], axis=0)
/(mean(rn_den[array_idx], axis=0) + 1e-15))
rnn_ev = (mean(rnn_num[array_idx], axis=0)
/(mean(rnn_den[array_idx], axis=0) + 1e-15))
rn_array[iev, :, :] = rn_ev
rnn_array[iev, :, :] = rnn_ev
rn_mean = mean(rn_array, axis=0)
rn_err = sqrt((nev - 1.)/nev*sum((rn_array - rn_mean)**2., axis=0))
rnn_mean = mean(rnn_array, axis=0)
rnn_err = sqrt((nev - 1.)/nev*sum((rnn_array - rnn_mean)**2., axis=0))
return([rn_mean, rn_err, rnn_mean, rnn_err])
file_folder_list = glob(path.join(working_folder, '*'))
nev = len(file_folder_list)
for ipart, particle_id in enumerate(particle_list):
print("processing %s ..." % particle_name_list[ipart])
# first particle yield dN/dy
if particle_id == '9999':
file_name = 'particle_%s_vndata_eta_-0.5_0.5.dat' % particle_id
else:
file_name = 'particle_%s_vndata_y_-0.5_0.5.dat' % particle_id
dN_dy = []
for ifolder in range(nev):
results_folder = path.abspath(file_folder_list[ifolder])
temp_data = loadtxt(path.join(results_folder, file_name))
temp_data = nan_to_num(temp_data)
dN_dy.append(temp_data[0, 1])
dN_dy = array(dN_dy)
dN_dy_avg = mean(dN_dy)
dN_dy_avg_err = std(dN_dy)/sqrt(nev)
# then <pT>, vn, dN/(2pi dy pT dpT), vn{SP}(pT)
if particle_id == '9999':
file_name = 'particle_%s_vndata_diff_eta_-0.5_0.5.dat' % particle_id
else:
file_name = 'particle_%s_vndata_diff_y_-0.5_0.5.dat' % particle_id
file_name_ref = 'particle_9999_vndata_diff_eta_0.5_2.5.dat'
pT_array = []
dN_array = []
vn_phenix_array = []
vn_star_array = []
vn_alice_array = []
vn_cms_array = []
vn_cms_arrays_for_rn = []
vn_atlas_array = []
    vn_diff_phenix_real = []
    vn_diff_phenix_imag = []
    vn_diff_phenix_denorm1 = []
    vn_diff_phenix_denorm2 = []
    vn_diff_star_real = []
    vn_diff_star_imag = []
    vn_diff_star_denorm1 = []
    vn_diff_star_denorm2 = []
    vn_diff_alice_real = []
    vn_diff_alice_imag = []
    vn_diff_alice_denorm1 = []
    vn_diff_alice_denorm2 = []
    vn_diff_2PC_real = []
    vn_diff_2PC_imag = []
    vn_diff_2PC_denorm = []
    vn_diff_cms_real = []
    vn_diff_cms_imag = []
    vn_diff_cms_denorm1 = []
    vn_diff_cms_denorm2 = []
    vn_diff_atlas_real = []
    vn_diff_atlas_imag = []
    vn_diff_atlas_denorm1 = []
    vn_diff_atlas_denorm2 = []
for ifolder in range(nev):
results_folder = path.abspath(file_folder_list[ifolder])
temp_data = loadtxt(path.join(results_folder, file_name))
temp_data = nan_to_num(temp_data)
temp_data_ref = loadtxt(path.join(results_folder, file_name_ref))
temp_data_ref = nan_to_num(temp_data_ref)
dN_event = temp_data[:, 2] # dN/(2pi dy pT dpT)
pT_event = temp_data[:, 0]
# record particle spectra
pT_array.append(pT_event)
dN_array.append(dN_event)
# pT-integrated vn
# vn with PHENIX pT cut
temp_vn_array = calcualte_inte_vn(0.2, 2.0, temp_data)
vn_phenix_array.append(temp_vn_array)
# vn with STAR pT cut
temp_vn_array = calcualte_inte_vn(0.15, 2.0, temp_data)
vn_star_array.append(temp_vn_array)
# vn with ALICE pT cut
temp_vn_array = calcualte_inte_vn(0.2, 3.0, temp_data)
vn_alice_array.append(temp_vn_array)
# vn with CMS pT cut
temp_vn_array = calcualte_inte_vn(0.3, 3.0, temp_data)
vn_cms_array.append(temp_vn_array)
if particle_id == "9999":
temp_vn_arrays = (
calculate_vn_arrays_for_rn_ratios(temp_data))
vn_cms_arrays_for_rn.append(temp_vn_arrays)
# vn with ATLAS pT cut
temp_vn_array = calcualte_inte_vn(0.5, 3.0, temp_data)
vn_atlas_array.append(temp_vn_array)
# pT-differential vn using scalar-product method
# vn{SP}(pT) with PHENIX pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff, temp_vn2 = (
calculate_diff_vn_single_event(0.2, 2.0, temp_data,
temp_data_ref))
        vn_diff_phenix_real.append(temp_vn_diff_real)
        vn_diff_phenix_imag.append(temp_vn_diff_imag)
        vn_diff_phenix_denorm1.append(temp_dn_diff)
        vn_diff_phenix_denorm2.append(temp_vn2)
# vn{SP}(pT) with STAR pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff, temp_vn2 = (
calculate_diff_vn_single_event(0.15, 2.0, temp_data,
temp_data_ref))
        vn_diff_star_real.append(temp_vn_diff_real)
        vn_diff_star_imag.append(temp_vn_diff_imag)
        vn_diff_star_denorm1.append(temp_dn_diff)
        vn_diff_star_denorm2.append(temp_vn2)
# vn{SP}(pT) with ALICE pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff, temp_vn2 = (
calculate_diff_vn_single_event(0.2, 3.0, temp_data,
temp_data_ref))
        vn_diff_alice_real.append(temp_vn_diff_real)
        vn_diff_alice_imag.append(temp_vn_diff_imag)
        vn_diff_alice_denorm1.append(temp_dn_diff)
        vn_diff_alice_denorm2.append(temp_vn2)
# vn{SP}(pT) with CMS pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff, temp_vn2 = (
calculate_diff_vn_single_event(0.3, 3.0, temp_data,
temp_data_ref))
        vn_diff_cms_real.append(temp_vn_diff_real)
        vn_diff_cms_imag.append(temp_vn_diff_imag)
        vn_diff_cms_denorm1.append(temp_dn_diff)
        vn_diff_cms_denorm2.append(temp_vn2)
# vn{SP}(pT) with ATLAS pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff, temp_vn2 = (
calculate_diff_vn_single_event(0.5, 3.0, temp_data,
temp_data_ref))
        vn_diff_atlas_real.append(temp_vn_diff_real)
        vn_diff_atlas_imag.append(temp_vn_diff_imag)
        vn_diff_atlas_denorm1.append(temp_dn_diff)
        vn_diff_atlas_denorm2.append(temp_vn2)
# pT-differential vn using 2PC method
# vn[2](pT)
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
get_vn_diff_2PC_from_single_event(temp_data))
vn_diff_2PC_real.append(temp_vn_diff_real)
vn_diff_2PC_imag.append(temp_vn_diff_imag)
vn_diff_2PC_denorm.append(temp_dn_diff)
# now we perform event average
dN_array = array(dN_array)
pT_array = array(pT_array)
n_pT = len(pT_array[0, :])
pT_spectra = zeros([n_pT])
for ipT in range(len(pT_array[0, :])):
dN_temp = sum(dN_array[:, ipT]*pT_array[:, ipT])
        if dN_temp > 0:
pT_spectra[ipT] = (
sum(pT_array[:, ipT]**2.*dN_array[:, ipT])/dN_temp)
else:
pT_spectra[ipT] = mean(pT_array[:, ipT])
dN_spectra = mean(pT_array*dN_array, 0)/pT_spectra # dN/(2pi dy pT dpT)
dN_spectra_err = std(pT_array*dN_array, 0)/pT_spectra/sqrt(nev)
# calculate mean pT
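    # <pT> = sum(pT^2*dN)/sum(pT*dN) because dN_interp is the invariant
    # yield dN/(2pi dy pT dpT); the error is propagated by shifting the
    # spectrum up and down by one standard deviation.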
pT_interp = linspace(0.05, 2.95, 30)
dN_interp = exp(interp(pT_interp, pT_spectra, log(dN_spectra+1e-30)))
dN_interp_err = interp(pT_interp, pT_spectra, dN_spectra_err)
mean_pT = sum(pT_interp**2.*dN_interp)/sum(pT_interp*dN_interp)
mean_pT_upper = (sum(pT_interp**2.*(dN_interp+dN_interp_err))
/sum(pT_interp*(dN_interp+dN_interp_err)))
mean_pT_lower = (sum(pT_interp**2.*(dN_interp-dN_interp_err))
/sum(pT_interp*(dN_interp-dN_interp_err)))
mean_pT_err = max(abs(mean_pT_upper - mean_pT),
abs(mean_pT - mean_pT_lower))
pT_interp = linspace(0.15, 2.95, 30)
dN_interp = exp(interp(pT_interp, pT_spectra, log(dN_spectra+1e-30)))
dN_interp_err = interp(pT_interp, pT_spectra, dN_spectra_err)
mean_pT_1 = sum(pT_interp**2.*dN_interp)/sum(pT_interp*dN_interp)
mean_pT_1_upper = (sum(pT_interp**2.*(dN_interp+dN_interp_err))
/sum(pT_interp*(dN_interp+dN_interp_err)))
mean_pT_1_lower = (sum(pT_interp**2.*(dN_interp-dN_interp_err))
/sum(pT_interp*(dN_interp-dN_interp_err)))
mean_pT_1_err = max(abs(mean_pT_1_upper - mean_pT_1),
abs(mean_pT_1 - mean_pT_1_lower))
    # calculate vn{2}
    vn_phenix_2, vn_phenix_2_err = calculate_vn_2(vn_phenix_array)
    vn_star_2, vn_star_2_err = calculate_vn_2(vn_star_array)
    vn_alice_2, vn_alice_2_err = calculate_vn_2(vn_alice_array)
    vn_cms_2, vn_cms_2_err = calculate_vn_2(vn_cms_array)
    vn_atlas_2, vn_atlas_2_err = calculate_vn_2(vn_atlas_array)
if (particle_id == '9999'):
vn_alice_array2 = array(vn_alice_array)
vn_cms_array2 = array(vn_cms_array)
vn_atlas_array2 = array(vn_atlas_array)
        # calculate non-linear response coefficients with ALICE pT cut
        nonlinear_response_alice = calculate_nonlinear_response(vn_alice_array2)
        # calculate non-linear response coefficients with CMS pT cut
        nonlinear_response_cms = calculate_nonlinear_response(vn_cms_array2)
        # calculate non-linear response coefficients with ATLAS pT cut
        nonlinear_response_atlas = calculate_nonlinear_response(vn_atlas_array2)
        # calculate symmetric cumulant coefficients with ALICE pT cut
        SC_alice = calculate_symmetric_cumulant(vn_alice_array)
# calculate vn{4}
vn4_alice = calculate_vn4(vn_alice_array)
vn4_cms = calculate_vn4(vn_cms_array)
vn4_atlas = calculate_vn4(vn_atlas_array)
# calculate vn{4}/vn{2} and vn{6}/vn{4}
vn4_over_vn2_alice = calculate_vn4_over_vn2(vn_alice_array)
vn4_over_vn2_cms = calculate_vn4_over_vn2(vn_cms_array)
vn4_over_vn2_atlas = calculate_vn4_over_vn2(vn_atlas_array)
vn6_over_vn4_alice = calculate_vn6_over_vn4(vn_alice_array)
vn6_over_vn4_cms = calculate_vn6_over_vn4(vn_cms_array)
vn6_over_vn4_atlas = calculate_vn6_over_vn4(vn_atlas_array)
# calculate vn distribution for charged hadrons
vn_phenix_dis = calculate_vn_distribution(vn_phenix_array)
vn_star_dis = calculate_vn_distribution(vn_star_array)
vn_alice_dis = calculate_vn_distribution(vn_alice_array)
vn_cms_dis = calculate_vn_distribution(vn_cms_array)
vn_atlas_dis = calculate_vn_distribution(vn_atlas_array)
# calculate rn ratios
rn_cms = calculate_rn_ratios(vn_cms_arrays_for_rn)
        # calculate flow event-plane correlations
        vn_corr_alice, vn_corr_alice_err = (
            calculate_event_plane_correlations(vn_alice_array))
        vn_corr_atlas, vn_corr_atlas_err = (
            calculate_event_plane_correlations(vn_atlas_array))
    # calculate vn{SP}(pT)
vn_diff_SP_phenix, vn_diff_SP_phenix_err = calculate_vn_diff_SP(
vn_diff_phenix_real, vn_diff_phenix_imag,
vn_diff_phenix_denorm1, vn_diff_phenix_denorm2)
vn_diff_SP_star, vn_diff_SP_star_err = calculate_vn_diff_SP(
vn_diff_star_real, vn_diff_star_imag,
vn_diff_star_denorm1, vn_diff_star_denorm2)
vn_diff_SP_alice, vn_diff_SP_alice_err = calculate_vn_diff_SP(
vn_diff_alice_real, vn_diff_alice_imag,
vn_diff_alice_denorm1, vn_diff_alice_denorm2)
vn_diff_SP_cms, vn_diff_SP_cms_err = calculate_vn_diff_SP(
vn_diff_cms_real, vn_diff_cms_imag,
vn_diff_cms_denorm1, vn_diff_cms_denorm2)
vn_diff_SP_atlas, vn_diff_SP_atlas_err = calculate_vn_diff_SP(
vn_diff_atlas_real, vn_diff_atlas_imag,
vn_diff_atlas_denorm1, vn_diff_atlas_denorm2)
    # calculate vn[2](pT)
vn_diff_2PC, vn_diff_2PC_err = calculate_vn_diff_2PC(
vn_diff_2PC_real, vn_diff_2PC_imag, vn_diff_2PC_denorm)
# then particle rapidity distribution
if particle_id == '9999':
file_name = 'particle_%s_dNdeta_pT_0.2_3.dat' % particle_id
else:
file_name = 'particle_%s_dNdy_pT_0.2_3.dat' % particle_id
eta_array = []
dN_array = []
vn_array = []
for ifolder in range(nev):
results_folder = path.abspath(file_folder_list[ifolder])
temp_data = loadtxt(path.join(results_folder, file_name))
temp_data = nan_to_num(temp_data)
eta_array.append(temp_data[:, 0])
dN_array.append(temp_data[:, 1])
temp_vn_array = []
for iorder in range(1, n_order):
vn_real = temp_data[:, 6*iorder-3]
vn_imag = temp_data[:, 6*iorder-1]
vn = vn_real + 1j*vn_imag
temp_vn_array.append(vn)
vn_array.append(temp_vn_array)
eta_array = array(eta_array)
dN_array = array(dN_array)
vn_array = array(vn_array)
eta_point = mean(eta_array, 0)
dNdeta = mean(dN_array, 0)
dNdeta_err = std(dN_array, 0)/sqrt(nev)
vn_SP_eta, vn_SP_eta_err = calculate_vn_eta(dN_array, vn_array)
rn_eta, rn_eta_err, rnn_eta, rnn_eta_err = calculate_rn_eta(eta_point, dN_array, vn_array)
vn_eta_real = mean(real(vn_array), 0)
vn_eta_real_err = std(real(vn_array), 0)/sqrt(nev)
###########################################################################
# finally, output all the results
###########################################################################
    if particle_id == '9999':
# output non-linear response coefficients chi_n for CMS pt cut
output_filename = ("non_linear_response_coefficients_CMS.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(nonlinear_reponse_correlator_name_list)):
f.write("%s %.10e %.10e\n"
% (nonlinear_reponse_correlator_name_list[i],
nonlinear_response_cms[2*i],
nonlinear_response_cms[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
# output non-linear response coefficients chi_n for ALICE pt cut
output_filename = ("non_linear_response_coefficients_ALICE.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(nonlinear_reponse_correlator_name_list)):
f.write("%s %.10e %.10e\n"
% (nonlinear_reponse_correlator_name_list[i],
nonlinear_response_alice[2*i],
nonlinear_response_alice[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
# output non-linear response coefficients chi_n for ATLAS pt cut
output_filename = ("non_linear_response_coefficients_ATLAS.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(nonlinear_reponse_correlator_name_list)):
f.write("%s %.10e %.10e\n"
% (nonlinear_reponse_correlator_name_list[i],
nonlinear_response_atlas[2*i],
nonlinear_response_atlas[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
# output symmetric cumulants for ALICE pt cut
output_filename = ("symmetric_cumulant_ALICE.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(symmetric_cumulant_name_list)):
f.write("%s %.10e %.10e\n"
% (symmetric_cumulant_name_list[i],
SC_alice[2*i], SC_alice[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn4 for ALICE pt cut
output_filename = ("charged_hadron_vn4_ALICE.dat")
f = open(output_filename, 'w')
f.write("# n vn{4} vn{4}_err Cn{4} Cn{4}_err\n")
for i in range(1, 4):
f.write("%d %.10e %.10e %.10e %.10e\n"
% (i, vn4_alice[4*i-4], vn4_alice[4*i-3],
vn4_alice[4*i-2], vn4_alice[4*i-1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn4 for CMS pt cut
output_filename = ("charged_hadron_vn4_CMS.dat")
f = open(output_filename, 'w')
f.write("# n vn{4} vn{4}_err Cn{4} Cn{4}_err\n")
for i in range(1, 4):
f.write("%d %.10e %.10e %.10e %.10e\n"
% (i, vn4_cms[4*i-4], vn4_cms[4*i-3],
vn4_cms[4*i-2], vn4_cms[4*i-1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn4 for ATLAS pt cut
output_filename = ("charged_hadron_vn4_ATLAS.dat")
f = open(output_filename, 'w')
f.write("# n vn{4} vn{4}_err Cn{4} Cn{4}_err\n")
for i in range(1, 4):
f.write("%d %.10e %.10e %.10e %.10e\n"
% (i, vn4_atlas[4*i-4], vn4_atlas[4*i-3],
vn4_atlas[4*i-2], vn4_atlas[4*i-1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn4/vn2 ratio for ALICE pt cut
output_filename = ("charged_hadron_vn4_over_vn2_ALICE.dat")
f = open(output_filename, 'w')
f.write("# n vn{4}/vn{2} (vn{4}/vn{2})_err Fn Fn_err \n")
f.write("# Fn = sqrt((vn{2}^2 - vn{4}^2)/(vn{2}^2 + vn{4}^2)) \n")
for i in range(1, 4):
f.write("%d %.10e %.10e %.10e %.10e\n"
% (i, vn4_over_vn2_alice[4*i-4], vn4_over_vn2_alice[4*i-3],
vn4_over_vn2_alice[4*i-2], vn4_over_vn2_alice[4*i-1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn4/vn2 ratio for CMS pt cut
output_filename = ("charged_hadron_vn4_over_vn2_CMS.dat")
f = open(output_filename, 'w')
f.write("# n vn{4}/vn{2} (vn{4}/vn{2})_err Fn Fn_err \n")
f.write("# Fn = sqrt((vn{2}^2 - vn{4}^2)/(vn{2}^2 + vn{4}^2)) \n")
for i in range(1, 4):
f.write("%d %.10e %.10e %.10e %.10e\n"
% (i, vn4_over_vn2_cms[4*i-4], vn4_over_vn2_cms[4*i-3],
vn4_over_vn2_cms[4*i-2], vn4_over_vn2_cms[4*i-1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn4/vn2 ratio for ATLAS pt cut
output_filename = ("charged_hadron_vn4_over_vn2_ATLAS.dat")
f = open(output_filename, 'w')
f.write("# n vn{4}/vn{2} (vn{4}/vn{2})_err Fn Fn_err \n")
f.write("# Fn = sqrt((vn{2}^2 - vn{4}^2)/(vn{2}^2 + vn{4}^2)) \n")
for i in range(1, 4):
f.write("%d %.10e %.10e %.10e %.10e\n"
% (i, vn4_over_vn2_atlas[4*i-4], vn4_over_vn2_atlas[4*i-3],
vn4_over_vn2_atlas[4*i-2], vn4_over_vn2_atlas[4*i-1]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn6/vn4 ratio for ALICE pt cut
output_filename = ("charged_hadron_vn6_over_vn4_ALICE.dat")
f = open(output_filename, 'w')
f.write("# n vn{6}/vn{4} (vn{6}/vn{4})_err gamma_1 gamma_1_err \n")
f.write("%d %.10e %.10e %.10e %.10e\n"
% (2, vn6_over_vn4_alice[0], vn6_over_vn4_alice[1],
vn6_over_vn4_alice[2], vn6_over_vn4_alice[3]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn6/vn4 ratio for CMS pt cut
output_filename = ("charged_hadron_vn6_over_vn4_CMS.dat")
f = open(output_filename, 'w')
f.write("# n vn{6}/vn{4} (vn{6}/vn{4})_err gamma_1 gamma_1_err \n")
f.write("%d %.10e %.10e %.10e %.10e\n"
% (2, vn6_over_vn4_cms[0], vn6_over_vn4_cms[1],
vn6_over_vn4_cms[2], vn6_over_vn4_cms[3]))
f.close()
shutil.move(output_filename, avg_folder)
# output vn6/vn4 ratio for ATLAS pt cut
output_filename = ("charged_hadron_vn6_over_vn4_ATLAS.dat")
f = open(output_filename, 'w')
f.write("# n vn{6}/vn{4} (vn{6}/vn{4})_err gamma_1 gamma_1_err \n")
f.write("%d %.10e %.10e %.10e %.10e\n"
% (2, vn6_over_vn4_atlas[0], vn6_over_vn4_atlas[1],
vn6_over_vn4_atlas[2], vn6_over_vn4_atlas[3]))
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_integrated_observables.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("dN/dy= %.10e +/- %.10e\n" % (dN_dy_avg, dN_dy_avg_err))
f.write("<pT>= %.10e +/- %.10e\n" % (mean_pT, mean_pT_err))
f.write("<pT(>0.15)>= %.10e +/- %.10e\n" % (mean_pT_1, mean_pT_1_err))
for iorder in range(1, n_order):
f.write("v_%d{2}(phenix)= %.10e +/- %.10e\n"
% (iorder, vn_phenix_2[iorder-1], vn_phenix_2_err[iorder-1]))
f.write("v_%d{2}(STAR)= %.10e +/- %.10e\n"
% (iorder, vn_star_2[iorder-1], vn_star_2_err[iorder-1]))
f.write("v_%d{2}(ALICE)= %.10e +/- %.10e\n"
% (iorder, vn_alice_2[iorder-1], vn_alice_2_err[iorder-1]))
f.write("v_%d{2}(CMS)= %.10e +/- %.10e\n"
% (iorder, vn_cms_2[iorder-1], vn_cms_2_err[iorder-1]))
f.write("v_%d{2}(ATLAS)= %.10e +/- %.10e\n"
% (iorder, vn_atlas_2[iorder-1], vn_atlas_2_err[iorder-1]))
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_PHENIX.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_phenix[iorder-1, ipT],
vn_diff_SP_phenix_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_STAR.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_star[iorder-1, ipT],
vn_diff_SP_star_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_ALICE.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_alice[iorder-1, ipT],
vn_diff_SP_alice_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_2PC.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn[2] vn[2]_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_2PC[iorder-1, ipT],
vn_diff_2PC_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_CMS.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_cms[iorder-1, ipT],
vn_diff_SP_cms_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_ATLAS.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_atlas[iorder-1, ipT],
vn_diff_SP_atlas_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_rapidity_distribution.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
    if particle_id == '9999':
f.write("#eta dN/deta dN/deta_err vn{2}(eta) vn{2}(eta)_err\n")
else:
f.write("#y dN/dy dN/dy_err vn{2}(y) vn{2}(y)_err\n")
for ieta in range(len(eta_point)):
f.write("%.10e %.10e %.10e "
% (eta_point[ieta], dNdeta[ieta], dNdeta_err[ieta]))
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e %.10e "
% (vn_SP_eta[iorder-1, ieta],
vn_SP_eta_err[iorder-1, ieta],
vn_eta_real[iorder-1, ieta],
vn_eta_real_err[iorder-1, ieta]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
if (particle_id == '9999'):
output_filename = ("%s_vn_distribution_PHENIX.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_phenix_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_phenix_dis[ipT, 3*(iorder-1)],
vn_phenix_dis[ipT, 3*(iorder-1)+1],
vn_phenix_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_STAR.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_star_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_star_dis[ipT, 3*(iorder-1)],
vn_star_dis[ipT, 3*(iorder-1)+1],
vn_star_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_ALICE.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_alice_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_alice_dis[ipT, 3*(iorder-1)],
vn_alice_dis[ipT, 3*(iorder-1)+1],
vn_alice_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_CMS.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_cms_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_cms_dis[ipT, 3*(iorder-1)],
vn_cms_dis[ipT, 3*(iorder-1)+1],
vn_cms_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_ATLAS.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_atlas_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_atlas_dis[ipT, 3*(iorder-1)],
vn_atlas_dis[ipT, 3*(iorder-1)+1],
vn_atlas_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
# output rn ratios
pT_trig = ['1.0', '1.5', '2.0', '2.5', '3.0']
ipTtrig = 0
output_filename = ("%s_rn_ratios_CMS_pTtrig_%s_%s.dat"
% (particle_name_list[ipart],
pT_trig[ipTtrig], pT_trig[ipTtrig+1]))
f = open(output_filename, 'w')
f.write("#pT_mid rn rn_err (n = 2, 3, 4)\n")
for ipT in range(len(rn_cms[0, :, 0])):
for iorder in range(len(rn_cms[:, 0, 0])):
f.write("%.5e %.5e %.5e "
% (rn_cms[iorder, ipT, 0],
rn_cms[iorder, ipT, 1],
rn_cms[iorder, ipT, 2]))
f.write("\n")
if rn_cms[0, ipT, 0] == 0.0:
f.close()
shutil.move(output_filename, avg_folder)
ipTtrig += 1
if ipTtrig < (len(pT_trig) - 1):
output_filename = ("%s_rn_ratios_CMS_pTtrig_%s_%s.dat"
% (particle_name_list[ipart],
pT_trig[ipTtrig],
pT_trig[ipTtrig+1]))
f = open(output_filename, 'w')
f.write("#pT_mid rn rn_err (n = 2, 3, 4)\n")
output_filename = ("%s_rn_eta.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#eta rn(eta) rn_err(eta) rnn(eta) rnn_err(eta)\n")
for ieta in range(len(eta_point)-1):
f.write("%.10e " % eta_point[ieta])
for iorder in range(0, n_order-1):
f.write("%.10e %.10e %.10e %.10e "
% (rn_eta[iorder, ieta],
rn_eta_err[iorder, ieta],
rnn_eta[iorder, ieta],
rnn_eta_err[iorder, ieta]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
# output flow event-plane correlation
output_filename = ("%s_event_plane_correlation_ALICE.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#correlator value value_err\n")
f.write("4(24) %.5e %.5e\n"
% (vn_corr_alice[0], vn_corr_alice_err[0]))
f.write("6(23) %.5e %.5e\n"
% (vn_corr_alice[1], vn_corr_alice_err[1]))
f.write("6(26) %.5e %.5e\n"
% (vn_corr_alice[2], vn_corr_alice_err[2]))
f.write("6(36) %.5e %.5e\n"
% (vn_corr_alice[3], vn_corr_alice_err[3]))
f.write("(235) %.5e %.5e\n"
% (vn_corr_alice[4], vn_corr_alice_err[4]))
f.write("(246) %.5e %.5e\n"
% (vn_corr_alice[5], vn_corr_alice_err[5]))
f.write("(234) %.5e %.5e\n"
% (vn_corr_alice[6], vn_corr_alice_err[6]))
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_event_plane_correlation_ATLAS.dat"
% particle_name_list[ipart])
f = open(output_filename, 'w')
f.write("#correlator value value_err\n")
f.write("4(24) %.5e %.5e\n"
% (vn_corr_atlas[0], vn_corr_atlas_err[0]))
f.write("6(23) %.5e %.5e\n"
% (vn_corr_atlas[1], vn_corr_atlas_err[1]))
f.write("6(26) %.5e %.5e\n"
% (vn_corr_atlas[2], vn_corr_atlas_err[2]))
f.write("6(36) %.5e %.5e\n"
% (vn_corr_atlas[3], vn_corr_atlas_err[3]))
f.write("(235) %.5e %.5e\n"
% (vn_corr_atlas[4], vn_corr_atlas_err[4]))
f.write("(246) %.5e %.5e\n"
% (vn_corr_atlas[5], vn_corr_atlas_err[5]))
f.write("(234) %.5e %.5e\n"
% (vn_corr_atlas[6], vn_corr_atlas_err[6]))
f.close()
shutil.move(output_filename, avg_folder)
print("Analysis is done.")
| chunshen1987/HBTcorrelation_MCafterburner | ebe_scripts/average_event_spvn_unitweight.py | Python | mit | 78494 | [ "Psi4" ] | e151fc03acb9bcb8f7698da9070818f190ef92fcdf8cf2293838ab4f5b8d5ebe |
"""
JupyterHub Spawner to spawn user notebooks on a Kubernetes cluster.
This module exports `KubeSpawner` class, which is the actual spawner
implementation that should be used by JupyterHub.
"""
import asyncio
import os
import signal
import string
import sys
import warnings
from functools import partial
from functools import wraps
from urllib.parse import urlparse
import escapism
from jinja2 import BaseLoader
from jinja2 import Environment
from jupyterhub.spawner import Spawner
from jupyterhub.traitlets import Command
from jupyterhub.utils import exponential_backoff
from kubernetes_asyncio import client
from kubernetes_asyncio.client.rest import ApiException
from slugify import slugify
from tornado import gen
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import Integer
from traitlets import List
from traitlets import observe
from traitlets import Unicode
from traitlets import Union
from traitlets import validate
from .clients import load_config
from .clients import shared_client
from .objects import make_namespace
from .objects import make_owner_reference
from .objects import make_pod
from .objects import make_pvc
from .objects import make_secret
from .objects import make_service
from .reflector import ResourceReflector
from .traitlets import Callable
class PodReflector(ResourceReflector):
"""
PodReflector is merely a configured ResourceReflector. It exposes
    the pods property, which simply maps to self.resources, where the
ResourceReflector keeps an updated list of the resource defined by
the `kind` field and the `list_method_name` field.
"""
kind = "pods"
    # The default component label can be overridden by specifying the component_label property
labels = {
'component': 'singleuser-server',
}
@property
def pods(self):
"""
A dictionary of pods for the namespace as returned by the Kubernetes
API. The dictionary keys are the pod ids and the values are
dictionaries of the actual pod resource values.
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#pod-v1-core
"""
return self.resources
class EventReflector(ResourceReflector):
"""
EventsReflector is merely a configured ResourceReflector. It
    exposes the events property, which simply maps to self.resources, where
the ResourceReflector keeps an updated list of the resource
defined by the `kind` field and the `list_method_name` field.
"""
kind = "events"
@property
def events(self):
"""
Returns list of dictionaries representing the k8s
events within the namespace, sorted by the latest event.
ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
"""
        # NOTE:
        # - self.resources is a dictionary with keys mapping unique ids of
        #   Kubernetes Event resources, updated by ResourceReflector.
        #   self.resources builds up with incoming k8s events, but can also
        #   suddenly refresh itself entirely. We should not assume a call to
        #   this dictionary's values will result in a consistently ordered
        #   list, so we sort it to get it somewhat more structured.
        # - We either seem to get only event['lastTimestamp'] or
        #   event['eventTime']; both fields serve the same role, but the
        #   former is a low-resolution timestamp and the latter a
        #   higher-resolution one.
return sorted(
self.resources.values(),
key=lambda event: event["lastTimestamp"] or event["eventTime"],
)
class MockObject(object):
pass
class KubeSpawner(Spawner):
"""
    A JupyterHub spawner that spawns pods in a Kubernetes cluster. Each server
    spawned by a user will have its own KubeSpawner instance.
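
    A minimal configuration sketch in ``jupyterhub_config.py`` (the image
    name and namespace below are placeholders, not defaults)::

        c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
        c.KubeSpawner.image = 'jupyter/base-notebook:latest'
        c.KubeSpawner.namespace = 'jupyterhub'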
"""
reflectors = {
"pods": None,
"events": None,
}
# Characters as defined by safe for DNS
# Note: '-' is not in safe_chars, as it is being used as escape character
safe_chars = set(string.ascii_lowercase + string.digits)
@property
def pod_reflector(self):
"""
A convenience alias to the class variable reflectors['pods'].
"""
return self.__class__.reflectors['pods']
@property
def event_reflector(self):
"""
A convenience alias to the class variable reflectors['events'] if the
spawner instance has events_enabled.
"""
if self.events_enabled:
return self.__class__.reflectors['events']
def __init__(self, *args, **kwargs):
_mock = kwargs.pop('_mock', False)
super().__init__(*args, **kwargs)
if _mock:
# runs during test execution only
if 'user' not in kwargs:
user = MockObject()
user.name = 'mock_name'
user.id = 'mock_id'
user.url = 'mock_url'
self.user = user
if 'hub' not in kwargs:
hub = MockObject()
hub.public_host = 'mock_public_host'
hub.url = 'mock_url'
hub.base_url = 'mock_base_url'
hub.api_url = 'mock_api_url'
self.hub = hub
# We have to set the namespace (if user namespaces are enabled)
# before we start the reflectors, so this must run before
# watcher start in normal execution. We still want to get the
# namespace right for test, though, so we need self.user to have
# been set in order to do that.
# By now, all the traitlets have been set, so we can use them to
# compute other attributes
if self.enable_user_namespaces:
self.namespace = self._expand_user_properties(self.user_namespace_template)
self.log.info("Using user namespace: {}".format(self.namespace))
self.pod_name = self._expand_user_properties(self.pod_name_template)
self.dns_name = self.dns_name_template.format(
namespace=self.namespace, name=self.pod_name
)
self.secret_name = self._expand_user_properties(self.secret_name_template)
self.pvc_name = self._expand_user_properties(self.pvc_name_template)
if self.working_dir:
self.working_dir = self._expand_user_properties(self.working_dir)
if self.port == 0:
# Our default port is 8888
self.port = 8888
# The attribute needs to exist, even though it is unset to start with
self._start_future = None
load_config(host=self.k8s_api_host, ssl_ca_cert=self.k8s_api_ssl_ca_cert)
self.api = shared_client("CoreV1Api")
self._start_watching_pods()
if self.events_enabled:
self._start_watching_events()
def _await_pod_reflector(method):
"""Decorator to wait for pod reflector to load
Apply to methods which require the pod reflector
to have completed its first load of pods.
"""
@wraps(method)
async def async_method(self, *args, **kwargs):
if not self.pod_reflector.first_load_future.done():
await self.pod_reflector.first_load_future
return await method(self, *args, **kwargs)
return async_method
def _await_event_reflector(method):
"""Decorator to wait for event reflector to load
Apply to methods which require the event reflector
to have completed its first load of events.
"""
@wraps(method)
async def async_method(self, *args, **kwargs):
if (
self.events_enabled
and not self.event_reflector.first_load_future.done()
):
await self.event_reflector.first_load_future
return await method(self, *args, **kwargs)
return async_method
k8s_api_ssl_ca_cert = Unicode(
"",
config=True,
help="""
Location (absolute filepath) for CA certs of the k8s API server.
Typically this is unnecessary, CA certs are picked up by
config.load_incluster_config() or config.load_kube_config.
In rare non-standard cases, such as using custom intermediate CA
for your cluster, you may need to mount root CA's elsewhere in
your Pod/Container and point this variable to that filepath
""",
)
k8s_api_host = Unicode(
"",
config=True,
help="""
Full host name of the k8s API server ("https://hostname:port").
Typically this is unnecessary, the hostname is picked up by
config.load_incluster_config() or config.load_kube_config.
""",
)
k8s_api_threadpool_workers = Integer(
config=True,
help="""
DEPRECATED in KubeSpawner 3.0.0.
No longer has any effect, as there is no threadpool anymore.
""",
)
k8s_api_request_timeout = Integer(
3,
config=True,
help="""
API request timeout (in seconds) for all k8s API calls.
This is the total amount of time a request might take before the connection
is killed. This includes connection time and reading the response.
NOTE: This is currently only implemented for creation and deletion of pods,
and creation of PVCs.
""",
)
k8s_api_request_retry_timeout = Integer(
30,
config=True,
help="""
Total timeout, including retry timeout, for kubernetes API calls
When a k8s API request connection times out, we retry it while backing
off exponentially. This lets you configure the total amount of time
we will spend trying an API request - including retries - before
giving up.
""",
)
events_enabled = Bool(
True,
config=True,
help="""
Enable event-watching for progress-reports to the user spawn page.
Disable if these events are not desirable
or to save some performance cost.
""",
)
enable_user_namespaces = Bool(
False,
config=True,
help="""
Cause each user to be spawned into an individual namespace.
This comes with some caveats. The Hub must run with significantly
more privilege (must have ClusterRoles analogous to its usual Roles)
and can therefore do heinous things to the entire cluster.
It will also make the Reflectors aware of pods and events across
all namespaces. This will have performance implications, although
using labels to restrict resource selection helps somewhat.
If you use this, consider cleaning up the user namespace in your
post_stop_hook.
""",
)
user_namespace_template = Unicode(
"{hubnamespace}-{username}",
config=True,
help="""
Template to use to form the namespace of user's pods (only if
enable_user_namespaces is True).
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
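For example, with the default template (names are hypothetical)::
    c.KubeSpawner.user_namespace_template = "{hubnamespace}-{username}"
    # hub namespace "jhub", user "alice" -> user namespace "jhub-alice"
    # (a hub namespace of "default" is replaced by "user")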
""",
)
namespace = Unicode(
config=True,
help="""
Kubernetes namespace to spawn user pods in.
Assuming that you are not running with enable_user_namespaces
turned on, if running inside a kubernetes cluster with service
accounts enabled, defaults to the current namespace, and if not,
defaults to `default`.
If you are running with enable_user_namespaces, this parameter
is ignored in favor of the `user_namespace_template` template
resolved with the hub namespace and the user name, with the
caveat that if the hub namespace is `default` the user
namespace will have the prefix `user` rather than `default`.
""",
)
@default('namespace')
def _namespace_default(self):
"""
Set namespace default to current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
`default`
"""
ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return 'default'
ip = Unicode(
'0.0.0.0',
config=True,
help="""
The IP address (or hostname) the single-user server should listen on.
We override this from the parent so we can set a more sane default for
the Kubernetes setup.
""",
)
cmd = Command(
None,
allow_none=True,
minlen=0,
config=True,
help="""
The command used to start the single-user server.
Either
- a string containing a single command or path to a startup script
- a list of the command and arguments
- `None` (default) to use the Docker image's `CMD`
If `cmd` is set, it will be augmented with `spawner.get_args()`. This will override the `CMD` specified in the Docker image.
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
working_dir = Unicode(
None,
allow_none=True,
config=True,
help="""
The working directory where the Notebook server will be started inside the container.
Defaults to `None` so the working directory will be the one defined in the Dockerfile.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
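For example (the path is hypothetical)::
    c.KubeSpawner.working_dir = '/home/jovyan/work'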
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
service_account = Unicode(
None,
allow_none=True,
config=True,
help="""
The service account to be mounted in the spawned user pod.
The token of the service account is NOT mounted by default.
This makes sure that we don't accidentally give access to the whole
kubernetes API to the users in the spawned pods.
Set automount_service_account_token True to mount it.
This service account must already exist in the namespace the user pod is being spawned in.
""",
)
automount_service_account_token = Bool(
None,
allow_none=True,
config=True,
help="""
Whether to mount the service account token in the spawned user pod.
The default value is None, which mounts the token if the service account is explicitly set,
but doesn't mount it if not.
WARNING: Be careful with this configuration! Make sure the service account being mounted
has the minimal permissions needed, and nothing more. When misconfigured, this can easily
give arbitrary users root over your entire cluster.
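A minimal sketch, assuming a service account named `user-sa` has
already been created in the target namespace::
    c.KubeSpawner.service_account = 'user-sa'
    c.KubeSpawner.automount_service_account_token = True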
""",
)
dns_name_template = Unicode(
"{name}.{namespace}.svc.cluster.local",
config=True,
help="""
Template to use to form the dns name for the pod.
""",
)
pod_name_template = Unicode(
'jupyter-{username}--{servername}',
config=True,
help="""
Template to use to form the name of user's pods.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
Trailing `-` characters are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pods are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
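For example, with the default template (the user name is hypothetical)::
    c.KubeSpawner.pod_name_template = 'jupyter-{username}--{servername}'
    # user 'alice', default server ('') -> pod name 'jupyter-alice'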
.. versionchanged:: 0.12
`--` delimiter added to the template,
where it was implicitly added to the `servername` field before.
Additionally, `username--servername` delimiter was `-` instead of `--`,
allowing collisions in certain circumstances.
""",
)
pod_connect_ip = Unicode(
config=True,
help="""
The IP address (or hostname) of user's pods which KubeSpawner connects to.
If you do not specify the value, KubeSpawner will use the pod IP.
e.g. 'jupyter-{username}--{servername}.notebooks.jupyterhub.svc.cluster.local',
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
Trailing `-` characters in each domain level are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the pods are being spawned
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
""",
)
storage_pvc_ensure = Bool(
False,
config=True,
help="""
Ensure that a PVC exists for each user before spawning.
Set to true to create a PVC named with `pvc_name_template` if it does
not exist for the user when their pod is spawning.
""",
)
delete_pvc = Bool(
True,
config=True,
help="""Delete PVCs when deleting Spawners.
When a Spawner is deleted (not just stopped),
delete its associated PVC.
This occurs when a named server is deleted,
or when the user itself is deleted for the default Spawner.
Requires JupyterHub 1.4.1 for Spawner.delete_forever support.
.. versionadded: 0.17
""",
)
pvc_name_template = Unicode(
'claim-{username}--{servername}',
config=True,
help="""
Template to use to form the name of user's pvc.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
Trailing `-` characters are stripped for safe handling of empty server names (user default servers).
This must be unique within the namespace the PVCs are being created
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
.. versionchanged:: 0.12
`--` delimiter added to the template,
where it was implicitly added to the `servername` field before.
Additionally, `username--servername` delimiter was `-` instead of `--`,
allowing collisions in certain circumstances.
""",
)
component_label = Unicode(
'singleuser-server',
config=True,
help="""
The component label used to tag the user pods. This can be used to override
the spawner behavior when dealing with multiple hub instances in the same
namespace. Usually helpful for CI workflows.
""",
)
secret_name_template = Unicode(
'jupyter-{username}{servername}',
config=True,
help="""
Template to use to form the name of user's secret.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
This must be unique within the namespace the secrets are being created
in, so if you are running multiple jupyterhubs spawning in the
same namespace, consider setting this to be something more unique.
""",
)
secret_mount_path = Unicode(
"/etc/jupyterhub/ssl/",
allow_none=False,
config=True,
help="""
Location to mount the spawned pod's certificates needed for internal_ssl functionality.
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
hub_connect_ip = Unicode(
allow_none=True,
config=True,
help="""DEPRECATED. Use c.JupyterHub.hub_connect_ip""",
)
hub_connect_port = Integer(
config=True, help="""DEPRECATED. Use c.JupyterHub.hub_connect_url"""
)
@observe('hub_connect_ip', 'hub_connect_port')
def _deprecated_changed(self, change):
warnings.warn(
"""
KubeSpawner.{0} is deprecated with JupyterHub >= 0.8.
Use JupyterHub.{0}
""".format(
change.name
),
DeprecationWarning,
)
setattr(self.hub, change.name.split('_', 1)[1], change.new)
common_labels = Dict(
{
'app': 'jupyterhub',
'heritage': 'jupyterhub',
},
config=True,
help="""
Kubernetes labels that both spawned singleuser server pods and created
user PVCs will get.
Note that these are only set when the Pods and PVCs are created, not
later when this setting is updated.
""",
)
extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the spawned single-user pods, as well
as on the pods' associated k8s Service and k8s Secret if internal_ssl is
enabled.
The keys and values specified here would be set as labels on the spawned single-user
kubernetes pods. The keys and values must both be strings that match the kubernetes
label key / value constraints.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
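For example (the label key is hypothetical)::
    c.KubeSpawner.extra_labels = {'hub.jupyter.org/owner': '{username}'}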
""",
)
extra_annotations = Dict(
config=True,
help="""
Extra Kubernetes annotations to set on the spawned single-user pods, as
well as on the pods' associated k8s Service and k8s Secret if
internal_ssl is enabled.
The keys and values specified here are added as annotations on the spawned single-user
kubernetes pods. The keys and values must both be strings.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__
for more info on what annotations are and why you might want to use them!
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
image = Unicode(
'jupyterhub/singleuser:latest',
config=True,
help="""
Docker image to use for spawning user's containers.
Defaults to `jupyterhub/singleuser:latest`
Name of the image + a tag, same as would be used with
a `docker pull` command. If tag is set to `latest`, kubernetes will
check the registry each time a new user is spawned to see if there
is a newer image available. If available, the new image will be pulled.
Note that this could cause long delays when spawning, especially
if the image is large. If you do not specify a tag, whatever version
of the image is first pulled on the node will be used, thus possibly
leading to inconsistent images on different nodes. For all these
reasons, it is recommended to specify a specific immutable tag
for the image.
If your image is very large, you might need to increase the timeout
for starting the single user container from the default. You can
set this with::
c.KubeSpawner.start_timeout = 60 * 5 # Up to 5 minutes
""",
)
image_pull_policy = Unicode(
'IfNotPresent',
config=True,
help="""
The image pull policy of the docker container specified in
`image`.
Defaults to `IfNotPresent` which causes the Kubelet to NOT pull the image
specified in KubeSpawner.image if it already exists, except if the tag
is `:latest`. For more information on image pull policy,
refer to `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/images/>`__.
This configuration is primarily used in development if you are
actively changing the `image` and would like to pull the image
whenever a user container is spawned.
""",
)
image_pull_secrets = Union(
trait_types=[
List(),
Unicode(),
],
config=True,
help="""
A list of references to Kubernetes Secret resources with credentials to
pull images from image registries. This list can either have strings in
it or objects with the string value nested under a name field.
Passing a single string is still supported, but deprecated as of
KubeSpawner 0.14.0.
See `the Kubernetes documentation
<https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod>`__
for more information on when and why this might need to be set, and what
it should be set to.
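For example, referencing a pre-created Secret (the name is
hypothetical)::
    c.KubeSpawner.image_pull_secrets = [{"name": "registry-credentials"}]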
""",
)
@validate('image_pull_secrets')
def _validate_image_pull_secrets(self, proposal):
if isinstance(proposal['value'], str):
warnings.warn(
"""Passing KubeSpawner.image_pull_secrets string values is
deprecated since KubeSpawner 0.14.0. The recommended
configuration is now a list of either strings or dictionary
objects, with the string referencing the Kubernetes Secret name
under the dictionary's name key.""",
DeprecationWarning,
)
return [{"name": proposal['value']}]
return proposal['value']
node_selector = Dict(
config=True,
help="""
The dictionary of selector labels used to match the nodes where pods will be launched.
Default is empty, which means pods can be launched on any available node.
For example, to match nodes that have a label of `disktype: ssd`, use::
c.KubeSpawner.node_selector = {'disktype': 'ssd'}
""",
)
uid = Union(
trait_types=[
Integer(),
Callable(),
],
default_value=None,
allow_none=True,
config=True,
help="""
The UID to run the single-user server containers as.
This UID should ideally map to a user that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the user specified with the `USER` directive in the
container metadata is used.
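A minimal sketch of the callable form (the UID mapping itself is
hypothetical)::
    def assign_uid(spawner):
        return 1000 + spawner.user.id
    c.KubeSpawner.uid = assign_uid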
""",
)
gid = Union(
trait_types=[
Integer(),
Callable(),
],
default_value=None,
allow_none=True,
config=True,
help="""
The GID to run the single-user server containers as.
This GID should ideally map to a group that already exists in the container
image being used. Running as root is discouraged.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable
will be called asynchronously if it returns a future. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
If set to `None`, the group of the user specified with the `USER` directive
in the container metadata is used.
""",
)
fs_gid = Union(
trait_types=[
Integer(),
Callable(),
],
default_value=None,
allow_none=True,
config=True,
help="""
The GID of the group that should own any volumes that are created & mounted.
A special supplemental group that applies primarily to the volumes mounted
in the single-user server. In volumes from supported providers, the following
things happen:
1. The owning GID will be this GID
2. The setgid bit is set (new files created in the volume will be owned by
this GID)
3. The permission bits are OR’d with rw-rw----
The single-user server will also be run with this gid as part of its supplemental
groups.
Instead of an integer, this could also be a callable that takes as one
parameter the current spawner instance and returns an integer. The callable will
be called asynchronously if it returns a future, rather than an int. Note that
the interface of the spawner class is not deemed stable across versions,
so using this functionality might cause your JupyterHub or kubespawner
upgrades to break.
You'll *have* to set this if you are using auto-provisioned volumes with most
cloud providers. See `fsGroup <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core>`__
for more details.
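For example, assuming your image's user belongs to the common `users`
group with GID 100::
    c.KubeSpawner.fs_gid = 100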
""",
)
supplemental_gids = Union(
trait_types=[
List(),
Callable(),
],
config=True,
help="""
A list of GIDs that should be set as additional supplemental groups to the
user that the container runs as.
Instead of a list of integers, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of integers. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
You may have to set this if you are deploying to an environment with RBAC/SCC
enforced and pods run with a 'restricted' SCC which results in the image being
run as an assigned user ID. The supplemental group IDs would need to include
the corresponding group ID of the user ID the image normally would run as. The
image must set up all directories/files any application needs access to, as group
writable.
""",
)
privileged = Bool(
False,
config=True,
help="""
Whether to run the pod with a privileged security context.
""",
)
allow_privilege_escalation = Bool(
False,
allow_none=True,
config=True,
help="""
Controls whether a process can gain more privileges than its parent process.
When set to False (the default), the primary user visible effect is that
setuid binaries (like sudo) will no longer work.
When set to None, the defaults for the cluster are respected.
This bool directly controls whether the no_new_privs flag gets set on the container.
AllowPrivilegeEscalation is always true when the container is
1) run as Privileged, or 2) has CAP_SYS_ADMIN.
""",
)
container_security_context = Union(
trait_types=[
Dict(),
Callable(),
],
config=True,
help="""
A Kubernetes security context for the container. Note that all
configuration options within here should be camelCased.
What is configured here has the highest priority, so the alternative
configuration `uid`, `gid`, `privileged`, and
`allow_privilege_escalation` will be overridden by this.
Rely on `the Kubernetes reference
<https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#securitycontext-v1-core>`__
for details on allowed configuration.
""",
)
pod_security_context = Union(
trait_types=[
Dict(),
Callable(),
],
config=True,
help="""
A Kubernetes security context for the pod. Note that all configuration
options within here should be camelCased.
What is configured here has higher priority than `fs_gid` and
`supplemental_gids`, but lower priority than what is set in the
`container_security_context`.
Note that anything configured on the Pod level will influence all
containers, including init containers and sidecar containers.
Rely on `the Kubernetes reference
<https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core>`__
for details on allowed configuration.
""",
)
modify_pod_hook = Callable(
None,
allow_none=True,
config=True,
help="""
Callable to augment the Pod object before launching.
Expects a callable that takes two parameters:
1. The spawner object that is doing the spawning
2. The Pod object that is to be launched
You should modify the Pod object and return it.
This can be a coroutine if necessary. When set to None, no augmenting is done.
This is very useful if you want to modify the pod being launched dynamically.
Note that the spawner object can change between versions of KubeSpawner and JupyterHub,
so be careful relying on this!
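A minimal sketch (the annotation added here is hypothetical)::
    def modify_pod(spawner, pod):
        pod.metadata.annotations['example.org/spawned-by'] = spawner.user.name
        return pod
    c.KubeSpawner.modify_pod_hook = modify_pod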
""",
)
volumes = List(
config=True,
help="""
List of Kubernetes Volume specifications that will be mounted in the user pod.
This list will be directly added under `volumes` in the kubernetes pod spec,
so you should use the same structure. Each item in the list must have the
following two keys:
- `name`
Name that'll be later used in the `volume_mounts` config to mount this
volume at a specific path.
- `<name-of-a-supported-volume-type>` (such as `hostPath`, `persistentVolumeClaim`,
etc)
The key name determines the type of volume to mount, and the value should
be an object specifying the various options available for that kind of
volume.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on the various kinds of volumes available and their options.
Your kubernetes cluster must already be configured to support the volume types you want to use.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
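For example, to mount a per-user PersistentVolumeClaim (the claim
name template is hypothetical)::
    c.KubeSpawner.volumes = [{
        'name': 'data',
        'persistentVolumeClaim': {'claimName': 'claim-{username}'}
    }]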
""",
)
volume_mounts = List(
config=True,
help="""
List of paths on which to mount volumes in the user notebook's pod.
This list will be added to the values of the `volumeMounts` key under the user's
container in the kubernetes pod spec, so you should use the same structure as that.
Each item in the list should be a dictionary with at least these two keys:
- `mountPath` The path on the container in which we want to mount the volume.
- `name` The name of the volume we want to mount, as specified in the `volumes` config.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/volumes>`__
for more information on how the `volumeMount` item works.
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
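For example, mounting the `data` volume from the `volumes` example
above (the path is hypothetical)::
    c.KubeSpawner.volume_mounts = [{
        'name': 'data',
        'mountPath': '/home/jovyan/work'
    }]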
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_capacity = Unicode(
None,
config=True,
allow_none=True,
help="""
The amount of storage space to request from the volume that the pvc will
mount to. This amount will be the amount of storage space the user has
to work with on their notebook. If left blank, the kubespawner will not
create a pvc for the pod.
This will be added to the `resources: requests: storage:` in the k8s pod spec.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims>`__
for more information on how storage works.
Quantities can be represented externally as unadorned integers, or as fixed-point
integers with one of these SI suffixes (`E, P, T, G, M, K, m`) or their power-of-two
equivalents (`Ei, Pi, Ti, Gi, Mi, Ki`). For example, the following represent roughly
the same value: `128974848`, `129e6`, `129M`, `123Mi`.
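For example, to request 10 gibibytes of storage for each user::
    c.KubeSpawner.storage_capacity = '10Gi'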
""",
)
storage_extra_labels = Dict(
config=True,
help="""
Extra kubernetes labels to set on the user PVCs.
The keys and values specified here would be set as labels on the PVCs
created by kubespawner for the user. Note that these are only set
when the PVC is created, not later when this setting is updated.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__
for more info on what labels are and why you might want to use them!
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
storage_class = Unicode(
None,
config=True,
allow_none=True,
help="""
The storage class that the pvc will use.
This will be added to the `annotations: volume.beta.kubernetes.io/storage-class:`
in the pvc metadata.
This will determine what type of volume the pvc will request to use. If one exists
that matches the criteria of the StorageClass, the pvc will mount to that. Otherwise,
because it has a storage class, k8s will dynamically spawn a PV for the PVC to bind to
and a machine in the cluster for the PV to bind to.
Note that an empty string is a valid value and is always interpreted to be
requesting a pv with no class.
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/storage-classes/>`__
for more information on how StorageClasses work.
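For example (the class name is cluster-specific and hypothetical)::
    c.KubeSpawner.storage_class = 'standard'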
""",
)
storage_access_modes = List(
["ReadWriteOnce"],
config=True,
help="""
List of access modes the user has for the pvc.
The access modes are:
- `ReadWriteOnce` : the volume can be mounted as read-write by a single node
- `ReadOnlyMany` : the volume can be mounted read-only by many nodes
- `ReadWriteMany` : the volume can be mounted as read-write by many nodes
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes>`__
for more information on how access modes work.
""",
)
storage_selector = Dict(
config=True,
help="""
The dictionary Selector labels used to match a PersistentVolumeClaim to
a PersistentVolume.
Default is empty, which means it will match based only on other storage criteria.
For example, to match PersistentVolumes that have a label of `content: jupyter`, use::
c.KubeSpawner.storage_selector = {'matchLabels':{'content': 'jupyter'}}
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
lifecycle_hooks = Dict(
config=True,
help="""
Kubernetes lifecycle hooks to set on the spawned single-user pods.
The keys are the hook names; only two hooks are supported, postStart and preStop.
The values are the hook handlers, which Kubernetes executes when the hook is called.
Below is a sample copied from
`the Kubernetes documentation <https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/>`__::
c.KubeSpawner.lifecycle_hooks = {
"postStart": {
"exec": {
"command": ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
}
},
"preStop": {
"exec": {
"command": ["/usr/sbin/nginx", "-s", "quit"]
}
}
}
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/>`__
for more info on what lifecycle hooks are and why you might want to use them!
""",
)
init_containers = List(
config=True,
help="""
List of initialization containers belonging to the pod.
This list will be directly added under `initContainers` in the kubernetes pod spec,
so you should use the same structure. Each item in the list must be a field
of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core>`__
One use is disabling access to the metadata service from the single-user
notebook server with the configuration below::
c.KubeSpawner.init_containers = [{
"name": "init-iptables",
"image": "<image with iptables installed>",
"command": ["iptables", "-A", "OUTPUT", "-p", "tcp", "--dport", "80", "-d", "169.254.169.254", "-j", "DROP"],
"securityContext": {
"capabilities": {
"add": ["NET_ADMIN"]
}
}
}]
See `the Kubernetes documentation <https://kubernetes.io/docs/concepts/workloads/pods/init-containers/>`__
for more info on what init containers are and why you might want to use them!
To use this feature, the Kubernetes version must be greater than 1.6.
""",
)
extra_container_config = Dict(
config=True,
help="""
Extra configuration (e.g. ``envFrom``) for the notebook container which is not covered by other attributes.
This dict will be merged directly into the notebook server's `container` spec,
so you should use the same structure. Each key in the dict must be a field
of the `V1Container specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core>`__.
One use is setting ``envFrom`` on the notebook container with the configuration below::
c.KubeSpawner.extra_container_config = {
"envFrom": [{
"configMapRef": {
"name": "special-config"
}
}]
}
The key could be either a camelCase word (used by Kubernetes yaml, e.g.
``envFrom``) or a snake_case word (used by Kubernetes Python client,
e.g. ``env_from``).
""",
)
extra_pod_config = Dict(
config=True,
help="""
Extra configuration for the pod which is not covered by other attributes.
This dict will be merged directly into the pod spec, so you should use the same structure.
Each key in the dict must be a field of the pod configuration,
which follows the spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core
One use is setting restartPolicy and dnsPolicy with the configuration below::
c.KubeSpawner.extra_pod_config = {
"restartPolicy": "OnFailure",
"dns_policy": "ClusterFirstWithHostNet"
}
The `key` could be either a camelCase word (used by Kubernetes yaml,
e.g. `restartPolicy`) or a snake_case word (used by Kubernetes Python
client, e.g. `dns_policy`).
""",
)
extra_containers = List(
config=True,
help="""
List of containers belonging to the pod in addition to the container generated for the notebook server.
This list will be directly appended under `containers` in the kubernetes pod spec,
so you should use the same structure. Each item in the list is a container configuration
which follows the spec at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core
One use is running a cron task in a sidecar container to clean sensitive data, with the configuration below::
c.KubeSpawner.extra_containers = [{
"name": "crontab",
"image": "supercronic",
"command": ["/usr/local/bin/supercronic", "/etc/crontab"]
}]
`{username}`, `{userid}`, `{servername}`, `{hubnamespace}`,
`{unescaped_username}`, and `{unescaped_servername}` will be expanded if
found within strings of this configuration. The username and servername
come escaped to follow the [DNS label
standard](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names).
""",
)
# FIXME: Don't override 'default_value' ("") or 'allow_none' (False) (Breaking change)
scheduler_name = Unicode(
None,
allow_none=True,
config=True,
help="""
Set the pod's scheduler explicitly by name. See `the Kubernetes documentation <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core>`__
for more information.
""",
)
tolerations = List(
config=True,
help="""
List of tolerations that are to be assigned to the pod in order to be able to schedule the pod
on a node with the corresponding taints. See the official Kubernetes documentation for additional details
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Pass this field an array of "Toleration" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#toleration-v1-core
Example::
[
{
'key': 'key',
'operator': 'Equal',
'value': 'value',
'effect': 'NoSchedule'
},
{
'key': 'key',
'operator': 'Exists',
'effect': 'NoSchedule'
}
]
""",
)
node_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PreferredSchedulingTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#preferredschedulingterm-v1-core
""",
)
node_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "NodeSelectorTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#nodeselectorterm-v1-core
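For example, to require nodes carrying a dedicated user-node label
(the label key and value are hypothetical)::
    c.KubeSpawner.node_affinity_required = [{
        'matchExpressions': [{
            'key': 'hub.jupyter.org/node-purpose',
            'operator': 'In',
            'values': ['user'],
        }],
    }]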
""",
)
pod_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#weightedpodaffinityterm-v1-core
""",
)
pod_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podaffinityterm-v1-core
""",
)
pod_anti_affinity_preferred = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "WeightedPodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#weightedpodaffinityterm-v1-core
""",
)
pod_anti_affinity_required = List(
config=True,
help="""
Affinities describe where pods prefer or require to be scheduled, they
may prefer or require a node to have a certain label or be in proximity
/ remoteness to another pod. To learn more visit
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Pass this field an array of "PodAffinityTerm" objects.*
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podaffinityterm-v1-core
""",
)
extra_resource_guarantees = Dict(
config=True,
help="""
The dictionary used to request arbitrary resources.
Default is None and means no additional resources are requested.
For example, to request 1 Nvidia GPU::
c.KubeSpawner.extra_resource_guarantees = {"nvidia.com/gpu": "1"}
""",
)
extra_resource_limits = Dict(
config=True,
help="""
The dictionary used to limit arbitrary resources.
Default is None and means no additional resources are limited.
For example, to add a limit of 3 Nvidia GPUs::
c.KubeSpawner.extra_resource_limits = {"nvidia.com/gpu": "3"}
""",
)
delete_stopped_pods = Bool(
True,
config=True,
help="""
Whether to delete pods that have stopped themselves.
Set to False to leave stopped pods in the completed state,
allowing for easier debugging of why they may have stopped.
""",
)
profile_form_template = Unicode(
"""
<style>
/* The profile description should not be bold, even though it is inside the <label> tag */
#kubespawner-profiles-list label p {
font-weight: normal;
}
</style>
<div class='form-group' id='kubespawner-profiles-list'>
{% for profile in profile_list %}
<label for='profile-item-{{ profile.slug }}' class='form-control input-group'>
<div class='col-md-1'>
<input type='radio' name='profile' id='profile-item-{{ profile.slug }}' value='{{ profile.slug }}' {% if profile.default %}checked{% endif %} />
</div>
<div class='col-md-11'>
<strong>{{ profile.display_name }}</strong>
{% if profile.description %}
<p>{{ profile.description }}</p>
{% endif %}
</div>
</label>
{% endfor %}
</div>
""",
config=True,
help="""
Jinja2 template for constructing profile list shown to user.
Used when `profile_list` is set.
The contents of `profile_list` are passed in to the template.
This should be used to construct the contents of an HTML form. When
posted, this form is expected to have an item with name `profile` and
the value set to the slug of the selected profile.
""",
)
profile_list = Union(
trait_types=[List(trait=Dict()), Callable()],
config=True,
help="""
List of profiles to offer for selection by the user.
Signature is: `List(Dict())`, where each item is a dictionary that has the following keys:
- `display_name`: the human readable display name (should be HTML safe)
- `slug`: the machine readable slug to identify the profile
(missing slugs are generated from display_name)
- `description`: Optional description of this profile displayed to the user.
- `kubespawner_override`: a dictionary with overrides to apply to the KubeSpawner
settings. Each value can be either the final value to change or a callable that
take the `KubeSpawner` instance as parameter and return the final value.
- `default`: (optional Bool) True if this is the default selected option
Example::
c.KubeSpawner.profile_list = [
{
'display_name': 'Training Env - Python',
'slug': 'training-python',
'default': True,
'kubespawner_override': {
'image': 'training/python:label',
'cpu_limit': 1,
'mem_limit': '512M',
}
}, {
'display_name': 'Training Env - Datascience',
'slug': 'training-datascience',
'kubespawner_override': {
'image': 'training/datascience:label',
'cpu_limit': 4,
'mem_limit': '8G',
}
}, {
'display_name': 'DataScience - Small instance',
'slug': 'datascience-small',
'kubespawner_override': {
'image': 'datascience/small:label',
'cpu_limit': 10,
'mem_limit': '16G',
}
}, {
'display_name': 'DataScience - Medium instance',
'slug': 'datascience-medium',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
}
}, {
'display_name': 'DataScience - Medium instance (GPUx2)',
'slug': 'datascience-gpu2x',
'kubespawner_override': {
'image': 'datascience/medium:label',
'cpu_limit': 48,
'mem_limit': '96G',
'extra_resource_guarantees': {"nvidia.com/gpu": "2"},
}
}
]
Instead of a list of dictionaries, this could also be a callable that takes as one
parameter the current spawner instance and returns a list of dictionaries. The
callable will be called asynchronously if it returns a future, rather than
a list. Note that the interface of the spawner class is not deemed stable
across versions, so using this functionality might cause your JupyterHub
or kubespawner upgrades to break.
""",
)
priority_class_name = Unicode(
config=True,
help="""
The priority class that the pods will use.
See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption for
more information on how pod priority works.
""",
)
delete_grace_period = Integer(
1,
config=True,
help="""
Time in seconds for the pod to be in `terminating` state before it is forcefully killed.
Increase this if you need more time to execute a `preStop` lifecycle hook.
See https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods for
more information on how pod termination works.
Defaults to `1`.
""",
)
# deprecate redundant and inconsistent singleuser_ and user_ prefixes:
_deprecated_traits_09 = [
"singleuser_working_dir",
"singleuser_service_account",
"singleuser_extra_labels",
"singleuser_extra_annotations",
"singleuser_image_spec",
"singleuser_image_pull_policy",
"singleuser_image_pull_secrets",
"singleuser_node_selector",
"singleuser_uid",
"singleuser_fs_gid",
"singleuser_supplemental_gids",
"singleuser_privileged",
"singleuser_allow_privilege_escalation" "singleuser_lifecycle_hooks",
"singleuser_extra_pod_config",
"singleuser_init_containers",
"singleuser_extra_container_config",
"singleuser_extra_containers",
"user_storage_class",
"user_storage_pvc_ensure",
"user_storage_capacity",
"user_storage_extra_labels",
"user_storage_access_modes",
]
# other general deprecations:
_deprecated_traits = {
'image_spec': ('image', '0.10'),
}
# add the bulk deprecations from 0.9
for _deprecated_name in _deprecated_traits_09:
_new_name = _deprecated_name.split('_', 1)[1]
_deprecated_traits[_deprecated_name] = (_new_name, '0.9')
@validate('config')
def _handle_deprecated_config(self, proposal):
config = proposal.value
if 'KubeSpawner' not in config:
# nothing to check
return config
for _deprecated_name, (_new_name, version) in self._deprecated_traits.items():
# for any `singleuser_name` deprecate in favor of `name`
if _deprecated_name not in config.KubeSpawner:
# nothing to do
continue
# remove deprecated value from config
_deprecated_value = config.KubeSpawner.pop(_deprecated_name)
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s instead",
_deprecated_name,
version,
_new_name,
)
if _new_name in config.KubeSpawner:
# *both* config values found,
# ignore deprecated config and warn about the collision
_new_value = config.KubeSpawner[_new_name]
# ignore deprecated config in favor of non-deprecated config
self.log.warning(
"Ignoring deprecated config KubeSpawner.%s = %r "
" in favor of KubeSpawner.%s = %r",
_deprecated_name,
_deprecated_value,
_new_name,
_new_value,
)
else:
# move deprecated config to its new home
config.KubeSpawner[_new_name] = _deprecated_value
return config
# define properties for deprecated names
# so we can propagate their values to the new traits.
# most deprecations should be handled via config above,
# but in case these are set at runtime, e.g. by subclasses
# or hooks, hook this up.
# The signature-order of these is funny
# because the property methods are created with
# functools.partial(f, name) so name is passed as the first arg
# before self.
def _get_deprecated(name, new_name, version, self):
# warn about the deprecated name
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
name,
version,
new_name,
)
return getattr(self, new_name)
def _set_deprecated(name, new_name, version, self, value):
# warn about the deprecated name
self.log.warning(
"KubeSpawner.%s is deprecated in %s. Use KubeSpawner.%s",
name,
version,
new_name,
)
return setattr(self, new_name, value)
for _deprecated_name, (_new_name, _version) in _deprecated_traits.items():
exec(
"""{0} = property(
partial(_get_deprecated, '{0}', '{1}', '{2}'),
partial(_set_deprecated, '{0}', '{1}', '{2}'),
)
""".format(
_deprecated_name,
_new_name,
_version,
)
)
del _deprecated_name
def _expand_user_properties(self, template):
# Make sure username and servername match the restrictions for DNS labels
# Note: '-' is not in safe_chars, as it is being used as escape character
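# For example, the template 'jupyter-{username}--{servername}' with
# user 'alice' and an empty server name renders as 'jupyter-alice--',
# which the trailing-dash strip below reduces to 'jupyter-alice'.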
safe_chars = set(string.ascii_lowercase + string.digits)
raw_servername = self.name or ''
safe_servername = escapism.escape(
raw_servername, safe=safe_chars, escape_char='-'
).lower()
hub_namespace = self._namespace_default()
if hub_namespace == "default":
hub_namespace = "user"
legacy_escaped_username = ''.join(
[s if s in safe_chars else '-' for s in self.user.name.lower()]
)
safe_username = escapism.escape(
self.user.name, safe=safe_chars, escape_char='-'
).lower()
rendered = template.format(
userid=self.user.id,
username=safe_username,
unescaped_username=self.user.name,
legacy_escape_username=legacy_escaped_username,
servername=safe_servername,
unescaped_servername=raw_servername,
hubnamespace=hub_namespace,
)
# strip trailing - delimiter in case of empty servername.
# k8s object names cannot have trailing -
return rendered.rstrip("-")
def _expand_all(self, src):
if isinstance(src, list):
return [self._expand_all(i) for i in src]
elif isinstance(src, dict):
return {k: self._expand_all(v) for k, v in src.items()}
elif isinstance(src, str):
return self._expand_user_properties(src)
else:
return src
def _build_common_labels(self, extra_labels):
# Default set of labels, picked up from
# https://github.com/helm/helm-www/blob/HEAD/content/en/docs/chart_best_practices/labels.md
labels = {
'hub.jupyter.org/username': escapism.escape(
self.user.name, safe=self.safe_chars, escape_char='-'
).lower()
}
labels.update(extra_labels)
labels.update(self.common_labels)
return labels
def _build_pod_labels(self, extra_labels):
labels = self._build_common_labels(extra_labels)
labels.update(
{
'component': self.component_label,
'hub.jupyter.org/servername': self.name,
}
)
return labels
def _build_common_annotations(self, extra_annotations):
# Annotations don't need to be escaped
annotations = {'hub.jupyter.org/username': self.user.name}
if self.name:
annotations['hub.jupyter.org/servername'] = self.name
annotations.update(extra_annotations)
return annotations
# specify default ssl alt names
@default("ssl_alt_names")
def _default_ssl_alt_names(self):
return [
f"DNS:{self.dns_name}",
f"DNS:{self.pod_name}",
f"DNS:{self.pod_name}.{self.namespace}",
f"DNS:{self.pod_name}.{self.namespace}.svc",
]
@default("ssl_alt_names_include_local")
def _default_ssl_alt_names_include_local(self):
return False
get_pod_url = Callable(
default_value=None,
allow_none=True,
config=True,
help="""Callable to retrieve pod url
Called with (spawner, pod).
Must not be async.
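A minimal sketch (assumes the pod IP is reachable from the Hub)::
    def my_pod_url(spawner, pod):
        return "http://{}:{}".format(pod["status"]["podIP"], spawner.port)
    c.KubeSpawner.get_pod_url = my_pod_url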
""",
)
def _get_pod_url(self, pod):
"""Return the pod url
Default: use pod.status.pod_ip (dns_name if ssl is enabled)
"""
if self.get_pod_url:
# custom get_pod_url hook
return self.get_pod_url(self, pod)
if getattr(self, "internal_ssl", False):
proto = "https"
hostname = self.dns_name
else:
proto = "http"
hostname = pod["status"]["podIP"]
if self.pod_connect_ip:
hostname = ".".join(
[
s.rstrip("-")
for s in self._expand_user_properties(self.pod_connect_ip).split(
"."
)
]
)
return "{}://{}:{}".format(
proto,
hostname,
self.port,
)
async def get_pod_manifest(self):
"""
Make a pod manifest that will spawn current user's notebook pod.
"""
if callable(self.uid):
uid = await gen.maybe_future(self.uid(self))
else:
uid = self.uid
if callable(self.gid):
gid = await gen.maybe_future(self.gid(self))
else:
gid = self.gid
if callable(self.fs_gid):
fs_gid = await gen.maybe_future(self.fs_gid(self))
else:
fs_gid = self.fs_gid
if callable(self.supplemental_gids):
supplemental_gids = await gen.maybe_future(self.supplemental_gids(self))
else:
supplemental_gids = self.supplemental_gids
if callable(self.container_security_context):
csc = await gen.maybe_future(self.container_security_context(self))
else:
csc = self.container_security_context
if callable(self.pod_security_context):
psc = await gen.maybe_future(self.pod_security_context(self))
else:
psc = self.pod_security_context
args = self.get_args()
real_cmd = None
if self.cmd:
real_cmd = self.cmd + args
elif args:
self.log.warning(
f"Ignoring arguments when using implicit command from image: {args}."
" Set KubeSpawner.cmd explicitly to support passing cli arguments."
)
labels = self._build_pod_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(
self._expand_all(self.extra_annotations)
)
return make_pod(
name=self.pod_name,
cmd=real_cmd,
port=self.port,
image=self.image,
image_pull_policy=self.image_pull_policy,
image_pull_secrets=self.image_pull_secrets,
node_selector=self.node_selector,
uid=uid,
gid=gid,
fs_gid=fs_gid,
supplemental_gids=supplemental_gids,
privileged=self.privileged,
allow_privilege_escalation=self.allow_privilege_escalation,
container_security_context=csc,
pod_security_context=psc,
env=self.get_env(),
volumes=self._expand_all(self.volumes),
volume_mounts=self._expand_all(self.volume_mounts),
working_dir=self.working_dir,
labels=labels,
annotations=annotations,
cpu_limit=self.cpu_limit,
cpu_guarantee=self.cpu_guarantee,
mem_limit=self.mem_limit,
mem_guarantee=self.mem_guarantee,
extra_resource_limits=self.extra_resource_limits,
extra_resource_guarantees=self.extra_resource_guarantees,
lifecycle_hooks=self.lifecycle_hooks,
init_containers=self._expand_all(self.init_containers),
service_account=self._expand_all(self.service_account),
automount_service_account_token=self.automount_service_account_token,
extra_container_config=self.extra_container_config,
extra_pod_config=self._expand_all(self.extra_pod_config),
extra_containers=self._expand_all(self.extra_containers),
scheduler_name=self.scheduler_name,
tolerations=self.tolerations,
node_affinity_preferred=self.node_affinity_preferred,
node_affinity_required=self.node_affinity_required,
pod_affinity_preferred=self.pod_affinity_preferred,
pod_affinity_required=self.pod_affinity_required,
pod_anti_affinity_preferred=self.pod_anti_affinity_preferred,
pod_anti_affinity_required=self.pod_anti_affinity_required,
priority_class_name=self.priority_class_name,
ssl_secret_name=self.secret_name if self.internal_ssl else None,
ssl_secret_mount_path=self.secret_mount_path,
logger=self.log,
)
def get_secret_manifest(self, owner_reference):
"""
Make a secret manifest that contains the ssl certificates.
"""
labels = self._build_common_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(
self._expand_all(self.extra_annotations)
)
return make_secret(
name=self.secret_name,
username=self.user.name,
cert_paths=self.cert_paths,
hub_ca=self.internal_trust_bundles['hub-ca'],
owner_references=[owner_reference],
labels=labels,
annotations=annotations,
)
def get_service_manifest(self, owner_reference):
"""
Make a service manifest for dns.
"""
labels = self._build_common_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(
self._expand_all(self.extra_annotations)
)
# TODO: validate that the service name
return make_service(
name=self.pod_name,
port=self.port,
servername=self.name,
owner_references=[owner_reference],
labels=labels,
annotations=annotations,
)
def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({'component': 'singleuser-storage'})
annotations = self._build_common_annotations({})
storage_selector = self._expand_all(self.storage_selector)
return make_pvc(
name=self.pvc_name,
storage_class=self.storage_class,
access_modes=self.storage_access_modes,
selector=storage_selector,
storage=self.storage_capacity,
labels=labels,
annotations=annotations,
)
def is_pod_running(self, pod):
"""
Check if the given pod is running
pod must be a dictionary representing a Pod kubernetes API object.
"""
# FIXME: Validate if this is really the best way
is_running = (
pod is not None
and pod["status"]["phase"] == 'Running'
and pod["status"]["podIP"] is not None
and "deletionTimestamp" not in pod["metadata"]
and all([cs["ready"] for cs in pod["status"]["containerStatuses"]])
)
return is_running
def pod_has_uid(self, pod):
"""
Check if the given pod exists and has a UID
pod must be a dictionary representing a Pod kubernetes API object.
"""
return bool(
pod and pod.get("metadata") and pod["metadata"].get("uid") is not None
)
def get_state(self):
"""
Save state required to reinstate this user's pod from scratch
We save the `pod_name`, even though we could easily compute it,
because JupyterHub requires you save *some* state! Otherwise
it assumes your server is dead. This works around that.
It's also useful for cases when the `pod_template` changes between
restarts - this keeps the old pods around.
"""
state = super().get_state()
state['pod_name'] = self.pod_name
return state
def get_env(self):
"""Return the environment dict to use for the Spawner.
See also: jupyterhub.Spawner.get_env
"""
env = super(KubeSpawner, self).get_env()
# deprecate image
env['JUPYTER_IMAGE_SPEC'] = self.image
env['JUPYTER_IMAGE'] = self.image
return env
def load_state(self, state):
"""
Load state from storage required to reinstate this user's pod
Since this runs after `__init__`, this will override the generated `pod_name`
if there's one we have saved in state. These are the same in most cases,
but if the `pod_template` has changed in between restarts, it will no longer
be the case. This allows us to continue serving from the old pods with
the old names.
"""
if 'pod_name' in state:
self.pod_name = state['pod_name']
@_await_pod_reflector
async def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
ref_key = "{}/{}".format(self.namespace, self.pod_name)
pod = self.pod_reflector.pods.get(ref_key, None)
if pod is not None:
if pod["status"]["phase"] == 'Pending':
return None
ctr_stat = pod["status"].get("containerStatuses")
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c["name"] == 'notebook':
if "terminated" in c["state"]:
# call self.stop to delete the pod
if self.delete_stopped_pods:
await self.stop(now=True)
return c["state"]["terminated"]["exitCode"]
break
# pod running. Check and update server url if it changed!
# only do this if fully running, not just starting up
# and there's a stored url in self.server to check against
if self.is_pod_running(pod) and self.server:
def _normalize_url(url):
"""Normalize url to be comparable
- parse with urlparse
- Ensures port is always defined
"""
url = urlparse(url)
if url.port is None:
if url.scheme.lower() == "https":
url = url._replace(netloc=f"{url.hostname}:443")
elif url.scheme.lower() == "http":
url = url._replace(netloc=f"{url.hostname}:80")
return url
pod_url = _normalize_url(self._get_pod_url(pod))
server_url = _normalize_url(self.server.url)
# netloc: only compare hostname:port, ignore path
if server_url.netloc != pod_url.netloc:
self.log.warning(
f"Pod {ref_key} url changed! {server_url.netloc} -> {pod_url.netloc}"
)
self.server.ip = pod_url.hostname
self.server.port = pod_url.port
self.db.commit()
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
@property
def events(self):
"""Filter event-reflector to just this pods events
Returns list of all events that match our pod_name
since our ._last_event (if defined).
._last_event is set at the beginning of .start().
"""
if not self.event_reflector:
return []
events = []
for event in self.event_reflector.events:
if event["involvedObject"]["name"] != self.pod_name:
# only consider events for my pod name
continue
if self._last_event and event["metadata"]["uid"] == self._last_event:
# saw last_event marker, ignore any previous events
# and only consider future events
# only include events *after* our _last_event marker
events = []
else:
events.append(event)
return events
async def progress(self):
"""
        This function reports back the progress of spawning a pod until
        self._start_future has fired.
        It works with events parsed by the Python kubernetes client; the
        specification of the events it relies on is documented here:
        ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
"""
if not self.events_enabled:
return
self.log.debug('progress generator: %s', self.pod_name)
start_future = self._start_future
progress = 0
next_event = 0
break_while_loop = False
while True:
            # This logic avoids a race condition: self._start() will be invoked by
            # self.start() and set self._start_future almost immediately. But
            # progress() will also be invoked via self.start(), so which happens
            # first? Because of this, the logic below avoids assuming that
            # self._start_future was set before this function was called.
if start_future is None and self._start_future:
start_future = self._start_future
# Ensure we capture all events by inspecting events a final time
# after the start_future signal has fired, we could have been in
# .sleep() and missed something.
if start_future and start_future.done():
break_while_loop = True
events = self.events
len_events = len(events)
if next_event < len_events:
for i in range(next_event, len_events):
event = events[i]
# move the progress bar.
# Since we don't know how many events we will get,
# asymptotically approach 90% completion with each event.
# each event gets 33% closer to 90%:
# 30 50 63 72 78 82 84 86 87 88 88 89
progress += (90 - progress) / 3
yield {
'progress': int(progress),
'raw_event': event,
'message': "%s [%s] %s"
% (
event["lastTimestamp"] or event["eventTime"],
event["type"],
event["message"],
),
}
next_event = len_events
if break_while_loop:
break
await asyncio.sleep(1)
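    # Illustrative sketch (added; not part of the original class): reproduces
    # the asymptotic update rule used in the loop above. Each event moves the
    # progress a third of the way toward 90, giving 30, 50, 63, 72, 78, 82, ...
    @staticmethod
    def _sketch_progress_curve(n_events=12):
        progress, values = 0, []
        for _ in range(n_events):
            progress += (90 - progress) / 3
            values.append(int(progress))
        return values  # [30, 50, 63, 72, 78, 82, 84, 86, 87, 88, 88, 89]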
def _start_reflector(
self,
kind=None,
reflector_class=ResourceReflector,
replace=False,
**kwargs,
):
"""Start a shared reflector on the KubeSpawner class
kind: key for the reflector (e.g. 'pod' or 'events')
reflector_class: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
        If replace=False and a reflector with this key is already running,
        do nothing.
        If replace=True, a running reflector will be stopped
        and a new one started (for recovering from possible errors).
"""
key = kind
ReflectorClass = reflector_class
def on_reflector_failure():
self.log.critical(
"%s reflector failed, halting Hub.",
key.title(),
)
sys.exit(1)
previous_reflector = self.__class__.reflectors.get(key)
if replace or not previous_reflector:
self.__class__.reflectors[key] = ReflectorClass(
parent=self,
namespace=self.namespace,
on_failure=on_reflector_failure,
**kwargs,
)
asyncio.ensure_future(self.__class__.reflectors[key].start())
if replace and previous_reflector:
# we replaced the reflector, stop the old one
asyncio.ensure_future(previous_reflector.stop())
# return the current reflector
return self.__class__.reflectors[key]
def _start_watching_events(self, replace=False):
"""Start the events reflector
If replace=False and the event reflector is already running,
do nothing.
        If replace=True, a running event reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector(
kind="events",
reflector_class=EventReflector,
fields={"involvedObject.kind": "Pod"},
omit_namespace=self.enable_user_namespaces,
replace=replace,
)
def _start_watching_pods(self, replace=False):
"""Start the pod reflector
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
pod_reflector_class = PodReflector
pod_reflector_class.labels.update({"component": self.component_label})
return self._start_reflector(
"pods",
PodReflector,
omit_namespace=self.enable_user_namespaces,
replace=replace,
)
def start(self):
"""Thin wrapper around self._start
so we can hold onto a reference for the Future
start returns, which we can use to terminate
.progress()
"""
self._start_future = asyncio.ensure_future(self._start())
return self._start_future
_last_event = None
async def _make_create_pod_request(self, pod, request_timeout):
"""
Make an HTTP request to create the given pod
Designed to be used with exponential_backoff, so returns
True / False on success / failure
"""
try:
self.log.info(
f"Attempting to create pod {pod.metadata.name}, with timeout {request_timeout}"
)
await asyncio.wait_for(
self.api.create_namespaced_pod(
self.namespace,
pod,
),
request_timeout,
)
return True
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
pod_name = pod.metadata.name
if e.status != 409:
# We only want to handle 409 conflict errors
self.log.exception("Failed for %s", pod.to_str())
raise
self.log.info(f'Found existing pod {pod_name}, attempting to kill')
# TODO: this should show up in events
await self.stop(True)
self.log.info(
f'Killed pod {pod_name}, will try starting singleuser pod again'
)
# We tell exponential_backoff to retry
return False
async def _make_create_pvc_request(self, pvc, request_timeout):
        # Try to create the PVC. If it succeeds, we are good. If it
        # returns a 409, the PVC already exists and we are also good. If
        # it returns a 403, indicating a potential quota issue, we need
        # to check whether the PVC already exists before we raise the
        # quota-exceeded error, because quota is checked before
        # determining whether the PVC needed to be created.
pvc_name = pvc.metadata.name
try:
self.log.info(
f"Attempting to create pvc {pvc.metadata.name}, with timeout {request_timeout}"
)
await asyncio.wait_for(
self.api.create_namespaced_persistent_volume_claim(
namespace=self.namespace,
body=pvc,
),
request_timeout,
)
return True
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
if e.status == 409:
self.log.info(
"PVC " + pvc_name + " already exists, so did not create new pvc."
)
return True
elif e.status == 403:
t, v, tb = sys.exc_info()
try:
await self.api.read_namespaced_persistent_volume_claim(
name=pvc_name,
namespace=self.namespace,
)
except ApiException as e:
raise v.with_traceback(tb)
                self.log.info(
                    "PVC " + pvc_name + " already exists; the quota may have been reached."
                )
return True
else:
raise
async def _ensure_not_exists(self, kind, name):
"""Ensure a resource does not exist
Request deletion and wait for it to be gone
Designed to be used with exponential_backoff, so returns
True when the resource no longer exists, False otherwise
"""
delete = getattr(self.api, "delete_namespaced_{}".format(kind))
read = getattr(self.api, "read_namespaced_{}".format(kind))
# first, attempt to delete the resource
try:
self.log.info(f"Deleting {kind}/{name}")
await asyncio.wait_for(
delete(namespace=self.namespace, name=name),
self.k8s_api_request_timeout,
)
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
if e.status == 404:
self.log.info(f"{kind}/{name} is gone")
# no such resource, delete successful
return True
self.log.exception("Error deleting {kind}/{name}: {e}")
return False
try:
self.log.info(f"Checking for {kind}/{name}")
await asyncio.wait_for(
read(namespace=self.namespace, name=name), self.k8s_api_request_timeout
)
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
if e.status == 404:
self.log.info(f"{kind}/{name} is gone")
return True
self.log.exception("Error reading {kind}/{name}: {e}")
return False
# if we got here, resource still exists, try again
return False
async def _make_create_resource_request(self, kind, manifest):
"""Make an HTTP request to create the given resource
Designed to be used with exponential_backoff, so returns
True / False on success / failure
"""
create = getattr(self.api, f"create_namespaced_{kind}")
self.log.info(f"Attempting to create {kind} {manifest.metadata.name}")
try:
await asyncio.wait_for(
create(self.namespace, manifest), self.k8s_api_request_timeout
)
except asyncio.TimeoutError:
# Just try again
return False
except ApiException as e:
name = manifest.metadata.name
if e.status == 409:
self.log.info(f'Found existing {kind} {name}')
return True
# We only want to handle 409 conflict errors
self.log.exception("Failed to create %s", manifest.to_str())
raise
else:
return True
async def _start(self):
"""Start the user's pod"""
# load user options (including profile)
await self.load_user_options()
# If we have user_namespaces enabled, create the namespace.
# It's fine if it already exists.
if self.enable_user_namespaces:
await self._ensure_namespace()
# record latest event so we don't include old
# events from previous pods in self.events
# track by order and name instead of uid
# so we get events like deletion of a previously stale
# pod if it's part of this spawn process
events = self.events
if events:
self._last_event = events[-1]["metadata"]["uid"]
if self.storage_pvc_ensure:
pvc = self.get_pvc_manifest()
# If there's a timeout, just let it propagate
await exponential_backoff(
partial(
self._make_create_pvc_request, pvc, self.k8s_api_request_timeout
),
f'Could not create PVC {self.pvc_name}',
# Each req should be given k8s_api_request_timeout seconds.
timeout=self.k8s_api_request_retry_timeout,
)
# If we run into a 409 Conflict error, it means a pod with the
# same name already exists. We stop it, wait for it to stop, and
# try again. We try 4 times, and if it still fails we give up.
pod = await self.get_pod_manifest()
if self.modify_pod_hook:
pod = await gen.maybe_future(self.modify_pod_hook(self, pod))
ref_key = "{}/{}".format(self.namespace, self.pod_name)
# If there's a timeout, just let it propagate
await exponential_backoff(
partial(self._make_create_pod_request, pod, self.k8s_api_request_timeout),
f'Could not create pod {ref_key}',
timeout=self.k8s_api_request_retry_timeout,
)
if self.internal_ssl:
try:
# wait for pod to have uid,
# required for creating owner reference
await exponential_backoff(
lambda: self.pod_has_uid(
self.pod_reflector.pods.get(ref_key, None)
),
f"pod/{ref_key} does not have a uid!",
)
pod = self.pod_reflector.pods[ref_key]
owner_reference = make_owner_reference(
self.pod_name, pod["metadata"]["uid"]
)
# internal ssl, create secret object
secret_manifest = self.get_secret_manifest(owner_reference)
await exponential_backoff(
partial(
self._ensure_not_exists, "secret", secret_manifest.metadata.name
),
f"Failed to delete secret {secret_manifest.metadata.name}",
)
await exponential_backoff(
partial(
self._make_create_resource_request, "secret", secret_manifest
),
f"Failed to create secret {secret_manifest.metadata.name}",
)
service_manifest = self.get_service_manifest(owner_reference)
await exponential_backoff(
partial(
self._ensure_not_exists,
"service",
service_manifest.metadata.name,
),
f"Failed to delete service {service_manifest.metadata.name}",
)
await exponential_backoff(
partial(
self._make_create_resource_request, "service", service_manifest
),
f"Failed to create service {service_manifest.metadata.name}",
)
except Exception:
# cleanup on failure and re-raise
await self.stop(True)
raise
# we need a timeout here even though start itself has a timeout
# in order for this coroutine to finish at some point.
# using the same start_timeout here
# essentially ensures that this timeout should never propagate up
# because the handler will have stopped waiting after
# start_timeout, starting from a slightly earlier point.
try:
await exponential_backoff(
lambda: self.is_pod_running(self.pod_reflector.pods.get(ref_key, None)),
'pod %s did not start in %s seconds!' % (ref_key, self.start_timeout),
timeout=self.start_timeout,
)
except TimeoutError:
if ref_key not in self.pod_reflector.pods:
# if pod never showed up at all,
# restart the pod reflector which may have become disconnected.
self.log.error(
"Pod %s never showed up in reflector, restarting pod reflector",
ref_key,
)
self.log.error("Pods: {}".format(self.pod_reflector.pods))
self._start_watching_pods(replace=True)
raise
pod = self.pod_reflector.pods[ref_key]
self.pod_id = pod["metadata"]["uid"]
if self.event_reflector:
self.log.debug(
'pod %s events before launch: %s',
ref_key,
"\n".join(
[
"%s [%s] %s"
% (
event["lastTimestamp"] or event["eventTime"],
event["type"],
event["message"],
)
for event in self.events
]
),
)
return self._get_pod_url(pod)
async def _make_delete_pod_request(
self, pod_name, delete_options, grace_seconds, request_timeout
):
"""
Make an HTTP request to delete the given pod
Designed to be used with exponential_backoff, so returns
True / False on success / failure
"""
ref_key = "{}/{}".format(self.namespace, pod_name)
self.log.info("Deleting pod %s", ref_key)
try:
await asyncio.wait_for(
self.api.delete_namespaced_pod(
name=pod_name,
namespace=self.namespace,
body=delete_options,
grace_period_seconds=grace_seconds,
),
request_timeout,
)
return True
except asyncio.TimeoutError:
return False
except ApiException as e:
if e.status == 404:
self.log.warning(
"No pod %s to delete. Assuming already deleted.",
ref_key,
)
# If there isn't already a pod, that's ok too!
return True
else:
raise
async def _make_delete_pvc_request(self, pvc_name, request_timeout):
"""
Make an HTTP request to delete the given PVC
Designed to be used with exponential_backoff, so returns
True / False on success / failure
"""
self.log.info("Deleting pvc %s", pvc_name)
try:
await asyncio.wait_for(
self.api.delete_namespaced_persistent_volume_claim(
name=pvc_name,
namespace=self.namespace,
),
request_timeout,
)
return True
except asyncio.TimeoutError:
return False
except ApiException as e:
if e.status == 404:
self.log.warning(
"No pvc %s to delete. Assuming already deleted.",
pvc_name,
)
# If there isn't a PVC to delete, that's ok too!
return True
else:
raise
@_await_pod_reflector
async def stop(self, now=False):
delete_options = client.V1DeleteOptions()
if now:
grace_seconds = 0
else:
grace_seconds = self.delete_grace_period
delete_options.grace_period_seconds = grace_seconds
ref_key = "{}/{}".format(self.namespace, self.pod_name)
await exponential_backoff(
partial(
self._make_delete_pod_request,
self.pod_name,
delete_options,
grace_seconds,
self.k8s_api_request_timeout,
),
f'Could not delete pod {ref_key}',
timeout=self.k8s_api_request_retry_timeout,
)
try:
await exponential_backoff(
lambda: self.pod_reflector.pods.get(ref_key, None) is None,
'pod %s did not disappear in %s seconds!'
% (ref_key, self.start_timeout),
timeout=self.start_timeout,
)
except TimeoutError:
self.log.error(
"Pod %s did not disappear, restarting pod reflector", ref_key
)
self._start_watching_pods(replace=True)
raise
@default('env_keep')
def _env_keep_default(self):
return []
_profile_list = None
def _render_options_form(self, profile_list):
self._profile_list = self._init_profile_list(profile_list)
profile_form_template = Environment(loader=BaseLoader).from_string(
self.profile_form_template
)
return profile_form_template.render(profile_list=self._profile_list)
async def _render_options_form_dynamically(self, current_spawner):
profile_list = await gen.maybe_future(self.profile_list(current_spawner))
profile_list = self._init_profile_list(profile_list)
return self._render_options_form(profile_list)
@default('options_form')
def _options_form_default(self):
"""
Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined.
"""
if not self.profile_list:
return ''
if callable(self.profile_list):
return self._render_options_form_dynamically
else:
return self._render_options_form(self.profile_list)
@default('options_from_form')
def _options_from_form_default(self):
return self._options_from_form
def _options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
        To access the value, you can use the `get` accessor and the name of the html element,
        for example::
            formdata.get('profile', [None])[0]
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "cpus-8"}``
"""
return {'profile': formdata.get('profile', [None])[0]}
async def _load_profile(self, slug):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['slug'] == slug:
break
else:
if slug:
# name specified, but not found
raise ValueError(
"No such profile: %s. Options include: %s"
% (slug, ', '.join(p['slug'] for p in self._profile_list))
)
else:
# no name specified, use the default
profile = default_profile
self.log.debug(
"Applying KubeSpawner override for profile '%s'", profile['display_name']
)
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(
".. overriding KubeSpawner value %s=%s (callable result)", k, v
)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v)
# set of recognised user option keys
# used for warning about ignoring unrecognised options
_user_option_keys = {
'profile',
}
def _init_profile_list(self, profile_list):
# generate missing slug fields from display_name
for profile in profile_list:
if 'slug' not in profile:
profile['slug'] = slugify(profile['display_name'])
return profile_list
async def load_user_options(self):
"""Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options.
"""
if self._profile_list is None:
if callable(self.profile_list):
profile_list = await gen.maybe_future(self.profile_list(self))
else:
profile_list = self.profile_list
self._profile_list = self._init_profile_list(profile_list)
selected_profile = self.user_options.get('profile', None)
if self._profile_list:
await self._load_profile(selected_profile)
elif selected_profile:
self.log.warning(
"Profile %r requested, but profiles are not enabled", selected_profile
)
# help debugging by logging any option fields that are not recognized
option_keys = set(self.user_options)
unrecognized_keys = option_keys.difference(self._user_option_keys)
if unrecognized_keys:
self.log.warning(
"Ignoring unrecognized KubeSpawner user_options: %s",
", ".join(map(str, sorted(unrecognized_keys))),
)
async def _ensure_namespace(self):
ns = make_namespace(self.namespace)
api = self.api
try:
await asyncio.wait_for(
api.create_namespace(ns),
self.k8s_api_request_timeout,
)
except ApiException as e:
if e.status != 409:
# It's fine if it already exists
self.log.exception("Failed to create namespace %s", self.namespace)
raise
async def delete_forever(self):
"""Called when a user is deleted.
This can do things like request removal of resources such as persistent storage.
Only called on stopped spawners, and is likely the last action ever taken for the user.
Called on each spawner after deletion,
i.e. on named server deletion (not just stop),
and on the default Spawner when the user is being deleted.
Requires JupyterHub 1.4.1+
.. versionadded: 0.17
"""
log_name = self.user.name
if self.name:
log_name = f"{log_name}/{self.name}"
if not self.delete_pvc:
self.log.info(f"Not deleting pvc for {log_name}: {self.pvc_name}")
return
if self.name and '{servername}' not in self.pvc_name_template:
# named server has the same PVC as the default server
# don't delete the default server's PVC!
self.log.info(
f"Not deleting shared pvc for named server {log_name}: {self.pvc_name}"
)
return
await exponential_backoff(
partial(
self._make_delete_pvc_request,
self.pvc_name,
self.k8s_api_request_timeout,
),
f'Could not delete pvc {self.pvc_name}',
timeout=self.k8s_api_request_retry_timeout,
)
| jupyterhub/kubespawner | kubespawner/spawner.py | Python | bsd-3-clause | 109,371 | [
"VisIt"
] | 8e8db3c6563e59c514ccc77f0b9c95bc2914412526cbd218ec94c5389396d105 |
# -*- coding: utf-8 -*-
"""
Unitary Event (UE) analysis is a statistical method for analyzing, in a
time-resolved manner, excess spike correlation between simultaneously
recorded neurons, by comparing the empirical spike coincidences
(precision of a few ms) to the number expected from the firing rates
of the neurons.
References:
- Gruen, Diesmann, Grammont, Riehle, Aertsen (1999) J Neurosci Methods,
94(1): 67-79.
- Gruen, Diesmann, Aertsen (2002a,b) Neural Comput, 14(1): 43-80; 81-19.
- Gruen S, Riehle A, and Diesmann M (2003) Effect of cross-trial
nonstationarity on joint-spike events Biological Cybernetics 88(5):335-351.
- Gruen S (2009) Data-driven significance estimation of precise spike
correlation. J Neurophysiology 101:1126-1140 (invited review)
:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import numpy as np
import quantities as pq
import neo
import warnings
import elephant.conversion as conv
import scipy.special
def hash_from_pattern(m, N, base=2):
"""
    Calculate a unique number for a spike pattern, or for a matrix of
    spike patterns (one pattern per column), composed of N neurons.
Parameters:
-----------
m: 2-dim ndarray
        spike patterns represented as a binary matrix (i.e., matrix of 0's and 1's).
        Rows and columns correspond to neurons and patterns, respectively.
N: integer
number of neurons is required to be equal to the number
of rows
base: integer
base for calculation of hash values from binary
sequences (= pattern).
Default is 2
Returns:
--------
list of integers:
An array containing the hash values of each pattern,
shape: (number of patterns)
Raises:
-------
ValueError: if matrix m has wrong orientation
Examples:
---------
descriptive example:
m = [0
1
1]
N = 3
base = 2
hash = 0*2^2 + 1*2^1 + 1*2^0 = 3
second example:
>>> import numpy as np
>>> m = np.array([[0, 1, 0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]])
>>> hash_from_pattern(m,N=3)
array([0, 4, 2, 1, 6, 5, 3, 7])
"""
# check the consistency between shape of m and number neurons N
if N != np.shape(m)[0]:
raise ValueError('patterns in the matrix should be column entries')
# check the entries of the matrix
if not np.all((np.array(m) == 0) + (np.array(m) == 1)):
raise ValueError('patterns should be zero or one')
# generate the representation
v = np.array([base**x for x in range(N)])
# reverse the order
v = v[np.argsort(-v)]
# calculate the binary number by use of scalar product
return np.dot(v, m)
def inverse_hash_from_pattern(h, N, base=2):
"""
Calculate the 0-1 spike patterns (matrix) from hash values
Parameters:
-----------
h: list of integers
list or array of hash values, length: number of patterns
N: integer
number of neurons
base: integer
base for calculation of the number from binary
sequences (= pattern).
Default is 2
Raises:
-------
    ValueError: if a hash value is not compatible with the number
        of neurons; a hash value must not be larger than the largest
        possible hash for the given number of neurons
        (e.g. for N = 2, max(hash) = 2^1 + 2^0 = 3,
        or for N = 4, max(hash) = 2^3 + 2^2 + 2^1 + 2^0 = 15)
Returns:
--------
numpy.array:
A matrix of shape: (N, number of patterns)
Examples
---------
>>> import numpy as np
>>> h = np.array([3,7])
>>> N = 4
>>> inverse_hash_from_pattern(h,N)
array([[1, 1],
[1, 1],
[0, 1],
[0, 0]])
"""
# check if the hash values are not greater than the greatest possible
# value for N neurons with the given base
if np.any(h > np.sum([base**x for x in range(N)])):
raise ValueError(
"hash value is not compatible with the number of neurons N")
# check if the hash values are integer
if not np.all(np.int64(h) == h):
raise ValueError("hash values are not integers")
m = np.zeros((N, len(h)), dtype=int)
for j, hh in enumerate(h):
i = N - 1
while i >= 0 and hh != 0:
m[i, j] = hh % base
            hh //= base  # integer division (works under both Python 2 and 3)
i -= 1
return m
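# Illustrative sketch (added; not part of the original module):
# hash_from_pattern and inverse_hash_from_pattern are inverses of each other
# for binary patterns.
def _demo_hash_roundtrip():
    patterns = np.array([[0, 1, 0, 1],
                         [0, 0, 1, 1]])
    h = hash_from_pattern(patterns, N=2)  # -> array([0, 2, 1, 3])
    recovered = inverse_hash_from_pattern(h, N=2)
    assert np.array_equal(recovered, patterns)
    return h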
def n_emp_mat(mat, N, pattern_hash, base=2):
"""
Count the occurrences of spike coincidence patterns
in the given spike trains.
Parameters:
-----------
mat: 2-dim ndarray
binned spike trains of N neurons. Rows and columns correspond
to neurons and temporal bins, respectively.
N: integer
number of neurons
pattern_hash: list of integers
hash values representing the spike coincidence patterns
of which occurrences are counted.
base: integer
Base which was used to generate the hash values.
Default is 2
Returns:
--------
N_emp: list of integers
number of occurrences of the given patterns in the given spike trains
indices: list of lists of integers
indices indexing the bins where the given spike patterns are found
in `mat`. Same length as `pattern_hash`
        (len(indices[i]) equals N_emp[i], for each entry i of pattern_hash)
Raises:
-------
ValueError: if mat is not zero-one matrix
Examples:
---------
>>> mat = np.array([[1, 0, 0, 1, 1],
[1, 0, 0, 1, 0]])
>>> pattern_hash = np.array([1,3])
    >>> n_emp, n_emp_indices = n_emp_mat(mat, 2, pattern_hash)
>>> print n_emp
[ 0. 2.]
>>> print n_emp_indices
[array([]), array([0, 3])]
"""
# check if the mat is zero-one matrix
if not np.all((np.array(mat) == 0) + (np.array(mat) == 1)):
raise ValueError("entries of mat should be either one or zero")
h = hash_from_pattern(mat, N, base=base)
N_emp = np.zeros(len(pattern_hash))
indices = []
for idx_ph, ph in enumerate(pattern_hash):
indices_tmp = np.where(h == ph)[0]
indices.append(indices_tmp)
N_emp[idx_ph] = len(indices_tmp)
return N_emp, indices
def n_emp_mat_sum_trial(mat, N, pattern_hash):
"""
    Calculates the empirical number of observed patterns, summed across trials
Parameters:
-----------
mat: 3d numpy array or elephant BinnedSpikeTrain object
Binned spike trains represented as a binary matrix (i.e., matrix of 0's and 1's),
segmented into trials. Trials should contain an identical number of neurons and
an identical number of time bins.
the entries are zero or one
0-axis --> trials
1-axis --> neurons
2-axis --> time bins
N: integer
number of neurons
pattern_hash: list of integers
array of hash values, length: number of patterns
Returns:
--------
N_emp: list of integers
        numbers of occurrences of the given spike patterns in the given spike trains,
summed across trials. Same length as `pattern_hash`.
idx_trials: list of lists of integers
list of indices of mat for each trial in which
the specific pattern has been observed.
0-axis --> trial
1-axis --> list of indices for the chosen trial per
entry of `pattern_hash`
Raises:
-------
ValueError: if matrix mat has wrong orientation
ValueError: if mat is not zero-one matrix
Examples:
---------
>>> mat = np.array([[[1, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 0, 1]],
[[1, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 0]]])
>>> pattern_hash = np.array([4,6])
>>> N = 3
>>> n_emp_sum_trial, n_emp_sum_trial_idx =
n_emp_mat_sum_trial(mat, N,pattern_hash)
>>> n_emp_sum_trial
array([ 1., 3.])
>>> n_emp_sum_trial_idx
[[array([0]), array([3])], [array([], dtype=int64), array([2, 4])]]
"""
# check the consistency between shape of m and number neurons N
if N != np.shape(mat)[1]:
        raise ValueError('the entries of mat should be a list of lists, '
                         'where 0-axis is trials and 1-axis is neurons')
num_patt = len(pattern_hash)
N_emp = np.zeros(num_patt)
idx_trials = []
# check if the mat is zero-one matrix
if not np.all((np.array(mat) == 0) + (np.array(mat) == 1)):
raise ValueError("entries of mat should be either one or zero")
for mat_tr in mat:
N_emp_tmp, indices_tmp = n_emp_mat(mat_tr, N, pattern_hash, base=2)
idx_trials.append(indices_tmp)
N_emp += N_emp_tmp
return N_emp, idx_trials
def _n_exp_mat_analytic(mat, N, pattern_hash):
"""
    Calculates the expected joint probability for each spike pattern analytically
"""
marg_prob = np.mean(mat, 1, dtype=float)
# marg_prob needs to be a column vector, so we
# build a two dimensional array with 1 column
# and len(marg_prob) rows
marg_prob = np.reshape(marg_prob, (len(marg_prob), 1))
m = inverse_hash_from_pattern(pattern_hash, N)
nrep = np.shape(m)[1]
    # multiplying the marginal probabilities of the neurons according to the
    # pattern
pmat = np.multiply(m, np.tile(marg_prob, (1, nrep))) +\
np.multiply(1 - m, np.tile(1 - marg_prob, (1, nrep)))
return np.prod(pmat, axis=0) * float(np.shape(mat)[1])
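# Illustrative worked example (added; not part of the original module): two
# neurons each firing in half of 4 bins have marginal probability 0.5, so the
# analytically expected count of the joint pattern (1, 1) (hash 3) is
# 0.5 * 0.5 * 4 = 1.0.
def _demo_n_exp_analytic():
    mat = np.array([[1, 1, 0, 0],
                    [1, 0, 1, 0]])
    n_exp = _n_exp_mat_analytic(mat, N=2, pattern_hash=np.array([3]))
    assert np.allclose(n_exp, [1.0])
    return n_exp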
def _n_exp_mat_surrogate(mat, N, pattern_hash, n_surr=1):
"""
Calculates the expected joint probability for each spike pattern with spike
time randomization surrogate
"""
if len(pattern_hash) > 1:
raise ValueError('surrogate method works only for one pattern!')
N_exp_array = np.zeros(n_surr)
    for rz_idx in range(n_surr):
        # shuffle the entries of each row of the zero-one matrix
        mat_surr = np.array(mat)
        for row in mat_surr:
            np.random.shuffle(row)
N_exp_array[rz_idx] = n_emp_mat(mat_surr, N, pattern_hash)[0][0]
return N_exp_array
def n_exp_mat(mat, N, pattern_hash, method='analytic', n_surr=1):
"""
Calculates the expected joint probability for each spike pattern
Parameters:
-----------
mat: 2d numpy array
the entries are zero or one
0-axis --> neurons
1-axis --> time bins
pattern_hash: list of integers
array of hash values, length: number of patterns
method: string
        method with which the expectancy should be calculated
'analytic' -- > analytically
'surr' -- > with surrogates (spike time randomization)
Default is 'analytic'
n_surr: integer
number of surrogates for constructing the distribution of expected joint probability.
Default is 1 and this number is needed only when method = 'surr'
kwargs:
-------
Raises:
-------
ValueError: if matrix m has wrong orientation
Returns:
--------
if method is analytic:
numpy.array:
An array containing the expected joint probability of each pattern,
shape: (number of patterns,)
if method is surr:
numpy.ndarray, 0-axis --> different realizations,
length = number of surrogates
1-axis --> patterns
Examples:
---------
>>> mat = np.array([[1, 1, 1, 1],
[0, 1, 0, 1],
[0, 0, 1, 0]])
>>> pattern_hash = np.array([5,6])
>>> N = 3
>>> n_exp_anal = n_exp_mat(mat,N, pattern_hash, method = 'analytic')
>>> n_exp_anal
[ 0.5 1.5 ]
>>>
>>>
>>> n_exp_surr = n_exp_mat(
mat, N,pattern_hash, method = 'surr', n_surr = 5000)
>>> print n_exp_surr
[[ 1. 1.]
[ 2. 0.]
[ 2. 0.]
...,
[ 2. 0.]
[ 2. 0.]
[ 1. 1.]]
"""
# check if the mat is zero-one matrix
if np.any(mat > 1) or np.any(mat < 0):
raise ValueError("entries of mat should be either one or zero")
if method == 'analytic':
return _n_exp_mat_analytic(mat, N, pattern_hash)
if method == 'surr':
return _n_exp_mat_surrogate(mat, N, pattern_hash, n_surr)
def n_exp_mat_sum_trial(
mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
"""
Calculates the expected joint probability
    for each spike pattern, summed over trials
Parameters:
-----------
mat: 3d numpy array or elephant BinnedSpikeTrain object
Binned spike trains represented as a binary matrix (i.e., matrix of 0's and 1's),
segmented into trials. Trials should contain an identical number of neurons and
an identical number of time bins.
the entries are zero or one
0-axis --> trials
1-axis --> neurons
2-axis --> time bins
N: integer
number of neurons
pattern_hash: list of integers
array of hash values, length: number of patterns
method: string
        method with which the unitary events should be computed
        'analytic_TrialByTrial' -- > calculate the expectancy
        (analytically) on each trial, then sum over all trials.
        'analytic_TrialAverage' -- > calculate the expectancy
        by averaging over trials.
        (cf. Gruen et al. 2003)
        'surrogate_TrialByTrial' -- > calculate the distribution
        of expected coincidences by spike time randomization in
        each trial and sum over trials.
        Default is 'analytic_TrialByTrial'
kwargs:
-------
n_surr: integer
number of surrogate to be used
Default is 1
Returns:
--------
numpy.array:
An array containing the expected joint probability of
        each pattern summed over trials, shape: (number of patterns,)
Examples:
--------
>>> mat = np.array([[[1, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 0, 1]],
[[1, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 0]]])
>>> pattern_hash = np.array([5,6])
>>> N = 3
>>> n_exp_anal = n_exp_mat_sum_trial(mat, N, pattern_hash)
>>> print n_exp_anal
array([ 1.56, 2.56])
"""
# check the consistency between shape of m and number neurons N
if N != np.shape(mat)[1]:
        raise ValueError('the entries of mat should be a list of lists, '
                         'where 0-axis is trials and 1-axis is neurons')
if method == 'analytic_TrialByTrial':
n_exp = np.zeros(len(pattern_hash))
for mat_tr in mat:
n_exp += n_exp_mat(mat_tr, N, pattern_hash, method='analytic')
elif method == 'analytic_TrialAverage':
n_exp = n_exp_mat(
np.mean(mat, 0), N, pattern_hash, method='analytic') * np.shape(mat)[0]
elif method == 'surrogate_TrialByTrial':
if 'n_surr' in kwargs:
n_surr = kwargs['n_surr']
else:
            n_surr = 1
n_exp = np.zeros(n_surr)
for mat_tr in mat:
n_exp += n_exp_mat(mat_tr, N, pattern_hash,
method='surr', n_surr=n_surr)
    else:
        raise ValueError(
            "Unknown method: %s; use 'analytic_TrialByTrial', "
            "'analytic_TrialAverage' or 'surrogate_TrialByTrial'" % method)
return n_exp
def gen_pval_anal(
mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
"""
    computes the expected coincidences and a function to calculate
    the p-value for given empirical coincidences
    This function generates a Poisson distribution with the expected
    value calculated from mat. It returns a function which takes
    the empirical coincidences, `n_emp`, and calculates the p-value
    as the area under the Poisson distribution from `n_emp` to infinity
Parameters:
-----------
mat: 3d numpy array or elephant BinnedSpikeTrain object
Binned spike trains represented as a binary matrix (i.e., matrix of 0's and 1's),
segmented into trials. Trials should contain an identical number of neurons and
an identical number of time bins.
the entries are zero or one
0-axis --> trials
1-axis --> neurons
2-axis --> time bins
N: integer
number of neurons
pattern_hash: list of integers
array of hash values, length: number of patterns
method: string
        method with which the unitary events should be computed
        'analytic_TrialByTrial' -- > calculate the expectancy
        (analytically) on each trial, then sum over all trials.
        'analytic_TrialAverage' -- > calculate the expectancy
        by averaging over trials.
        Default is 'analytic_TrialByTrial'
        (cf. Gruen et al. 2003)
kwargs:
-------
n_surr: integer
number of surrogate to be used
Default is 1
Returns:
--------
pval_anal:
a function which calculates the p-value for
the given empirical coincidences
n_exp: list of floats
expected coincidences
Examples:
--------
>>> mat = np.array([[[1, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 0, 1]],
[[1, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 0]]])
>>> pattern_hash = np.array([5,6])
>>> N = 3
>>> pval_anal,n_exp = gen_pval_anal(mat, N,pattern_hash)
>>> n_exp
array([ 1.56, 2.56])
"""
if method == 'analytic_TrialByTrial' or method == 'analytic_TrialAverage':
n_exp = n_exp_mat_sum_trial(mat, N, pattern_hash, method=method)
def pval(n_emp):
p = 1. - scipy.special.gammaincc(n_emp, n_exp)
return p
elif method == 'surrogate_TrialByTrial':
if 'n_surr' in kwargs:
n_surr = kwargs['n_surr']
else:
            n_surr = 1
n_exp = n_exp_mat_sum_trial(
mat, N, pattern_hash, method=method, n_surr=n_surr)
def pval(n_emp):
hist = np.bincount(np.int64(n_exp))
exp_dist = hist / float(np.sum(hist))
if len(n_emp) > 1:
raise ValueError(
'in surrogate method the p_value can be calculated only for one pattern!')
return np.sum(exp_dist[int(n_emp[0]):])
return pval, n_exp
def jointJ(p_val):
"""Surprise measurement
    Logarithmic transformation of the joint p-value into a surprise measure,
    for better visualization, as highly significant events are
    indicated by very low joint p-values
Parameters:
-----------
p_val: list of floats
p-values of statistical tests for different pattern.
Returns:
--------
J: list of floats
list of surprise measure
Examples:
---------
>>> p_val = np.array([0.31271072, 0.01175031])
>>> jointJ(p_val)
array([0.3419968 , 1.92481736])
"""
    p_arr = np.array(p_val)
    with np.errstate(divide='ignore'):
        # p-values of exactly 0 or 1 yield infinite surprise; suppress the
        # divide-by-zero warning rather than trying to catch it as an exception
        Js = np.log10(1 - p_arr) - np.log10(p_arr)
    return Js
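# Illustrative sketch (added; not part of the original module): the surprise
# is the log-odds of the p-value, so p = 0.5 maps to zero surprise and smaller
# p-values map to larger surprise values.
def _demo_jointJ():
    assert np.isclose(jointJ(np.array([0.5]))[0], 0.0)
    assert np.isclose(jointJ(np.array([0.05]))[0], np.log10(0.95 / 0.05))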
def _rate_mat_avg_trial(mat):
"""
    calculates the average firing rate of each neuron across trials
"""
num_tr, N, nbins = np.shape(mat)
psth = np.zeros(N)
for tr, mat_tr in enumerate(mat):
psth += np.sum(mat_tr, axis=1)
return psth / float(nbins) / float(num_tr)
def _bintime(t, binsize):
"""
    Convert real times (with physical units) to integer bin indices
"""
t_dl = t.rescale('ms').magnitude
binsize_dl = binsize.rescale('ms').magnitude
return np.floor(np.array(t_dl) / binsize_dl).astype(int)
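# Illustrative sketch (added; not part of the original module): a time of
# 25 ms with a 10 ms bin size falls into bin index 2 (floor division).
def _demo_bintime():
    assert _bintime(25 * pq.ms, 10 * pq.ms) == 2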
def _winpos(t_start, t_stop, winsize, winstep, position='left-edge'):
"""
Calculates the position of the analysis window
"""
t_start_dl = t_start.rescale('ms').magnitude
t_stop_dl = t_stop.rescale('ms').magnitude
winsize_dl = winsize.rescale('ms').magnitude
winstep_dl = winstep.rescale('ms').magnitude
# left side of the window time
if position == 'left-edge':
ts_winpos = np.arange(
t_start_dl, t_stop_dl - winsize_dl + winstep_dl, winstep_dl) * pq.ms
else:
raise ValueError(
'the current version only returns left-edge of the window')
return ts_winpos
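# Illustrative sketch (added; not part of the original module): the left edges
# of 100 ms windows sliding in 50 ms steps over [0, 200) ms.
def _demo_winpos():
    pos = _winpos(0 * pq.ms, 200 * pq.ms, 100 * pq.ms, 50 * pq.ms)
    assert np.array_equal(pos.magnitude, [0.0, 50.0, 100.0])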
def _UE(mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
"""
    returns the default results of unitary event analysis
    (surprise, empirical coincidences and the indices where they occurred
    in the given mat, n_exp, and the average firing rate of the neurons)
"""
rate_avg = _rate_mat_avg_trial(mat)
n_emp, indices = n_emp_mat_sum_trial(mat, N, pattern_hash)
if method == 'surrogate_TrialByTrial':
if 'n_surr' in kwargs:
n_surr = kwargs['n_surr']
else:
n_surr = 1
dist_exp, n_exp = gen_pval_anal(
mat, N, pattern_hash, method, n_surr=n_surr)
n_exp = np.mean(n_exp)
elif method == 'analytic_TrialByTrial' or method == 'analytic_TrialAverage':
dist_exp, n_exp = gen_pval_anal(mat, N, pattern_hash, method)
pval = dist_exp(n_emp)
Js = jointJ(pval)
return Js, rate_avg, n_exp, n_emp, indices
def jointJ_window_analysis(
data, binsize, winsize, winstep, pattern_hash,
method='analytic_TrialByTrial', t_start=None,
t_stop=None, binary=True, **kwargs):
"""
Calculates the joint surprise in a sliding window fashion
Parameters:
----------
data: list of neo.SpikeTrain objects
list of spike trains in different trials
0-axis --> Trials
1-axis --> Neurons
2-axis --> Spike times
binsize: Quantity scalar with dimension time
        size of bins for discretizing spike trains
winsize: Quantity scalar with dimension time
size of the window of analysis
winstep: Quantity scalar with dimension time
size of the window step
pattern_hash: list of integers
        list of patterns of interest, given as hash values
        (see the hash_from_pattern and inverse_hash_from_pattern functions)
method: string
        method with which the unitary events should be computed
        'analytic_TrialByTrial' -- > calculate the expectancy
        (analytically) on each trial, then sum over all trials.
        'analytic_TrialAverage' -- > calculate the expectancy
        by averaging over trials.
        (cf. Gruen et al. 2003)
        'surrogate_TrialByTrial' -- > calculate the distribution
        of expected coincidences by spike time randomization in
        each trial and sum over trials.
        Default is 'analytic_TrialByTrial'
t_start: float or Quantity scalar, optional
The start time to use for the time points.
If not specified, retrieved from the `t_start`
attribute of `spiketrain`.
    t_stop: float or Quantity scalar, optional
        The stop time to use for the time points.
        If not specified, retrieved from the `t_stop`
        attribute of `spiketrain`.
kwargs:
-------
n_surr: integer
number of surrogate to be used
Default is 100
Returns:
-------
result: dictionary
Js: list of float
JointSurprise of different given patterns within each window
shape: different pattern hash --> 0-axis
different window --> 1-axis
indices: list of list of integers
list of indices of pattern within each window
shape: different pattern hash --> 0-axis
different window --> 1-axis
n_emp: list of integers
empirical number of each observed pattern.
shape: different pattern hash --> 0-axis
different window --> 1-axis
n_exp: list of floats
        expected number of each pattern.
shape: different pattern hash --> 0-axis
different window --> 1-axis
rate_avg: list of floats
average firing rate of each neuron
shape: different pattern hash --> 0-axis
different window --> 1-axis
"""
if not isinstance(data[0][0], neo.SpikeTrain):
raise ValueError(
"structure of the data is not correct: 0-axis should be trials, 1-axis units and 2-axis neo spike trains")
if t_start is None:
t_start = data[0][0].t_start.rescale('ms')
if t_stop is None:
t_stop = data[0][0].t_stop.rescale('ms')
# position of all windows (left edges)
t_winpos = _winpos(t_start, t_stop, winsize, winstep, position='left-edge')
t_winpos_bintime = _bintime(t_winpos, binsize)
winsize_bintime = _bintime(winsize, binsize)
winstep_bintime = _bintime(winstep, binsize)
if winsize_bintime * binsize != winsize:
warnings.warn(
"ratio between winsize and binsize is not integer -- "
"the actual number for window size is " + str(winsize_bintime * binsize))
    if winstep_bintime * binsize != winstep:
        warnings.warn(
            "ratio between winstep and binsize is not integer -- "
            "the actual number for window step is " + str(winstep_bintime * binsize))
num_tr, N = np.shape(data)[:2]
n_bins = int((t_stop - t_start) / binsize)
mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
for tr, sts in enumerate(data):
bs = conv.BinnedSpikeTrain(
sts, t_start=t_start, t_stop=t_stop, binsize=binsize)
if binary is True:
mat = bs.to_bool_array()
else:
            raise ValueError(
                "The method currently works only on binary (zero-one) matrices")
mat_tr_unit_spt[tr] = mat
num_win = len(t_winpos)
Js_win, n_exp_win, n_emp_win = (np.zeros(num_win) for _ in range(3))
rate_avg = np.zeros((num_win, N))
indices_win = {}
for i in range(num_tr):
indices_win['trial' + str(i)] = []
for i, win_pos in enumerate(t_winpos_bintime):
mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
if method == 'surrogate_TrialByTrial':
if 'n_surr' in kwargs:
n_surr = kwargs['n_surr']
else:
n_surr = 100
Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[i], indices_lst = _UE(
mat_win, N, pattern_hash, method, n_surr=n_surr)
else:
Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
i], indices_lst = _UE(mat_win, N, pattern_hash, method)
for j in range(num_tr):
if len(indices_lst[j][0]) > 0:
indices_win[
'trial' + str(j)] = np.append(indices_win['trial' + str(j)], indices_lst[j][0] + win_pos)
return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win, 'n_exp': n_exp_win, 'rate_avg': rate_avg / binsize}
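# Illustrative usage sketch (added; the toy data and parameter values below
# are assumptions for demonstration, not from the original module):
def _demo_jointJ_window_analysis():
    np.random.seed(0)
    trials = [[neo.SpikeTrain(np.sort(np.random.uniform(0, 100, 20)) * pq.ms,
                              t_stop=100 * pq.ms) for _ in range(2)]
              for _ in range(5)]
    result = jointJ_window_analysis(trials, binsize=5 * pq.ms,
                                    winsize=50 * pq.ms, winstep=25 * pq.ms,
                                    pattern_hash=np.array([3]))
    return result['Js']  # one joint-surprise value per sliding window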
| pietroquaglio/elephant | elephant/unitary_event_analysis.py | Python | bsd-3-clause | 27,650 | [
"NEURON"
] | f0b9e4df0ac9e044386c539826ed5a8773b727292b0b7784c32f382e17f2d803 |
import threading
#Because eval(valenc) might require it
import datetime
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import DEncode, List
from DIRAC.Core.Base.ExecutorModule import ExecutorModule
from DIRAC.WorkloadManagementSystem.Client.JobState.CachedJobState import CachedJobState
class OptimizerExecutor( ExecutorModule ):
class JobLog:
class LogWrap:
def __init__( self, log, jid, funcName ):
self.__log = log
self.__jid = jid
self.__funcName = funcName
def __call__( self, msg, varMsg = "" ):
try:
funcObj = getattr( self.__log, self.__funcName )
except AttributeError:
raise AttributeError( "Logger does not have %s method" % self.__funcName )
msg = "\n".join( "[JID %s] %s" % ( self.__jid, line ) for line in msg.split( "\n" ) )
funcObj( msg, varMsg )
def __init__( self, log, jid ):
self.__jid = jid
self.__log = log
def __nonzero__( self ):
return True
def __getattr__( self, name ):
return self.LogWrap( self.__log, self.__jid, name )
@classmethod
def initialize( cls ):
opName = cls.ex_getProperty( 'fullName' )
opName = "/".join( opName.split( "/" )[1:] )
if opName.find( "Agent" ) == len( opName ) - 5:
opName = opName[ :-5]
cls.__optimizerName = opName
maxTasks = cls.ex_getOption( 'Tasks', 1 )
cls.__jobData = threading.local()
cls.__jobData.jobState = None
cls.__jobData.jobLog = None
cls.ex_setProperty( 'optimizerName', cls.__optimizerName )
try:
result = cls.initializeOptimizer()
if not result[ 'OK' ]:
return result
except Exception, excp:
cls.log.exception( "Error while initializing optimizer" )
return S_ERROR( "Error initializing: %s" % str( excp ) )
cls.ex_setMind( "WorkloadManagement/OptimizationMind" )
return S_OK()
@classmethod
def ex_optimizerName( cls ):
return cls.__optimizerName
@classmethod
def initializeOptimizer( cls ):
return S_OK()
def processTask( self, jid, jobState ):
self.__jobData.jobState = jobState
self.__jobData.jobLog = self.JobLog( self.log, jid )
try:
self.jobLog.info( "Processing" )
optResult = self.optimizeJob( jid, jobState )
#If the manifest is dirty, update it!
result = jobState.getManifest()
if not result[ 'OK' ]:
return result
manifest = result[ 'Value' ]
if manifest.isDirty():
jobState.setManifest( manifest )
#Did it go as expected? If not Failed!
if not optResult[ 'OK' ]:
self.jobLog.info( "Set to Failed/%s" % optResult[ 'Message' ] )
minorStatus = "%s optimizer" % self.ex_optimizerName()
return jobState.setStatus( "Failed", minorStatus, optResult[ 'Message' ], source = self.ex_optimizerName() )
return S_OK()
finally:
self.__jobData.jobState = None
self.__jobData.jobLog = None
def optimizeJob( self, jid, jobState ):
raise Exception( "You need to overwrite this method to optimize the job!" )
def setNextOptimizer( self, jobState = None ):
if not jobState:
jobState = self.__jobData.jobState
result = jobState.getOptParameter( 'OptimizerChain' )
if not result['OK']:
return result
opChain = List.fromChar( result[ 'Value' ], "," )
opName = self.__optimizerName
try:
opIndex = opChain.index( opName )
except ValueError:
return S_ERROR( "Optimizer %s is not in the chain!" % opName )
chainLength = len( opChain )
if chainLength - 1 == opIndex:
#This is the last optimizer in the chain!
result = jobState.setStatus( self.ex_getOption( 'WaitingStatus', 'Waiting' ),
minorStatus = self.ex_getOption( 'WaitingMinorStatus', 'Pilot Agent Submission' ),
appStatus = "Unknown",
source = opName )
if not result[ 'OK' ]:
return result
result = jobState.insertIntoTQ()
if not result[ 'OK' ]:
return result
return S_OK()
#Keep optimizing!
nextOp = opChain[ opIndex + 1 ]
self.jobLog.info( "Set to Checking/%s" % nextOp )
return jobState.setStatus( "Checking", nextOp, source = opName )
def storeOptimizerParam( self, name, value ):
if not self.__jobData.jobState:
return S_ERROR( "This function can only be called inside the optimizeJob function" )
valenc = DEncode.encode( value )
return self.__jobData.jobState.setOptParameter( name, valenc )
def retrieveOptimizerParam( self, name ):
if not self.__jobData.jobState:
return S_ERROR( "This function can only be called inside the optimizeJob function" )
result = self.__jobData.jobState.getOptParameter( name )
if not result[ 'OK' ]:
return result
valenc = result[ 'Value' ]
try:
value, encLength = DEncode.decode( valenc )
if encLength == len( valenc ):
return S_OK( value )
except Exception:
      self.jobLog.warn( "Opt param %s doesn't seem to be DEncoded: %s" % ( name, valenc ) )
return S_OK( eval( valenc ) )
@property
def jobLog( self ):
if not self.__jobData.jobLog:
raise RuntimeError( "jobLog can only be invoked inside the optimizeJob function" )
return self.__jobData.jobLog
def deserializeTask( self, taskStub ):
return CachedJobState.deserialize( taskStub )
def serializeTask( self, cjs ):
return S_OK( cjs.serialize() )
def fastTrackDispatch( self, jid, jobState ):
result = jobState.getStatus()
if not result[ 'OK' ]:
return S_ERROR( "Could not retrieve job status for %s: %s" % ( jid, result[ 'Message' ] ) )
status, minorStatus = result[ 'Value' ]
if status != "Checking":
self.log.info( "[JID %s] Not in checking state. Avoid fast track" % jid )
return S_OK()
result = jobState.getOptParameter( "OptimizerChain" )
if not result[ 'OK' ]:
return S_ERROR( "Could not retrieve OptimizerChain for job %s: %s" % ( jid, result[ 'Message' ] ) )
optChain = result[ 'Value' ]
if minorStatus not in optChain:
self.log.info( "[JID %s] End of chain for job" % jid )
return S_OK()
self.log.info( "[JID %s] Fast track possible to %s" % ( jid, minorStatus ) )
return S_OK( "WorkloadManagement/%s" % minorStatus )
| vmendez/DIRAC | WorkloadManagementSystem/Executor/Base/OptimizerExecutor.py | Python | gpl-3.0 | 6,380 | [
"DIRAC"
] | 6fc0da56a3d7aba1b2f359816823cf86573e009e7bad1be1d80dd3d7b836a356 |
''' PolicyCaller
Module used for calling policies. Its class is used for invoking
real policies, based on the policy name.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_ERROR
from DIRAC.ResourceStatusSystem.Utilities import Utils
from DIRAC.ResourceStatusSystem.Command import CommandCaller
__RCSID__ = '$Id$'
class PolicyCaller(object):
'''
PolicyCaller loads policies, sets commands and runs them.
'''
def __init__(self, clients=None):
'''
Constructor
'''
self.cCaller = CommandCaller
self.clients = {}
if clients is not None:
self.clients = clients
def policyInvocation(self, decisionParams, policyDict):
'''
Invokes a policy:
    1. Import the policy module specified in ``policyDict['module']``
       (e.g. 'DT_Policy') and create a policy object.
    2. Build a command object for ``policyDict['command']`` with arguments
       ``policyDict['args']`` via ``CommandCaller.commandInvocation``.
    3. Set the command as a member of the policy and evaluate it.
'''
if 'module' not in policyDict:
return S_ERROR('Malformed policyDict %s' % policyDict)
pModuleName = policyDict['module']
if 'command' not in policyDict:
return S_ERROR('Malformed policyDict %s' % policyDict)
pCommand = policyDict['command']
if 'args' not in policyDict:
return S_ERROR('Malformed policyDict %s' % policyDict)
pArgs = policyDict['args']
try:
policyModule = Utils.voimport('DIRAC.ResourceStatusSystem.Policy.%s' % pModuleName)
except ImportError:
return S_ERROR('Unable to import DIRAC.ResourceStatusSystem.Policy.%s' % pModuleName)
if not hasattr(policyModule, pModuleName):
      return S_ERROR('%s has no attribute %s' % (policyModule, pModuleName))
policy = getattr(policyModule, pModuleName)()
command = self.cCaller.commandInvocation(pCommand, pArgs, decisionParams, self.clients)
if not command['OK']:
return command
command = command['Value']
evaluationResult = self.policyEvaluation(policy, command)
if evaluationResult['OK']:
evaluationResult['Value']['Policy'] = policyDict
return evaluationResult
@staticmethod
def policyEvaluation(policy, command):
'''
Method that given a policy and a command objects, assigns the second one as
a member of the first and evaluates the policy.
'''
policy.setCommand(command)
evaluationResult = policy.evaluate()
return evaluationResult
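# Illustrative, self-contained sketch (added; the toy classes below are
# stand-ins for demonstration, not real DIRAC policies or commands) of the
# interaction policyEvaluation relies on: the policy receives a command
# object via setCommand and uses it inside evaluate.
class _ToyCommand(object):
  def doCommand(self):
    return {'OK': True, 'Value': 3}

class _ToyPolicy(object):
  def setCommand(self, command):
    self.command = command

  def evaluate(self):
    res = self.command.doCommand()
    status = 'Active' if res['OK'] and res['Value'] < 5 else 'Banned'
    return {'OK': True, 'Value': {'Status': status, 'Reason': 'toy threshold'}}

# PolicyCaller.policyEvaluation(_ToyPolicy(), _ToyCommand()) then returns
# {'OK': True, 'Value': {'Status': 'Active', 'Reason': 'toy threshold'}}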
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| yujikato/DIRAC | src/DIRAC/ResourceStatusSystem/PolicySystem/PolicyCaller.py | Python | gpl-3.0 | 2,830 | [
"DIRAC"
] | a5884f9d852fc29a69f370afa84070b258ed7d4d635fd9ac37e13f7940e7979e |
#!/usr/bin/env python
"""Tests for alignedsignal.py"""
########################################################################
# File: alignedsignal_test.py
# executable: alignedsignal_test.py
#
# Author: Andrew Bailey
# History: Created 03/09/18
########################################################################
import unittest
import os
import numpy as np
from nanotensor.alignedsignal import *
class CreateLabelsTest(unittest.TestCase):
"""Test the class CreateLabels"""
@classmethod
def setUpClass(cls):
super(CreateLabelsTest, cls).setUpClass()
cls.HOME = '/'.join(os.path.abspath(__file__).split("/")[:-2])
cls.dna_file = os.path.join(cls.HOME,
"tests/test_files/minion-reads/canonical/miten_PC_20160820_FNFAD20259_MN17223_sequencing_run_AMS_158_R9_WGA_Ecoli_08_20_16_43623_ch100_read280_strand.fast5")
cls.modified_file = os.path.join(cls.HOME,
"tests/test_files/minion-reads/methylated/DEAMERNANOPORE_20160805_FNFAD19383_MN16450_sequencing_run_MA_821_R9_gEcoli_MG1655_08_05_16_89825_ch100_read5189_strand.fast5")
cls.rna_file = os.path.join(cls.HOME,
"tests/test_files/minion-reads/rna_reads/DEAMERNANOPORE_20170922_FAH26525_MN16450_sequencing_run_MA_821_R94_NA12878_mRNA_09_22_17_67136_read_61_ch_151_strand.fast5")
cls.fasta = os.path.join(cls.HOME,
"tests/test_files/ecoli_k12_mg1655.fa")
# TODO
def test_initialize(self):
"""Test initialize function in CreateLabels"""
# handle = CreateLabels(self.dna_file)
# TODO
def test_add_signal_align_predictions(self):
"""Test add_signal_align_predictions function"""
pass
# TODO
def test_add_mea_labels(self):
"""Test add mea labels"""
pass
# TODO
def test_add_guide_alignment(self):
"""Test add guide alignemnt """
pass
# handle = CreateLabels(self.dna_file)
# handle.add_guide_alignment()
def test_create_labels_from_guide_alignment(self):
"""Test create_labels_from_guide_alignment"""
# make sure alignments track correct reference indexes
test_sam = "@SQ SN:ref LN:45\n@SQ SN:ref2 LN:40\nr001 163 ref 7 30 8M = 37 39 GATTACTG * XX:B:S,12561,2,20,112 MD:Z:6A1"
events = np.zeros(4, dtype=[('raw_start', int), ('raw_length', int), ('move', int),
('p_model_state', float), ('model_state', 'S5')])
events["raw_start"] = [0, 1, 2, 3]
events["raw_length"] = [1, 1, 1, 1]
events["move"] = [1, 1, 1, 1]
events["p_model_state"] = [1, 1, 1, 1]
events["model_state"] = ["GATTA", "ATTAC", "TTACT", "TACTG"]
cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_sam, kmer_index=2,
one_ref_indexing=True)[0]
self.assertEqual("GATTACAG", ''.join([bytes.decode(x) for x in cigar_labels['kmer']]))
self.assertSequenceEqual([0, 0, 0, 1, 2, 3, 3, 3], cigar_labels['raw_start'].tolist())
self.assertSequenceEqual([7, 8, 9, 10, 11, 12, 13, 14], cigar_labels['reference_index'].tolist())
# zero reference indexing and kmer index of 1
cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_sam, kmer_index=1,
one_ref_indexing=False)[0]
self.assertEqual("GATTACAG", ''.join([bytes.decode(x) for x in cigar_labels['kmer']]))
self.assertSequenceEqual([0, 0, 1, 2, 3, 3, 3, 3], cigar_labels['raw_start'].tolist())
self.assertSequenceEqual([6, 7, 8, 9, 10, 11, 12, 13], cigar_labels['reference_index'].tolist())
test_header = "@SQ SN:Chromosome LN:4641652 \n@PG ID:bwa PN:bwa VN:0.7.15-r1142-dirty CL:bwa mem -x ont2d /Users/andrewbailey/CLionProjects/nanopore-RNN/signalAlign/bin/test_output/tempFiles_alignment/temp_bwaIndex /Users/andrewbailey/CLionProjects/nanopore-RNN/signalAlign/bin/test_output/tempFiles_alignment/tempFiles_miten_PC_20160820_FNFAD20259_MN17223_mux_scan_AMS_158_R9_WGA_Ecoli_08_20_16_83098_ch138_read23_strand/temp_seq_5048dffc-a463-4d84-bd3b-90ca183f488a.fa\n"
no_mdz = "r001\t163\tChromosome\t1\t30\t7M\t=\t37\t39\tAGCTTTC\t*\tXX:B:S,12561,2,20,112" # \tMD:Z:6T"
events = np.zeros(4, dtype=[('raw_start', int), ('raw_length', int), ('move', int),
('p_model_state', float), ('model_state', 'S5')])
events["raw_start"] = [0, 1, 2, 3]
events["raw_length"] = [1, 1, 1, 1]
events["move"] = [1, 1, 0, 1]
events["p_model_state"] = [1, 1, 1, 1]
events["model_state"] = ["AGCTT", "GCTTT", "GCTTT", "CTTTC"]
with self.assertRaises(AssertionError):
create_labels_from_guide_alignment(events=events, sam_string=test_header + no_mdz)
cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_header + no_mdz, kmer_index=1,
one_ref_indexing=False, reference_path=self.fasta)[0]
self.assertEqual("AGCTTTT", ''.join([bytes.decode(x) for x in cigar_labels['kmer']]))
self.assertSequenceEqual([0, 0, 1, 3, 3, 3, 3], cigar_labels['raw_start'].tolist())
self.assertSequenceEqual([0, 1, 2, 3, 4, 5, 6], cigar_labels['reference_index'].tolist())
def test_index_bases_from_events(self):
"""Test index_bases_from_events"""
# make sure each event is corresponding to correct nucleotide
events = np.zeros(4, dtype=[('raw_start', int), ('raw_length', int), ('move', int),
('p_model_state', float), ('model_state', 'S5')])
events["raw_start"] = [0, 1, 2, 3]
events["raw_length"] = [1, 1, 1, 1]
events["move"] = [1, 1, 1, 1]
events["p_model_state"] = [1, 1, 1, 1]
events["model_state"] = ["GATTA", "ATTAC", "TTACA", "TACAG"]
bases, base_raw_starts, base_raw_lengths, probs = index_bases_from_events(events, kmer_index=2)
self.assertSequenceEqual(bases, list("GATTACAG"))
self.assertSequenceEqual(base_raw_lengths, [1, 1, 1, 1, 1, 1, 1, 1])
self.assertSequenceEqual(probs, [1, 1, 1, 1, 1, 1, 1, 1])
self.assertSequenceEqual(base_raw_starts, [0, 0, 0, 1, 2, 3, 3, 3])
bases, base_raw_starts, base_raw_lengths, probs = index_bases_from_events(events, kmer_index=3)
self.assertSequenceEqual(bases, list("GATTACAG"))
self.assertSequenceEqual(base_raw_lengths, [1, 1, 1, 1, 1, 1, 1, 1])
self.assertSequenceEqual(probs, [1, 1, 1, 1, 1, 1, 1, 1])
self.assertSequenceEqual(base_raw_starts, [0, 0, 0, 0, 1, 2, 3, 3])
bases, base_raw_starts, base_raw_lengths, probs = index_bases_from_events(events, kmer_index=4)
self.assertSequenceEqual(bases, list("GATTACAG"))
self.assertSequenceEqual(base_raw_lengths, [1, 1, 1, 1, 1, 1, 1, 1])
self.assertSequenceEqual(probs, [1, 1, 1, 1, 1, 1, 1, 1])
self.assertSequenceEqual(base_raw_starts, [0, 0, 0, 0, 0, 1, 2, 3])
class AlignedSignalTest(unittest.TestCase):
"""Test the class AlignedSignal"""
@classmethod
def setUpClass(cls):
super(AlignedSignalTest, cls).setUpClass()
cls.HOME = '/'.join(os.path.abspath(__file__).split("/")[:-2])
cls.dna_file = os.path.join(cls.HOME,
"tests/test_files/minion-reads/canonical/miten_PC_20160820_FNFAD20259_MN17223_sequencing_run_AMS_158_R9_WGA_Ecoli_08_20_16_43623_ch100_read280_strand.fast5")
cls.modified_file = os.path.join(cls.HOME,
"tests/test_files/minion-reads/methylated/DEAMERNANOPORE_20160805_FNFAD19383_MN16450_sequencing_run_MA_821_R9_gEcoli_MG1655_08_05_16_89825_ch100_read5189_strand.fast5")
cls.rna_file = os.path.join(cls.HOME,
"tests/test_files/minion-reads/rna_reads/DEAMERNANOPORE_20170922_FAH26525_MN16450_sequencing_run_MA_821_R94_NA12878_mRNA_09_22_17_67136_read_61_ch_151_strand.fast5")
cls.handle = AlignedSignal(scaled_signal=[1.1, 2.2, 1.1, 2.2, 1.1, 2.2])
def test__add_label(self):
"""Test _add_label method"""
label = np.zeros(4, dtype=[('raw_start', int), ('raw_length', int), ('reference_index', int),
('posterior_probability', float), ('kmer', 'S5')])
label["raw_start"] = [0, 1, 2, 3]
label["raw_length"] = [1, 1, 1, 1]
label["reference_index"] = [0, 1, 2, 3]
label["posterior_probability"] = [1, 1, 1, 1]
label["kmer"] = ["AAT", "A", "B", "C"]
self.handle.add_label(label, name="test", label_type='label')
self.handle.add_label(label, name="test2", label_type='prediction')
self.handle.add_label(label, name="test3", label_type='guide')
# catch wrong label type
with self.assertRaises(AssertionError):
self.handle.add_label(label, name="test3", label_type='fake')
with self.assertRaises(KeyError):
label = np.zeros(0, dtype=[('fake', int), ('raw_length', int), ('reference_index', int),
('posterior_probability', float), ('kmer', 'S5')])
self.handle.add_label(label, name="test", label_type="label")
def test_add_raw_signal(self):
"""Test add_raw_signal method"""
self.handle.add_raw_signal(np.asanyarray([1, 2, 3, 4, 5, 6]))
self.handle.add_raw_signal([1, 2, 3, 4, 5, 6])
with self.assertRaises(AssertionError):
self.handle.add_raw_signal([1.1, 2.2, 1.1, 4])
self.handle.add_raw_signal([1.1, 2, 1, 2, 3, 6])
def test__add_scaled_signal(self):
"""Test _add_scaled_signal method"""
# add floats as scaled signal
self.handle._add_scaled_signal(np.asanyarray([1.1, 2.2, 1.1, 2.2, 1.1, 2.2]))
self.handle._add_scaled_signal([1.1, 2.2, 1.1, 2.2, 1.1, 2.2])
# throw error if not (probably passed raw ADC counts)
with self.assertRaises(AssertionError):
self.handle._add_scaled_signal([1, 2.2, 1.1, 4])
self.handle._add_scaled_signal([1, 2, 1, 2, 3, 6])
def test_generate_label_mapping(self):
"""Test generate_label_mapping method"""
label = np.zeros(4, dtype=[('raw_start', int), ('raw_length', int), ('reference_index', int),
('posterior_probability', float), ('kmer', 'S5')])
label["raw_start"] = [0, 1, 2, 3]
label["raw_length"] = [0, 0, 0, 1]
label["reference_index"] = [0, 1, 2, 3]
label["posterior_probability"] = [1, 1, 1, 1]
label["kmer"] = ["AAT", "A", "B", "C"]
handle = AlignedSignal(scaled_signal=[1.1, 2.2, 1.1, 2.2, 1.1, 2.2])
# create labels
handle.add_label(label, name="test", label_type='label')
handle.add_label(label, name="test2", label_type='label')
handle.add_label(label, name="test2", label_type='prediction')
handle.add_label(label, name="test3", label_type='guide')
# make sure we generate the correct mappings
test = handle.generate_label_mapping(name='test')
for i, return_tuple in enumerate(test):
self.assertEqual(return_tuple[0], handle.scaled_signal[i:i + 1])
self.assertEqual(return_tuple[1], label["kmer"][i])
self.assertEqual(return_tuple[2], label["posterior_probability"][i])
self.assertEqual(return_tuple[3], label["reference_index"][i])
# make sure we generate the correct mappings for all labels added
test = handle.generate_label_mapping(name='test2')
for i, return_tuple in enumerate(test):
self.assertEqual(return_tuple[0], handle.scaled_signal[i:i + 1])
self.assertEqual(return_tuple[1], label["kmer"][i])
self.assertEqual(return_tuple[2], label["posterior_probability"][i])
self.assertEqual(return_tuple[3], label["reference_index"][i])
# make sure the key exists and the raw data exists
with self.assertRaises(AssertionError):
handle.generate_label_mapping(name="test2", scaled=False).__next__()
handle.generate_label_mapping(name="fake").__next__()
if __name__ == "__main__":
unittest.main()
raise SystemExit
| UCSC-nanopore-cgl/nanopore-RNN | nanotensor/tests/alignedsignal_test.py | Python | mit | 12,508 | [
"BWA"
] | a54adb1f42be194734277aa804c3a7901bd311b39650b372df60e00f56885724 |
import os
import sys
import numpy
import platform
import sysconfig
try:
from setuptools import setup, Extension
use_setuptools = True
print("setuptools is used.")
except ImportError:
from distutils.core import setup, Extension
use_setuptools = False
print("distutils is used.")
try:
from setuptools_scm import get_version
except ImportError:
git_num = None
if 'setuptools_scm' in sys.modules.keys():
try:
git_ver = get_version()
git_num = int(git_ver.split('.')[3].split('+')[0].replace("dev", ""))
except:
git_num = None
include_dirs_numpy = [numpy.get_include()]
extra_link_args = []
# Workaround Python issue 21121
config_var = sysconfig.get_config_var("CFLAGS")
if (config_var is not None and
"-Werror=declaration-after-statement" in config_var):
os.environ['CFLAGS'] = config_var.replace(
"-Werror=declaration-after-statement", "")
sources = ['c/_phono3py.c',
'c/harmonic/dynmat.c',
'c/harmonic/phonon.c',
'c/harmonic/lapack_wrapper.c',
'c/harmonic/phonoc_array.c',
'c/harmonic/phonoc_utils.c',
'c/anharmonic/phonon3/fc3.c',
'c/anharmonic/phonon3/real_self_energy.c',
'c/anharmonic/phonon3/interaction.c',
'c/anharmonic/phonon3/real_to_reciprocal.c',
'c/anharmonic/phonon3/reciprocal_to_normal.c',
'c/anharmonic/phonon3/imag_self_energy_with_g.c',
'c/anharmonic/phonon3/pp_collision.c',
'c/anharmonic/phonon3/collision_matrix.c',
'c/anharmonic/other/isotope.c',
'c/anharmonic/triplet/triplet.c',
'c/anharmonic/triplet/triplet_kpoint.c',
'c/anharmonic/triplet/triplet_iw.c',
'c/spglib/mathfunc.c',
'c/spglib/kpoint.c',
'c/kspclib/kgrid.c',
'c/kspclib/tetrahedron_method.c']
extra_compile_args = ['-fopenmp', ]
include_dirs = ['c/harmonic_h',
'c/anharmonic_h',
'c/spglib_h',
'c/kspclib_h'] + include_dirs_numpy
define_macros = []
extra_link_args_lapacke = []
include_dirs_lapacke = []
use_mkl = False
# C macro definitions:
# - MULTITHREADED_BLAS
# This deactivates OpenMP multithread harmonic phonon calculation,
# since inside each phonon calculation, zheev is called.
# When using multithread BLAS, this macro has to be set and
# by this all phonons on q-points should be calculated in series.
# - MKL_LAPACKE:
# This sets definitions and functions needed when using MKL lapacke.
# Phono3py complex values are handled based on those provided by Netlib
# lapacke. However MKL lapacke doesn't provide some macros and functions
# that provided Netlib. This macro defines those used in phono3py among them.
if os.path.isfile("setup_mkl.py"):
# This supposes that MKL multithread BLAS is used.
# This is invoked when setup_mkl.py exists on the current directory.
print("MKL LAPACKE is to be used.")
print("Use of icc is assumed (CC='icc').")
from setup_mkl import mkl_extra_link_args_lapacke, mkl_include_dirs_lapacke
#### Examples of setup_mkl.py ####
# For 2015
# intel_root = "/opt/intel/composer_xe_2015.7.235"
# mkl_root = "%s/mkl" % intel_root
# compiler_root = "%s/compiler" % intel_root
#
# For 2016
# intel_root = "/opt/intel/parallel_studio_xe_2016"
# mkl_root = "%s/mkl" % intel_root
# compiler_root = "%s" % intel_root
#
# For both
# mkl_extra_link_args_lapacke = ['-L%s/lib/intel64' % mkl_root,
# '-lmkl_rt']
# mkl_extra_link_args_lapacke += ['-L%s/lib/intel64' % compiler_root,
# '-lsvml',
# '-liomp5',
# '-limf',
# '-lpthread']
# mkl_include_dirs_lapacke = ["%s/include" % mkl_root]
use_mkl = True
extra_link_args_lapacke += mkl_extra_link_args_lapacke
include_dirs_lapacke += mkl_include_dirs_lapacke
if use_setuptools:
extra_compile_args += ['-DMKL_LAPACKE',
'-DMULTITHREADED_BLAS']
else:
define_macros += [('MKL_LAPACKE', None),
('MULTITHREADED_BLAS', None)]
elif os.path.isfile("libopenblas.py"):
# This supposes that multithread openBLAS is used.
# This is invoked when libopenblas.py exists on the current directory.
#### Example of libopenblas.py ####
# extra_link_args_lapacke += ['-lopenblas']
from libopenblas import extra_link_args_lapacke, include_dirs_lapacke
include_dirs_lapacke += []
if use_setuptools:
extra_compile_args += ['-DMULTITHREADED_BLAS']
else:
define_macros += [('MULTITHREADED_BLAS', None)]
elif (platform.system() == 'Darwin' and
os.path.isfile('/opt/local/lib/libopenblas.a')):
# This supposes lapacke with single-thread openBLAS provided by MacPort is
# used.
# % sudo port install gcc6
# % sudo port select --set gcc mp-gcc
# % sudo port install OpenBLAS +gcc6
extra_link_args_lapacke += ['/opt/local/lib/libopenblas.a']
include_dirs_lapacke += ['/opt/local/include']
elif ('CONDA_PREFIX' in os.environ and
(os.path.isfile(os.path.join(os.environ['CONDA_PREFIX'],
'lib', 'liblapacke.dylib')) or
os.path.isfile(os.path.join(os.environ['CONDA_PREFIX'],
'lib', 'liblapacke.so')))):
# This is for the system prepared with conda openblas.
extra_link_args_lapacke += ['-llapacke']
include_dirs_lapacke += [
os.path.join(os.environ['CONDA_PREFIX'], 'include'), ]
if os.path.isfile(os.path.join(os.environ['CONDA_PREFIX'],
'include', 'mkl.h')):
use_mkl = True
if use_setuptools:
extra_compile_args += ['-DMKL_LAPACKE',
'-DMULTITHREADED_BLAS']
else:
define_macros += [('MKL_LAPACKE', None),
('MULTITHREADED_BLAS', None)]
else:
if use_setuptools:
extra_compile_args += ['-DMULTITHREADED_BLAS']
else:
define_macros += [('MULTITHREADED_BLAS', None)]
elif os.path.isfile('/usr/lib/liblapacke.so'):
# This supposes that lapacke with single-thread BLAS is installed on
# system.
extra_link_args_lapacke += ['-llapacke', '-llapack', '-lblas']
include_dirs_lapacke += []
else:
# Here is the default lapacke linkage setting.
# Please modify according to your system environment.
# Without using multithreaded BLAS, DMULTITHREADED_BLAS is better to be
# removed to activate OpenMP multithreading harmonic phonon calculation,
# but this is not mandatory.
#
# The below supposes that lapacke with multithread openblas is used.
# Even if using single-thread BLAS and deactivating OpenMP
# multithreading for harmonic phonon calculation, the total performance
# decrease is considered marginal.
#
# For conda: Try installing with dynamic link library of openblas by
# % conda install numpy scipy h5py pyyaml matplotlib openblas libgfortran
extra_link_args_lapacke += ['-lopenblas', '-lgfortran']
include_dirs_lapacke += [
os.path.join(os.environ['CONDA_PREFIX'], 'include'), ]
if use_setuptools:
extra_compile_args += ['-DMULTITHREADED_BLAS']
else:
define_macros += [('MULTITHREADED_BLAS', None)]
cc = None
lib_omp = None
if 'CC' in os.environ:
if 'clang' in os.environ['CC']:
cc = 'clang'
if not use_mkl:
lib_omp = '-lomp'
# lib_omp = '-liomp5'
if 'gcc' in os.environ['CC'] or 'gnu-cc' in os.environ['CC']:
cc = 'gcc'
if cc == 'gcc' or cc is None:
lib_omp = '-lgomp'
if 'CC' in os.environ and 'gcc-' in os.environ['CC']:
# For macOS & homebrew gcc:
# Using conda's gcc is more recommended though. Suppose using
# homebrew gcc whereas conda is used as general environment.
# This is to avoid linking conda libgomp that is incompatible
# with homebrew gcc.
try:
v = int(os.environ['CC'].split('-')[1])
except ValueError:
pass
else:
ary = [os.sep, "usr", "local", "opt", "gcc@%d" % v, "lib", "gcc",
"%d" % v, "libgomp.a"]
libgomp_a = os.path.join(*ary)
if os.path.isfile(libgomp_a):
lib_omp = libgomp_a
if lib_omp:
extra_link_args.append(lib_omp)
## Uncomment below to measure reciprocal_to_normal_squared_openmp performance
# define_macros += [('MEASURE_R2N', None)]
extra_link_args += extra_link_args_lapacke
include_dirs += include_dirs_lapacke
print("extra_link_args", extra_link_args)
extension_phono3py = Extension(
'phono3py._phono3py',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=define_macros,
sources=sources)
packages_phono3py = ['phono3py',
'phono3py.cui',
'phono3py.interface',
'phono3py.other',
'phono3py.phonon',
'phono3py.phonon3',
'phono3py.sscha']
scripts_phono3py = ['scripts/phono3py',
'scripts/phono3py-load',
'scripts/phono3py-kaccum',
'scripts/phono3py-kdeplot',
'scripts/phono3py-coleigplot']
## This is for the test of libflame
##
# use_libflame = False
# if use_libflame:
# sources.append('c/anharmonic/flame_wrapper.c')
# extra_link_args.append('../libflame-bin/lib/libflame.a')
# include_dirs_libflame = ['../libflame-bin/include']
# include_dirs += include_dirs_libflame
########################
# _lapackepy extension #
########################
include_dirs_lapackepy = (['c/harmonic_h',] + include_dirs_numpy
+ include_dirs_lapacke)
sources_lapackepy = ['c/_lapackepy.c',
'c/harmonic/dynmat.c',
'c/harmonic/phonon.c',
'c/harmonic/phonoc_array.c',
'c/harmonic/phonoc_utils.c',
'c/harmonic/lapack_wrapper.c']
extension_lapackepy = Extension(
'phono3py._lapackepy',
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
include_dirs=include_dirs_lapackepy,
sources=sources_lapackepy)
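# Example developer invocation (illustration only, not part of this script):
#
#     CC=gcc python setup.py build_ext --inplace
#
# The CC environment variable is inspected above to decide which OpenMP
# runtime library to link against.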
if __name__ == '__main__':
version_nums = [None, None, None]
with open("phono3py/version.py") as w:
for line in w:
if "__version__" in line:
for i, num in enumerate(
line.split()[2].strip('\"').split('.')):
version_nums[i] = num
break
# To deploy to pypi by travis-CI
if os.path.isfile("__nanoversion__.txt"):
nanoversion = 0
with open('__nanoversion__.txt') as nv:
try:
for line in nv:
nanoversion = int(line.strip())
break
except ValueError:
nanoversion = 0
if nanoversion != 0:
version_nums.append(nanoversion)
elif git_num:
version_nums.append(git_num)
if None in version_nums:
print("Failed to get version number in setup.py.")
        raise RuntimeError("Failed to get version number in setup.py.")
version = ".".join(["%s" % n for n in version_nums[:3]])
if len(version_nums) > 3:
version += "-%d" % version_nums[3]
if use_setuptools:
setup(name='phono3py',
version=version,
description='This is the phono3py module.',
author='Atsushi Togo',
author_email='atz.togo@gmail.com',
url='http://phonopy.github.io/phono3py/',
packages=packages_phono3py,
install_requires=['numpy', 'scipy', 'PyYAML', 'matplotlib',
'h5py', 'spglib', 'phonopy>=2.8.1,<2.9'],
provides=['phono3py'],
scripts=scripts_phono3py,
ext_modules=[extension_lapackepy, extension_phono3py])
else:
setup(name='phono3py',
version=version,
description='This is the phono3py module.',
author='Atsushi Togo',
author_email='atz.togo@gmail.com',
url='http://phonopy.github.io/phono3py/',
packages=packages_phono3py,
requires=['numpy', 'scipy', 'PyYAML', 'matplotlib', 'h5py',
'phonopy', 'spglib'],
provides=['phono3py'],
scripts=scripts_phono3py,
ext_modules=[extension_lapackepy, extension_phono3py])
| atztogo/phono3py | setup.py | Python | bsd-3-clause | 12,839 | [
"phonopy"
] | a362bb50fe860755488efb5129ed917d12c6dbbc54b979d1d32dc04972a9416e |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 06 14:25:40 2017
@author: Zhenqin Wu
"""
import os
import time
import csv
import numpy as np
import tensorflow as tf
import deepchem
import pickle
from deepchem.molnet.run_benchmark_models import benchmark_classification, benchmark_regression
from deepchem.molnet.check_availability import CheckFeaturizer, CheckSplit
from deepchem.molnet.preset_hyper_parameters import hps
def run_benchmark(datasets,
model,
split=None,
metric=None,
direction=True,
featurizer=None,
n_features=0,
out_path='.',
hyper_parameters=None,
hyper_param_search=False,
max_iter=20,
search_range=2,
test=False,
reload=True,
seed=123):
"""
  Run benchmark tests on designated datasets with a deepchem (or user-defined) model
Parameters
----------
datasets: list of string
choice of which datasets to use, should be: bace_c, bace_r, bbbp, chembl,
clearance, clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba,
pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast, uv, factors,
kinase
  model: string or user-defined model structure
choice of which model to use, deepchem provides implementation of
logistic regression, random forest, multitask network,
bypass multitask network, irv, graph convolution;
    for a user-defined model, it should include functions: fit, evaluate
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
metric: string, optional (default=None)
choice of evaluation metrics, None = using the default metrics(AUC & R2)
direction: bool, optional(default=True)
Optimization direction when doing hyperparameter search
Maximization(True) or minimization(False)
featurizer: string or dc.feat.Featurizer, optional (default=None)
choice of featurization, None = using the default corresponding to model
(string only applicable to deepchem models)
n_features: int, optional(default=0)
depending on featurizers, redefined when using deepchem featurizers,
need to be specified for user-defined featurizers(if using deepchem models)
out_path: string, optional(default='.')
path of result file
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
hyper_param_search: bool, optional(default=False)
whether to perform hyper parameter search, using gaussian process by default
max_iter: int, optional(default=20)
number of optimization trials
  search_range: int(float), optional(default=2)
optimization on [initial values / search_range,
initial values * search_range]
test: boolean, optional(default=False)
whether to evaluate on test set
reload: boolean, optional(default=True)
whether to save and reload featurized datasets
"""
for dataset in datasets:
if dataset in [
'bace_c', 'bbbp', 'clintox', 'hiv', 'muv', 'pcba', 'pcba_146',
'pcba_2475', 'sider', 'tox21', 'toxcast'
]:
mode = 'classification'
      if metric is None:
metric = [
deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean),
]
elif dataset in [
'bace_r', 'chembl', 'clearance', 'delaney', 'hopv', 'kaggle', 'lipo',
'nci', 'pdbbind', 'ppb', 'qm7', 'qm7b', 'qm8', 'qm9', 'sampl',
'thermosol'
]:
mode = 'regression'
      if metric is None:
metric = [
deepchem.metrics.Metric(deepchem.metrics.pearson_r2_score, np.mean)
]
else:
raise ValueError('Dataset not supported')
    if featurizer is None and isinstance(model, str):
# Assigning featurizer if not user defined
pair = (dataset, model)
if pair in CheckFeaturizer:
featurizer = CheckFeaturizer[pair][0]
n_features = CheckFeaturizer[pair][1]
else:
continue
    if split not in [None] + CheckSplit[dataset]:
continue
loading_functions = {
'bace_c': deepchem.molnet.load_bace_classification,
'bace_r': deepchem.molnet.load_bace_regression,
'bbbp': deepchem.molnet.load_bbbp,
'chembl': deepchem.molnet.load_chembl,
'clearance': deepchem.molnet.load_clearance,
'clintox': deepchem.molnet.load_clintox,
'delaney': deepchem.molnet.load_delaney,
'factors': deepchem.molnet.load_factors,
'hiv': deepchem.molnet.load_hiv,
'hopv': deepchem.molnet.load_hopv,
'hppb': deepchem.molnet.load_hppb,
'kaggle': deepchem.molnet.load_kaggle,
'kinase': deepchem.molnet.load_kinase,
'lipo': deepchem.molnet.load_lipo,
'muv': deepchem.molnet.load_muv,
'nci': deepchem.molnet.load_nci,
'pcba': deepchem.molnet.load_pcba,
'pdbbind': deepchem.molnet.load_pdbbind_grid,
'ppb': deepchem.molnet.load_ppb,
'qm7': deepchem.molnet.load_qm7,
'qm8': deepchem.molnet.load_qm8,
'qm9': deepchem.molnet.load_qm9,
'sampl': deepchem.molnet.load_sampl,
'sider': deepchem.molnet.load_sider,
'thermosol': deepchem.molnet.load_thermosol,
'tox21': deepchem.molnet.load_tox21,
'toxcast': deepchem.molnet.load_toxcast,
'uv': deepchem.molnet.load_uv,
}
print('-------------------------------------')
print('Benchmark on dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, split=split, reload=reload)
else:
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, reload=reload)
train_dataset, valid_dataset, test_dataset = all_dataset
time_start_fitting = time.time()
train_score = {}
valid_score = {}
test_score = {}
if hyper_param_search:
if hyper_parameters is None:
hyper_parameters = hps[model]
search_mode = deepchem.hyper.GaussianProcessHyperparamOpt(model)
hyper_param_opt, _ = search_mode.hyperparam_search(
hyper_parameters,
train_dataset,
valid_dataset,
transformers,
metric,
direction=direction,
n_features=n_features,
n_tasks=len(tasks),
max_iter=max_iter,
search_range=search_range)
hyper_parameters = hyper_param_opt
if isinstance(model, str):
if mode == 'classification':
train_score, valid_score, test_score = benchmark_classification(
train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=test,
hyper_parameters=hyper_parameters,
seed=seed)
elif mode == 'regression':
train_score, valid_score, test_score = benchmark_regression(
train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=test,
hyper_parameters=hyper_parameters,
seed=seed)
else:
model.fit(train_dataset)
train_score['user_defined'] = model.evaluate(train_dataset, metric,
transformers)
valid_score['user_defined'] = model.evaluate(valid_dataset, metric,
transformers)
if test:
test_score['user_defined'] = model.evaluate(test_dataset, metric,
transformers)
time_finish_fitting = time.time()
with open(os.path.join(out_path, 'results.csv'), 'a') as f:
writer = csv.writer(f)
model_name = list(train_score.keys())[0]
for i in train_score[model_name]:
output_line = [
dataset,
str(split), mode, model_name, i, 'train',
train_score[model_name][i], 'valid', valid_score[model_name][i]
]
if test:
output_line.extend(['test', test_score[model_name][i]])
output_line.extend(
['time_for_running', time_finish_fitting - time_start_fitting])
writer.writerow(output_line)
if hyper_param_search:
      # pickle requires a binary file handle
      with open(os.path.join(out_path, dataset + model + '.pkl'), 'wb') as f:
pickle.dump(hyper_parameters, f)
#
# Note by @XericZephyr. Reason why I spun off this function:
# 1. Some model needs dataset information.
# 2. It offers us possibility to **cache** the dataset
# if the featurizer runs very slow, e.g., GraphConv.
# 2+. The cache can even happen at Travis CI to accelerate
# CI testing.
#
def load_dataset(dataset, featurizer, split='random'):
"""
Load specific dataset for benchmark.
Parameters
----------
dataset: string
choice of which datasets to use, should be: tox21, muv, sider,
toxcast, pcba, delaney, factors, hiv, hopv, kaggle, kinase, nci,
clintox, hiv, pcba_128, pcba_146, pdbbind, chembl, qm7, qm7b, qm9,
sampl, uv
featurizer: string or dc.feat.Featurizer.
choice of featurization.
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
"""
dataset_loading_functions = {
'bace_c': deepchem.molnet.load_bace_classification,
'bace_r': deepchem.molnet.load_bace_regression,
'bbbp': deepchem.molnet.load_bbbp,
'chembl': deepchem.molnet.load_chembl,
'clearance': deepchem.molnet.load_clearance,
'clintox': deepchem.molnet.load_clintox,
'delaney': deepchem.molnet.load_delaney,
'factors': deepchem.molnet.load_factors,
'hiv': deepchem.molnet.load_hiv,
'hopv': deepchem.molnet.load_hopv,
'hppb': deepchem.molnet.load_hppb,
'kaggle': deepchem.molnet.load_kaggle,
'kinase': deepchem.molnet.load_kinase,
'lipo': deepchem.molnet.load_lipo,
'muv': deepchem.molnet.load_muv,
'nci': deepchem.molnet.load_nci,
'pcba': deepchem.molnet.load_pcba,
'pcba_128': deepchem.molnet.load_pcba_128,
'pcba_146': deepchem.molnet.load_pcba_146,
'pcba_2475': deepchem.molnet.load_pcba_2475,
'pdbbind': deepchem.molnet.load_pdbbind_grid,
'ppb': deepchem.molnet.load_ppb,
'qm7': deepchem.molnet.load_qm7,
'qm8': deepchem.molnet.load_qm8,
'qm9': deepchem.molnet.load_qm9,
'sampl': deepchem.molnet.load_sampl,
'sider': deepchem.molnet.load_sider,
'thermosol': deepchem.molnet.load_thermosol,
'tox21': deepchem.molnet.load_tox21,
'toxcast': deepchem.molnet.load_toxcast,
'uv': deepchem.molnet.load_uv
}
print('-------------------------------------')
print('Loading dataset: %s' % dataset)
print('-------------------------------------')
# loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = dataset_loading_functions[dataset](
featurizer=featurizer, split=split)
return tasks, all_dataset, transformers
def benchmark_model(model, all_dataset, transformers, metric, test=False):
"""
Benchmark custom model.
  model: user-defined model structure
    For a user-defined model, it should include functions: fit, evaluate.
all_dataset: (train, test, val) data tuple.
Returned by `load_dataset` function.
transformers
metric: string
choice of evaluation metrics.
"""
time_start_fitting = time.time()
train_score = .0
valid_score = .0
test_score = .0
train_dataset, valid_dataset, test_dataset = all_dataset
model.fit(train_dataset)
train_score = model.evaluate(train_dataset, metric, transformers)
valid_score = model.evaluate(valid_dataset, metric, transformers)
if test:
test_score = model.evaluate(test_dataset, metric, transformers)
time_finish_fitting = time.time()
time_for_running = time_finish_fitting - time_start_fitting
return train_score, valid_score, test_score, time_for_running
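# Minimal usage sketch (not part of the original module). The dataset, model
# and split names below are examples drawn from the docstrings above and
# assume a standard deepchem installation; adjust them for your environment.
if __name__ == '__main__':
  # Benchmark graph convolutions on Tox21 with a scaffold split; scores are
  # appended to ./results.csv by run_benchmark.
  run_benchmark(['tox21'], model='graphconv', split='scaffold')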
| lilleswing/deepchem | deepchem/molnet/run_benchmark.py | Python | mit | 12,434 | [
"Gaussian"
] | 779917c312042b146eb7b1f8905d57a8eff7d9004d778888369d09539799712f |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
from invenio.bibupload import xml_marc_to_records, bibupload
from invenio.access_control_firerole import _ip_matcher_builder, _ipmatch
from invenio.webinterface_handler_config import HTTP_BAD_REQUEST, HTTP_FORBIDDEN
import invenio.bibupload as bibupload_module
from invenio.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
_CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = []
for _network, _collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.items():
if '/' not in _network:
_network += '/32'
_CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.append((_ip_matcher_builder(_network), _collection))
del _network
del _collection
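# Illustration only (not part of this module): CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS
# maps client networks to the collections that robots on them may upload into,
# for example:
#
#     CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = {
#         '127.0.0.1': ['*'],                    # wildcard: any collection
#         '10.0.0.0/24': ['ARTICLES', 'BOOKS'],  # restricted subnet
#     }
#
# A bare IP without a '/mask' suffix is treated as a /32 by the loop above.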
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _get_client_authorized_collections(_get_client_ip(req)):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
# check IP and useragent:
if not _get_client_authorized_collections(_get_client_ip(req)):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
if arg_mode == '--insertorreplace':
arg_mode = '-ir'
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
# write temporary file:
(fd, filename) = tempfile.mkstemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_",
dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = _get_client_authorized_collections(client_ip)
if '*' not in permitted_dbcollids: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
        error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
user_info = collect_user_info(req)
(fd, filename) = tempfile.mkstemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_", dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode,
"--priority=" + priority, "-N", "batchupload")
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1", email_logs_to=None):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
3 - File already exists
"""
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.get_full_name() == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
# Move document to be uploaded to temporary folder
(fd, tmp_file) = tempfile.mkstemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension, dir=CFG_TMPSHAREDDIR)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
(fd, filename) = tempfile.mkstemp(prefix=identifier + '_', dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
            # Execute bibupload with the appropriate mode
task_arguments = ('bibupload', user, "--" + mode,
"--priority=" + priority, "-N", "batchupload")
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
task_arguments += (date, )
if email_logs_to:
task_arguments += ("--email-logs-to", email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append('MoveError')
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
return None
def perform_basic_upload_checks(xml_record):
""" Performs tests that would provoke the bibupload task to fail with
    an exit status 1, to prevent batchupload from crashing while warning
    the user about the issue
"""
from invenio.bibupload import writing_rights_p
errors = []
if not writing_rights_p():
errors.append("Error: BibUpload does not have rights to write fulltext files.")
recs = create_records(xml_record, 1, 1)
if recs == []:
errors.append("Error: Cannot parse MARCXML file.")
elif recs[0][0] is None:
errors.append("Error: MARCXML file has wrong format: %s" % recs)
return errors
def perform_upload_check(xml_record, mode):
""" Performs a upload simulation with the given record and mode
@return: string describing errors
@rtype: string
"""
error_cache = []
def my_writer(msg, stream=sys.stdout, verbose=1):
if verbose == 1:
if 'DONE' not in msg:
error_cache.append(msg.strip())
orig_writer = bibupload_module.write_message
bibupload_module.write_message = my_writer
error_cache.extend(perform_basic_upload_checks(xml_record))
if error_cache:
# There has been some critical error
return '\n'.join(error_cache)
recs = xml_marc_to_records(xml_record)
try:
upload_mode = mode[2:]
# Adapt input data for bibupload function
if upload_mode == "r insert-or-replace":
upload_mode = "replace_or_insert"
for record in recs:
if record:
record_strip_empty_volatile_subfields(record)
record_strip_empty_fields(record)
bibupload(record, opt_mode=upload_mode, pretend=True)
finally:
bibupload_module.write_message = orig_writer
return '\n'.join(error_cache)
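# Example dry run (hypothetical file name, illustration only):
#
#     errors = perform_upload_check(open('/tmp/batch.xml').read(), '--insert')
#     if errors:
#         # reject the upload and show `errors` to the user instead of queueing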
def _get_useragent(req):
"""Return client user agent from req object."""
user_info = collect_user_info(req)
return user_info['agent']
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _get_client_authorized_collections(client_ip):
"""
Is this client permitted to use the service?
Return list of collections for which the client is authorized
"""
ret = []
for network, collection in _CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS:
if _ipmatch(client_ip, network):
if '*' in collection:
return ['*']
ret += collection
return ret
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
client_useragent = _get_useragent(req)
if _CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE.match(client_useragent):
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
check 980 $a values and collection tags in the file to see if they are among the
permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
Useful to make sure that the client does not override other records by
mistake.
"""
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
permitted_dbcollids = _get_client_authorized_collections(client_ip)
if '*' in permitted_dbcollids:
return True
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in permitted_dbcollids:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in permitted_dbcollids:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
| jmartinm/invenio | modules/bibupload/lib/batchuploader_engine.py | Python | gpl-2.0 | 29,348 | [
"VisIt"
] | 0b1366c20019258c32dbd5bc381e8ce99c1c94c5451e6eb2730dfc8a4b05016a |
#!/usr/bin/env python
# errors.py
#
# Copyright (C) 2014 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2
#
#
# Standard Errors
#
# This file only contains typical errors to be reported on the UI.
INTERNET_ERROR = {
'title': 'No internet connection..',
'description': 'You need to be connected to the internet to download Kano OS'
}
FREE_SPACE_ERROR = {
'title': 'Insufficient available space..',
'description': 'Please ensure you have at least 4.5 GB available space locally'
}
TOOLS_ERROR = {
'title': 'Missing some tools..',
'description': 'Please visit the dependency page for more information'
}
NO_DISKS_ERROR = {
'title': 'SD Card not found..',
'description': 'Make sure you have inserted the SD card correctly'
}
DOWNLOAD_ERROR = {
'title': 'There was an error downloading Kano OS..',
'description': 'Please check your internet connection or try again later'
}
OLDBURNER_ERROR = {
'title': 'This version of the Kano Burner is too old',
'description': 'Please download a new version from help.kano.me.'
}
SERVER_DOWN_ERROR = {
'title': 'Our servers seem to be down.. :(',
'description': 'We apologise for the inconvenience. Please try again later.'
}
MD5_ERROR = {
'title': 'Could not verify download integrity..',
'description': 'Kano OS download may have been corrupted - please try again'
}
BURN_ERROR = {
'title': 'Burning Kano OS failed..',
'description': 'Make sure the SD card is still correctly inserted and try again'
}
UNMOUNT_ERROR = {
'title': 'There was an error unmounting the disk..',
'description': 'Make sure the you selected the right disk, and try again'
}
FORMAT_ERROR = {
'title': 'There was an error formatting the disk..',
'description': 'Maybe it is write protected?'
}
EJECT_ERROR = {
'title': 'There was an error ejecting the disk..',
'description': 'Please eject it manually.'
}
| KanoComputing/kano-burners | src/common/errors.py | Python | gpl-2.0 | 1,964 | [
"VisIt"
] | a8ebbf6ed8aa4fd209fb6ce641dc46ef98576a9ccf7deb78641e72a76f857c1b |
'''
This function loads one random recording from CinC Challenge and use pre-trained model in predicting what it is using Residual Networks
For more information visit: https://github.com/fernandoandreotti/cinc-challenge2017
Referencing this work
Andreotti, F., Carr, O., Pimentel, M.A.F., Mahdi, A., & De Vos, M. (2017). Comparing Feature Based
Classifiers and Convolutional Neural Networks to Detect Arrhythmia from Short Segments of ECG. In
Computing in Cardiology. Rennes (France).
--
cinc-challenge2017, version 1.0, Sept 2017
Last updated : 27-09-2017
Released under the GNU General Public License
Copyright (C) 2017 Fernando Andreotti, Oliver Carr, Marco A.F. Pimentel, Adam Mahdi, Maarten De Vos
University of Oxford, Department of Engineering Science, Institute of Biomedical Engineering
fernando.andreotti@eng.ox.ac.uk
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# Download some random waveform from challenge database
from random import randint
import urllib.request
record = "A{:05d}".format(randint(0, 999))
urlfile = "https://www.physionet.org/physiobank/database/challenge/2017/training/A00/{}.mat".format(record)
local_filename, headers = urllib.request.urlretrieve(urlfile)
print('Downloaded record {} ..'.format(record))
# Load data
import scipy.io
mat_data = scipy.io.loadmat(local_filename)
data = mat_data['val']
# Parameters
FS = 300
maxlen = 30*FS
classes = ['A', 'N', 'O','~']
# Preprocessing data
print("Preprocessing recording ..")
import numpy as np
X = np.zeros((1,maxlen))
data = np.nan_to_num(data) # removing NaNs and Infs
data = data[0,0:maxlen]
data = data - np.mean(data)
data = data/np.std(data)
X[0,:len(data)] = data.T # padding sequence
data = X
data = np.expand_dims(data, axis=2) # required by Keras
del X
# Load and apply model
print("Loading model")
from keras.models import load_model
model = load_model('ResNet_30s_34lay_16conv.hdf5')
print("Applying model ..")
prob = model.predict(data)
ann = np.argmax(prob)
print("Record {} classified as {} with {:3.1f}% certainty".format(record,classes[ann],100*prob[0,ann]))
# Visualising output of first 16 convolutions for some layers
from keras import backend as K
import matplotlib.pyplot as plt
plt.plot(data[0,0:1000,0],)
plt.title('Input signal')
#plt.savefig('layinput.eps', format='eps', dpi=1000) # saving?
for l in range(1,34):#range(1,34):
Np = 1000
## Example of plotting first layer output
layer_name = 'conv1d_{}'.format(l)
layer_dict = dict([(layer.name, layer) for layer in model.layers])
layer_output = layer_dict[layer_name].output
# K.learning_phase() is a flag that indicates if the network is in training or
# predict phase. It allow layer (e.g. Dropout) to only be applied during training
get_layer_output = K.function([model.layers[0].input, K.learning_phase()],
[layer_output])
filtout = get_layer_output([data,0])[0]
Npnew = int(Np*filtout.shape[1]/data.shape[1])
fig, ax = plt.subplots(nrows=4, ncols=4, sharex='col', sharey='row')
count = 0
for row in ax:
for col in row:
col.plot(range(Npnew), filtout[0,0:Npnew,count],linewidth=1.0,color='olive')
count += 1
plt.suptitle('Layer {}'.format(l))
#plt.savefig('layoutput{}.eps'.format(l), format='eps', dpi=1000) # saving?
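# Not in the original script: an explicit show() is needed for the figures to
# be rendered when this runs as a plain, non-interactive script.
plt.show()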
| fernandoandreotti/cinc-challenge2017 | deeplearn-approach/predict.py | Python | gpl-3.0 | 3,999 | [
"VisIt"
] | f1f29a1259d59a50048c3557148b32225d544bd24bb7ec2511adaf52b2cfeda2 |
"""
[2017-09-20] Challenge #332 [Intermediate] Training for Summiting Everest
https://www.reddit.com/r/dailyprogrammer/comments/71gbqj/20170920_challenge_332_intermediate_training_for/
# Description
You and your friend wish to summit Mount Everest the highest peak in the world. One problem: you live at sea level and
despite being in great shape haven't been at altitude very long. So you propose a series of stays on mountaintops
around the world using increasing elevations to prepare your body for the extremes you'll encounter.
You and your friend gather a list of mountain peaks that you'd like to visit on your way there. You can't deviate from
your path but you can choose to go up the mountain or not. But you have to pick ones that go higher than the previous
one. If you go _down_ your body will suffer and your trip to the summit of Everest will be in peril.
Your friend has done the job of lining up the route to get you from home to basecamp. She looks to you to devise an
algorithm to pick the peaks to summit along the way maximizing your summits but always going higher and higher never
lower than you did before.
Can you devise such an algorithm such that you find the list of peaks to summit along the way? Remember - each has to
be higher than the last; you want to hit as many such peaks as possible, and there's no turning back to visit a
previously passed peak.
# Input Description
You'll be given a series of integers on a line representing the peak height (in thousands of feet) that you'll pass on
your way to Everest. Example:
0 8 4 12 2 10 6 14 1 9 5 13 3 11 7 15
# Output Description
Your program should emit the peak heights you should summit in order that are always higher than the previous peak. In
some cases multiple solutions of the same length may be possible. Example:
0 2 6 9 11 15
# Challenge Inputs
1 2 2 5 9 5 4 4 1 6
4 9 4 9 9 8 2 9 0 1
0 5 4 6 9 1 7 6 7 8
1 2 20 13 6 15 16 0 7 9 4 0 4 6 7 8 10 18 14 10 17 15 19 0 4 2 12 6 10 5 12 2 1 7 12 12 10 8 9 2 20 19 20 17 5 19 0 11
5 20
# Challenge Output
1 2 4 6
4 8 9
0 1 6 7 8
1 2 4 6 7 8 10 14 15 17 19 20
"""
def main():
    # Demonstrate the sketch above on the example sequence from the description.
    peaks = [int(x) for x in "0 8 4 12 2 10 6 14 1 9 5 13 3 11 7 15".split()]
    print(" ".join(str(h) for h in climbing_order(peaks)))
if __name__ == "__main__":
main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20170920B.py | Python | mit | 2,176 | [
"VisIt"
] | c909181a0e124d6456b79112b77bfa98ec17ea8a55b02e777f857b3301129c03 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
if sys.version_info < (3, 6):
print('"TestHarness" requires python version 3.6 or greater, version {}.{} is being used.' \
.format(sys.version_info[0], sys.version_info[1]))
sys.exit(1)
from .TestHarness import TestHarness
from .TestHarness import findDepApps
__all__=['TestHarness', 'findDepApps']
| harterj/moose | python/TestHarness/__init__.py | Python | lgpl-2.1 | 637 | [
"MOOSE"
] | f86c20595dd02d056c9a096781b2a40757d1df496ebef4afc863277d382c2dc2 |
# HF XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# HF X
# HF X f90wrap: F90 to Python interface generator with derived type support
# HF X
# HF X Copyright James Kermode 2011
# HF X
# HF X These portions of the source code are released under the GNU General
# HF X Public License, version 2, http://www.gnu.org/copyleft/gpl.html
# HF X
# HF X If you would like to license the source code under different terms,
# HF X please contact James Kermode, james.kermode@gmail.com
# HF X
# HF X When using this software, please cite the following reference:
# HF X
# HF X http://www.jrkermode.co.uk/f90wrap
# HF X
# HF XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
from __future__ import print_function
import copy
import logging
import re
from f90wrap import fortran as ft
class AccessUpdater(ft.FortranTransformer):
"""Visit module contents and update public_symbols and
private_symbols lists to be consistent with (i) default module
access; (ii) public and private statements at module level;
(iii) public and private statement in types; (iv) public
and private attributes of individual elements."""
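    # Illustration (mine, not from the f90wrap docs): under a module whose
    # default access is private, a symbol carrying an explicit `public`
    # attribute is appended to mod.public_symbols, while every other symbol
    # is appended to mod.private_symbols.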
def __init__(self):
self.mod = None
self.type = None
def update_access(self, node, mod, default_access, in_type=False):
if default_access == 'public':
if ('private' not in getattr(node, 'attributes', []) and
node.name not in mod.private_symbols):
# symbol should be marked as public if it's not already
if not in_type and node.name not in mod.public_symbols:
logging.debug('marking public symbol ' + node.name)
mod.public_symbols.append(node.name)
else:
# symbol should be marked as private if it's not already
if not in_type and (node.name not in mod.private_symbols and
'callback' not in getattr(node, 'attributes', [])):
logging.debug('marking private symbol ' + node.name)
mod.private_symbols.append(node.name)
elif default_access == 'private':
if ('public' not in getattr(node, 'attributes', []) and
node.name not in mod.public_symbols):
# symbol should be marked as private if it's not already
if not in_type and (node.name not in mod.private_symbols and
'callback' not in getattr(node, 'attributes', [])):
logging.debug('marking private symbol ' + node.name)
mod.private_symbols.append(node.name)
else:
# symbol should be marked as public if it's not already
if not in_type and node.name not in mod.public_symbols:
logging.debug('marking public symbol ' + node.name)
mod.public_symbols.append(node.name)
else:
            raise ValueError('bad default access %s for reference %s' %
                             (default_access, mod.name))
def visit_Module(self, mod):
# keep track of the current module
self.mod = mod
mod = self.generic_visit(mod)
self.mod = None
return mod
def visit_Procedure(self, node):
if self.mod is None:
return self.generic_visit(node)
self.update_access(node, self.mod, self.mod.default_access)
return self.generic_visit(node)
def visit_Interface(self, node):
if self.mod is None:
return self.generic_visit(node)
self.update_access(node, self.mod, self.mod.default_access)
return self.generic_visit(node)
def visit_Type(self, node):
if self.mod is None:
return self.generic_visit(node)
self.type = node
self.update_access(node, self.mod, self.mod.default_access)
node.default_access = 'public'
if 'private' in node.attributes:
node.default_access = 'private'
node = self.generic_visit(node)
self.type = None
return node
def visit_Element(self, node):
if self.type is not None:
self.update_access(node, self.mod, self.type.default_access, in_type=True)
else:
self.update_access(node, self.mod, self.mod.default_access)
return node
class PrivateSymbolsRemover(ft.FortranTransformer):
"""
Transform a tree by removing private symbols
"""
def __init__(self):
self.mod = None
def visit_Module(self, mod):
# keep track of the current module
self.mod = mod
mod = self.generic_visit(mod)
self.mod = None
return mod
def visit_Procedure(self, node):
if self.mod is None:
return self.generic_visit(node)
if node.name in self.mod.private_symbols:
logging.debug('removing private symbol %s' % node.name)
return None
if hasattr(node, 'attributes') and 'private' in node.attributes:
return None
return self.generic_visit(node)
def visit_Interface(self, node):
# remove entirely private interfaces
if node.name in self.mod.private_symbols:
logging.debug('removing private symbol %s' % node.name)
return None
# do not call generic_visit(), so we don't
# remove private procedures within public
# interfaces, as these should still be wrapped
return node
visit_Type = visit_Procedure
visit_Element = visit_Procedure
def remove_private_symbols(node):
"""
Walk the tree starting at *node*, removing all private symbols.
This function first applies the AccessUpdater transformer to
ensure module *public_symbols* and *private_symbols* are up to
date with *default_access* and individual `public` and `private`
attributes.
"""
node = AccessUpdater().visit(node)
node = PrivateSymbolsRemover().visit(node)
return node
class UnwrappablesRemover(ft.FortranTransformer):
def __init__(self, callbacks, types, constructors, destructors):
self.callbacks = callbacks
self.types = types
self.constructors = constructors
self.destructors = destructors
def visit_Interface(self, node):
# don't wrap operator overloading routines
if node.name.startswith('operator('):
return None
return self.generic_visit(node)
def visit_Procedure(self, node):
# special case: keep all constructors and destructors, although
# they may have pointer arguments
for suff in self.constructors + self.destructors:
if node.name.endswith(suff):
return self.generic_visit(node)
# don't wrap operator overloading routines
if node.name.startswith('operator('):
return None
# FIXME don't wrap callback arguments
if 'callback' in node.attributes:
return None
args = node.arguments[:]
if isinstance(node, ft.Function):
args.append(node.ret_val)
for arg in args:
# only callback functions in self.callbacks
if 'callback' in arg.attributes:
if node.name not in self.callbacks:
logging.debug('removing callback routine %s' % node.name)
return None
else:
continue
if 'optional' in arg.attributes:
# we can remove the argument instead of the whole routine
return self.generic_visit(node)
else:
# no allocatables or pointers
if 'allocatable' in arg.attributes or 'pointer' in arg.attributes:
logging.debug('removing routine %s due to allocatable/pointer arguments' % node.name)
return None
dims = [attrib for attrib in arg.attributes if attrib.startswith('dimension')]
# # no complex scalars (arrays are OK)
# if arg.type.startswith('complex') and len(dims) == 0:
# logging.debug('removing routine %s due to complex scalar arguments' % node.name)
# return None
# no derived types apart from those in self.types
if arg.type.startswith('type') and ft.split_type(arg.type) not in self.types:
logging.debug('removing routine %s due to unsupported derived type %s' %
(node.name, arg.type))
return None
# no arrays of derived types
if arg.type.startswith('type') and len(dims) != 0:
logging.debug('removing routine %s due to unsupported derived type array %s' %
(node.name, arg.type))
return None
return self.generic_visit(node)
def visit_Argument(self, node):
if not hasattr(node, 'attributes'):
return self.generic_visit(node)
if not 'optional' in node.attributes:
return self.generic_visit(node)
# remove optional allocatable/pointer arguments
if 'allocatable' in node.attributes or 'pointer' in node.attributes:
logging.debug('removing optional argument %s due to allocatable/pointer attributes' %
node.name)
return None
dims = [attrib for attrib in node.attributes if attrib.startswith('dimension')]
# remove optional complex scalar arguments
if node.type.startswith('complex') and len(dims) == 0:
logging.debug('removing optional argument %s as it is a complex scalar' % node.name)
return None
# remove optional derived types not in self.types
if node.type.startswith('type') and ft.split_type(node.type) not in self.types:
logging.debug('removing optional argument %s due to unsupported derived type %s' %
(node.name, node.type))
return None
# remove arrays of derived types
if node.type.startswith('type') and len(dims) != 0:
logging.debug('removing optional argument %s due to unsupported derived type array %s' %
(node.name, node.type))
return None
return self.generic_visit(node)
def visit_Type(self, node):
"""
Remove unwrappable elements inside derived types
"""
if node.name not in self.types:
logging.debug('removing type %s' % node.name)
return None
else:
elements = []
for element in node.elements:
                # Collect the element's dimension attributes (if any); build a
                # list so the `dims != []` checks below also hold on Python 3,
                # where filter() returns a lazy iterator rather than a list.
                dims = [attr for attr in element.attributes if attr.startswith('dimension')]
# Skip this if the type is not do-able
if 'pointer' in element.attributes and dims != []:
logging.debug('removing %s.%s due to pointer attribute' %
(node.name, element.name))
continue
if element.type.lower() == 'type(c_ptr)':
logging.debug('removing %s.%s as type(c_ptr) unsupported' %
(node.name, element.name))
continue
if element.type.startswith('type') and element.type not in self.types:
logging.debug('removing %s.%s as type %s unsupported' %
(node.name, element.name, element.type))
continue
elements.append(element)
node.elements = elements
return self.generic_visit(node)
def visit_Module(self, node):
"""
Remove unwrappable elements inside modules.
As above, but also includes derived type elements from modules
that do not have the "target" attribute
"""
elements = []
for element in node.elements:
            # Collect the element's dimension attributes (if any); build a
            # list so the `dims != []` checks below also hold on Python 3.
            dims = [attr for attr in element.attributes if attr.startswith('dimension')]
if 'pointer' in element.attributes and dims != []:
logging.debug('removing %s.%s due to pointer attribute' %
(node.name, element.name))
continue
if element.type.lower() == 'type(c_ptr)':
logging.debug('removing %s.%s as type(c_ptr) unsupported' %
(node.name, element.name))
continue
if element.type.startswith('type') and 'target' not in element.attributes:
logging.debug('removing %s.%s as missing "target" attribute' %
(node.name, element.name))
continue
if element.type.startswith('type') and element.type not in self.types:
logging.debug('removing %s.%s as type %s unsupported' %
(node.name, element.name, element.type))
continue
# parameter arrays in modules live only in the mind of the compiler
if 'parameter' in element.attributes and dims != []:
logging.debug('removing %s.%s as it has "parameter" attribute' %
(node.name, element.name))
continue
elements.append(element)
node.elements = elements
return self.generic_visit(node)
def fix_subroutine_uses_clauses(tree, types):
"""Walk over all nodes in tree, updating subroutine uses
clauses to include the parent module and all necessary
modules from types"""
for mod, sub, arguments in ft.walk_procedures(tree):
sub.uses = set()
sub.mod_name = None
if mod is not None:
sub_name = sub.name
if hasattr(sub, 'call_name'):
sub_name = sub.call_name
sub.uses.add((mod.name, (sub_name,)))
sub.mod_name = mod.name
for arg in arguments:
if arg.type.startswith('type') and ft.strip_type(arg.type) in types:
sub.uses.add((types[ft.strip_type(arg.type)].mod_name, (ft.strip_type(arg.type),)))
return tree
def fix_element_uses_clauses(tree, types):
"""
Add uses clauses to derived type elements in modules
"""
for mod in ft.walk_modules(tree):
for el in mod.elements:
el.uses = set()
if el.type.startswith('type') and ft.strip_type(el.type) in types:
el.uses.add((types[el.type].mod_name, (ft.strip_type(el.type),)))
return tree
def set_intent(attributes, intent):
"""Remove any current "intent" from attributes and replace with intent given"""
attributes = [attr for attr in attributes if not attr.startswith('intent')]
attributes.append(intent)
return attributes
def convert_derived_type_arguments(tree, init_lines, sizeof_fortran_t):
for mod, sub, arguments in ft.walk_procedures(tree, include_ret_val=True):
sub.types = set()
sub.transfer_in = []
sub.transfer_out = []
sub.allocate = []
sub.deallocate = []
if 'constructor' in sub.attributes:
sub.arguments[0].attributes = set_intent(sub.arguments[0].attributes, 'intent(out)')
if 'destructor' in sub.attributes:
logging.debug('deallocating arg "%s" in %s' % (sub.arguments[0].name, sub.name))
sub.deallocate.append(sub.arguments[0].name)
for arg in arguments:
if not hasattr(arg, 'type') or not arg.type.startswith('type'):
continue
# save original Fortran intent since we'll be overwriting it
# with intent of the opaque pointer
arg.attributes = arg.attributes + ['fortran_' + attr for attr in
arg.attributes if attr.startswith('intent')]
typename = ft.strip_type(arg.type)
arg.wrapper_type = 'integer'
arg.wrapper_dim = sizeof_fortran_t
sub.types.add(typename)
if typename in init_lines:
use, (exe, exe_optional) = init_lines[typename]
if use is not None:
sub.uses.add((use, [typename]))
arg.init_lines = (exe_optional, exe)
if 'intent(out)' in arg.attributes:
arg.attributes = set_intent(arg.attributes, 'intent(out)')
sub.transfer_out.append(arg.name)
if 'pointer' not in arg.attributes:
logging.debug('allocating arg "%s" in %s' % (arg.name, sub.name))
sub.allocate.append(arg.name)
else:
arg.attributes = set_intent(arg.attributes, 'intent(in)')
sub.transfer_in.append(arg.name)
return tree
def convert_array_intent_out_to_intent_inout(tree):
"""
Find all intent(out) array arguments and convert to intent(inout)
"""
for mod, sub, arguments in ft.walk_procedures(tree, include_ret_val=True):
for arg in arguments:
dims = [attr for attr in arg.attributes if attr.startswith('dimension') ]
if dims == []:
continue
if len(dims) != 1:
raise ValueError('more than one dimension attribute found for arg %s' % arg.name)
if 'intent(out)' in arg.attributes:
arg.attributes = set_intent(arg.attributes, 'intent(inout)')
return tree
class StringLengthConverter(ft.FortranVisitor):
"""Convert lengths of all character strings to standard format
Looks in all Procedure arguments and Type elements.
Changes from '(len=*)' or '(*)' syntax to *(*) syntax.
"""
def __init__(self, string_lengths, default_string_length):
self.string_lengths = string_lengths
self.default_string_length = default_string_length
def visit_Declaration(self, node):
if not node.type.startswith('character'):
return
try:
lind = node.type.index('(')
rind = node.type.rindex(')')
typ = node.type[:lind] + '*' + node.type[lind:rind + 1].replace('len=', '')
string_length = typ[11:-1]
# Try to get length of string arguments
if not string_length == '*' and not all([x in '0123456789' for x in string_length]):
string_length = self.string_lengths.get(string_length, self.default_string_length)
# Default string length for intent(out) strings
if string_length == '*' and 'intent(out)' in node.attributes:
string_length = self.default_string_length
except ValueError:
string_length = 1
node.type = 'character*(%s)' % str(string_length)
class ArrayDimensionConverter(ft.FortranVisitor):
"""
Transform unspecified dimensions into additional dummy arguments
e.g. the following code
subroutine foo(a)
integer a(:)
end subroutine foo
becomes:
subroutine foo(a, n0)
integer a(n0)
integer n0
!f2py intent(hide), depend(a) :: n0 = shape(a,0)
end subroutine foo
"""
valid_dim_re = re.compile(r'^(([-0-9.e]+)|(size\([_a-zA-Z0-9\+\-\*\/,]*\))|(len\(.*\)))$')
@staticmethod
def split_dimensions(dim):
"""Given a string like "dimension(a,b,c)" return the list of dimensions ['a','b','c']."""
dim = dim[10:-1] # remove "dimension(" and ")"
br = 0
d = 1
ds = ['']
for c in dim:
if c != ',': ds[-1] += c
if c == '(': br += 1
elif c == ')': br -= 1
elif c == ',':
if br == 0: ds.append('')
else: ds[-1] += ','
return ds
def visit_Procedure(self, node):
n_dummy = 0
for arg in node.arguments:
dims = [attr for attr in arg.attributes if attr.startswith('dimension') ]
if dims == []:
continue
if len(dims) != 1:
raise ValueError('more than one dimension attribute found for arg %s' % arg.name)
ds = ArrayDimensionConverter.split_dimensions(dims[0])
new_dummy_args = []
new_ds = []
for i, d in enumerate(ds):
if ArrayDimensionConverter.valid_dim_re.match(d):
if d.startswith('len'):
arg.f2py_line = ('!f2py %s %s, dimension(%s) :: %s' % \
(arg.type,
','.join([attr for attr in arg.attributes if not attr.startswith('dimension')]),
d.replace('len', 'slen'), arg.name))
new_ds.append(d)
continue
dummy_arg = ft.Argument(name='n%d' % n_dummy, type='integer', attributes=['intent(hide)'])
if 'intent(out)' not in arg.attributes:
dummy_arg.f2py_line = ('!f2py intent(hide), depend(%s) :: %s = shape(%s,%d)' %
(arg.name, dummy_arg.name, arg.name, i))
new_dummy_args.append(dummy_arg)
new_ds.append(dummy_arg.name)
n_dummy += 1
if new_dummy_args != []:
logging.debug('adding dummy arguments %r to %s' % (new_dummy_args, node.name))
arg.attributes = ([attr for attr in arg.attributes if not attr.startswith('dimension')] +
['dimension(%s)' % ','.join(new_ds)])
node.arguments.extend(new_dummy_args)
class MethodFinder(ft.FortranTransformer):
def __init__(self, types, constructor_names, destructor_names, short_names, move_methods):
self.types = types
self.constructor_names = constructor_names
self.destructor_names = destructor_names
self.short_names = short_names
self.move_methods = move_methods
def visit_Interface(self, node):
new_procs = []
for proc in node.procedures:
if isinstance(proc, ft.Procedure):
new_proc = self.visit_Procedure(proc, interface=node)
if new_proc is not None:
new_procs.append(new_proc)
else:
new_procs.append(proc)
if new_procs == []:
# interface is now empty: all routines have been moved into Interfaces inside types
return None
else:
# some procedures remain so we need to keep the Interface around
node.procedures = new_procs
return node
def visit_Procedure(self, node, interface=None):
if (len(node.arguments) == 0 or
(node.arguments[0] is not None and
node.arguments[0].type not in self.types)):
# procedure is not a method, so leave it alone
return node
# remove prefix from subroutine name to get method name
typ = self.types[node.arguments[0].type]
node.method_name = node.name
prefices = [typ.name + '_']
if typ.name in self.short_names:
prefices.append(self.short_names[typ.name] + '_')
for prefix in prefices:
if node.name.startswith(prefix):
node.method_name = node.name[len(prefix):]
# label constructors and destructors
if node.method_name in self.constructor_names:
node.attributes.append('constructor')
elif node.method_name in self.destructor_names:
node.attributes.append('destructor')
if (self.move_methods or
'constructor' in node.attributes or
'destructor' in node.attributes):
node.attributes.append('method')
node.type_name = typ.name
if interface is None:
# just a regular method - move into typ.procedures
typ.procedures.append(node)
logging.debug('added method %s to type %s' %
(node.method_name, typ.name))
else:
# this method was originally inside an interface,
# so we need to replicate Interface inside the Type
for intf in typ.interfaces:
if intf.name == interface.name:
intf.procedures.append(node)
logging.debug('added method %s to interface %s in type %s' %
(node.method_name, intf.name, typ.name))
break
else:
intf = ft.Interface(interface.name,
interface.filename,
interface.doc,
interface.lineno,
[node])
typ.interfaces.append(intf)
logging.debug('added method %s to new interface %s in type %s' %
(node.method_name, intf.name, typ.name))
# remove method from parent since we've added it to Type
return None
else:
return node
def collapse_single_interfaces(tree):
"""Collapse interfaces which contain only a single procedure."""
class _InterfaceCollapser(ft.FortranTransformer):
"""Replace interfaces with only one procedure by that procedure"""
def visit_Interface(self, node):
if len(node.procedures) == 1:
proc = node.procedures[0]
proc.doc = node.doc + proc.doc
logging.debug('collapsing single-component interface %s' % proc.name)
return proc
else:
return node
class _ProcedureRelocator(ft.FortranTransformer):
"""Filter interfaces and procedures into correct lists"""
def visit_Type(self, node):
logging.debug('visiting %r' % node)
interfaces = []
procedures = []
for child in ft.iter_child_nodes(node):
if isinstance(child, ft.Interface):
interfaces.append(child)
elif isinstance(child, ft.Procedure):
procedures.append(child)
else:
# other child nodes should be left where they are
pass
node.interfaces = interfaces
node.procedures = procedures
return self.generic_visit(node)
visit_Module = visit_Type
tree = _InterfaceCollapser().visit(tree)
tree = _ProcedureRelocator().visit(tree)
return tree
def add_missing_constructors(tree):
for node in ft.walk(tree):
if not isinstance(node, ft.Type):
continue
for child in ft.iter_child_nodes(node):
if isinstance(child, ft.Procedure):
if 'constructor' in child.attributes:
logging.info('found constructor %s' % child.name)
break
else:
logging.info('adding missing constructor for %s' % node.name)
new_node = ft.Subroutine('%s_initialise' % node.name,
node.filename,
['Automatically generated constructor for %s' % node.name],
node.lineno,
[ft.Argument(name='this',
filename=node.filename,
doc=['Object to be constructed'],
lineno=node.lineno,
attributes=['intent(out)'],
type='type(%s)' % node.name)],
node.uses,
['constructor', 'skip_call'],
mod_name=node.mod_name,
type_name=node.name)
new_node.method_name = '__init__'
node.procedures.append(new_node)
return tree
def add_missing_destructors(tree):
for node in ft.walk(tree):
if not isinstance(node, ft.Type):
continue
for child in ft.iter_child_nodes(node):
if isinstance(child, ft.Procedure):
if 'destructor' in child.attributes:
logging.info('found destructor %s' % child.name)
break
else:
logging.info('adding missing destructor for %s' % node.name)
new_node = ft.Subroutine('%s_finalise' % node.name,
node.filename,
['Automatically generated destructor for %s' % node.name],
node.lineno,
[ft.Argument(name='this',
filename=node.filename,
doc=['Object to be destructed'],
lineno=node.lineno,
attributes=['intent(inout)'],
type='type(%s)' % node.name)],
node.uses,
['destructor', 'skip_call'],
mod_name=node.mod_name,
type_name=node.name)
new_node.method_name = '__del__'
node.procedures.append(new_node)
return tree
class FunctionToSubroutineConverter(ft.FortranTransformer):
"""Convert all functions to subroutines, with return value as an
intent(out) argument after the last non-optional argument"""
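    # Illustration (mine): a Fortran function
    #     function norm(x)
    # is re-expressed for wrapping as
    #     subroutine norm(x, ret_norm)
    # where `ret_norm` is the old return value with intent(out) appended.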
def visit_Function(self, node):
# insert ret_val after last non-optional argument
arguments = node.arguments[:]
i = 0
for i, arg in enumerate(arguments):
if 'optional' in arg.attributes:
break
arguments.insert(i, node.ret_val)
arguments[i].name = 'ret_' + arguments[i].name
arguments[i].attributes.append('intent(out)')
new_node = ft.Subroutine(node.name,
node.filename,
node.doc,
node.lineno,
arguments,
node.uses,
node.attributes,
mod_name=node.mod_name)
if hasattr(node, 'call_name'):
new_node.call_name = node.call_name
if hasattr(node, 'type'):
new_node.type = node.type
new_node.orig_name = node.orig_name
new_node.orig_node = node # keep a reference to the original node
return new_node
class IntentOutToReturnValues(ft.FortranTransformer):
"""
Convert all Subroutine and Function intent(out) arguments to return values
"""
def visit_Procedure(self, node):
if 'constructor' in node.attributes:
node.arguments[0].attributes = set_intent(node.arguments[0].attributes,
'intent(out)')
ret_val = []
ret_val_doc = None
if isinstance(node, ft.Function) and node.ret_val is not None:
ret_val.append(node.ret_val)
if node.ret_val_doc is not None:
ret_val_doc = node.ret_val_doc
arguments = []
for arg in node.arguments:
if 'intent(out)' in arg.attributes:
ret_val.append(arg)
else:
arguments.append(arg)
if ret_val == []:
new_node = node # no changes needed
else:
new_node = ft.Function(node.name,
node.filename,
node.doc,
node.lineno,
arguments,
node.uses,
node.attributes,
ret_val,
ret_val_doc,
mod_name=node.mod_name,
type_name=node.type_name)
new_node.orig_node = node
if hasattr(node, 'method_name'):
new_node.method_name = node.method_name
return new_node
class RenameReservedWords(ft.FortranVisitor):
def __init__(self, types, name_map=None):
self.types = types
self.name_map = {}
if name_map is not None:
self.name_map.update(name_map)
# rename Python keywords by appending an underscore
import keyword
self.name_map.update(dict((key, key + '_') for key in keyword.kwlist))
# apply same renaming as f2py
import numpy.f2py.crackfortran
self.name_map.update(numpy.f2py.crackfortran.badnames)
# remove some of these which are not Python reserved words
del self.name_map['stdout']
del self.name_map['stderr']
del self.name_map['stdin']
def visit_Argument(self, node):
if not hasattr(node, 'orig_name'):
node.orig_name = node.name
node.name = self.name_map.get(node.name, node.name)
if isinstance(node, ft.Argument):
# replace names in dimension attribute expressions
for (old_name, new_name) in self.name_map.items():
new_attribs = []
for attrib in node.attributes:
if attrib.startswith('dimension('):
new_attribs.append(attrib.replace(old_name, new_name))
else:
new_attribs.append(attrib)
node.attributes = new_attribs
return self.generic_visit(node)
visit_Procedure = visit_Argument
visit_Element = visit_Argument
visit_Module = visit_Argument
visit_Type = visit_Argument
class RenameArgumentsPython(ft.FortranVisitor):
def __init__(self, types):
self.types = types
def visit_Procedure(self, node):
if hasattr(node, 'method_name'):
if 'constructor' in node.attributes:
node.ret_val[0].py_name = 'self'
elif len(node.arguments) >= 1 and node.arguments[0].type in self.types:
node.arguments[0].py_name = 'self'
elif hasattr(node, 'attributes') and 'callback' in node.attributes:
self.visit_Argument(node)
return self.generic_visit(node)
def visit_Argument(self, node):
if not hasattr(node, 'py_name'):
node.py_name = node.name
if node.type.startswith('type'):
node.py_value = node.py_name + '._handle'
else:
node.py_value = node.py_name
return node
class RenameInterfacesPython(ft.FortranVisitor):
def visit_Interface(self, node):
for proc in node.procedures:
if hasattr(proc, 'method_name'):
proc.method_name = '_'+proc.method_name
else:
proc.method_name = '_'+proc.name
return node
class OnlyAndSkip(ft.FortranTransformer):
"""
This class does the job of removing nodes from the tree
which are not necessary to write wrappers for (given user-supplied
values for only and skip).
Currently it takes a list of subroutines and a list of modules to write
wrappers for. If empty, it does all of them.
"""
def __init__(self, kept_subs, kept_mods):
self.kept_subs = kept_subs
self.kept_mods = kept_mods
def visit_Procedure(self, node):
if len(self.kept_subs) > 0:
if node not in self.kept_subs:
return None
return self.generic_visit(node)
def visit_Module(self, node):
if len(self.kept_mods) > 0:
if node not in self.kept_mods:
return None
return self.generic_visit(node)
class NormaliseTypes(ft.FortranVisitor):
"""
Convert all type names to standard form and resolve kind names
"""
def __init__(self, kind_map):
self.kind_map = kind_map
def visit_Declaration(self, node):
node.type = ft.normalise_type(node.type, self.kind_map)
return self.generic_visit(node)
visit_Argument = visit_Declaration
class SetInterfaceProcedureCallNames(ft.FortranVisitor):
"""
Set call names of procedures within overloaded interfaces to the name of the interface
"""
def visit_Interface(self, node):
for proc in node.procedures:
logging.info('setting call_name of %s to %s' % (proc.name, node.name))
proc.call_name = node.name
return node
def transform_to_generic_wrapper(tree, types, callbacks, constructors,
destructors, short_names, init_lines,
only_subs, only_mods, argument_name_map,
move_methods):
"""
Apply a number of rules to *tree* to make it suitable for passing to
a F90 and Python wrapper generators. Transformations performed are:
* Removal of procedures and modules not provided by the user
* Removal of private symbols
* Removal of unwrappable routines and optional arguments
* Addition of missing constructor and destructor wrappers
* Conversion of all functions to subroutines
    * Updating call names of procedures within interfaces
* Update of subroutine uses clauses
"""
tree = OnlyAndSkip(only_subs, only_mods).visit(tree)
tree = remove_private_symbols(tree)
tree = UnwrappablesRemover(callbacks, types, constructors, destructors).visit(tree)
tree = MethodFinder(types, constructors, destructors, short_names, move_methods).visit(tree)
SetInterfaceProcedureCallNames().visit(tree)
tree = collapse_single_interfaces(tree)
tree = fix_subroutine_uses_clauses(tree, types)
tree = fix_element_uses_clauses(tree, types)
tree = add_missing_constructors(tree)
tree = add_missing_destructors(tree)
tree = convert_array_intent_out_to_intent_inout(tree)
RenameReservedWords(types, argument_name_map).visit(tree)
return tree
def transform_to_f90_wrapper(tree, types, callbacks, constructors,
destructors, short_names, init_lines,
string_lengths, default_string_length,
sizeof_fortran_t, kind_map):
"""
Additional Fortran-specific transformations:
* Conversion of derived type arguments to opaque integer arrays
via Fortran transfer() intrinsic.
* Normalise type declarations
"""
FunctionToSubroutineConverter().visit(tree)
tree = convert_derived_type_arguments(tree, init_lines, sizeof_fortran_t)
StringLengthConverter(string_lengths, default_string_length).visit(tree)
ArrayDimensionConverter().visit(tree)
NormaliseTypes(kind_map).visit(tree)
return tree
def transform_to_py_wrapper(tree, types):
"""
Additional Python-specific transformations:
* Convert intent(out) arguments to additional return values
* Rename arguments (e.g. this -> self)
* Prefix procedure names within interfaces with an underscore
"""
IntentOutToReturnValues().visit(tree)
RenameArgumentsPython(types).visit(tree)
RenameInterfacesPython().visit(tree)
return tree
def find_referenced_modules(mods, tree):
"""
Given a set of modules in a parse tree, find any modules (recursively)
used by these.
Parameters
----------
mods : set
initial modules to search, must be included in the tree.
tree : `fortran.Root()` object.
the full fortran parse tree from which the mods have been taken.
Returns
-------
all_mods : set
Module() objects which are recursively used by the given modules.
"""
new_mods = copy.copy(mods)
while new_mods != set():
temp = list(new_mods)
for m in temp:
for m2 in m.uses:
for m3 in ft.walk_modules(tree):
if m3.name == m2:
new_mods.add(m3)
new_mods -= mods
mods |= new_mods
return mods
def find_referenced_types(mods, tree):
"""
Given a set of modules in a parse tree, find any types either defined in
or referenced by the module, recursively.
Parameters
----------
mods : set
initial modules to search, must be included in the tree.
    tree : `fortran.Root` object.
        the full fortran parse tree from which the mods have been taken.
Returns
-------
    kept_types : set
        Type() objects which are referenced or defined in the given modules,
        or recursively referenced by those types.
"""
# Get used types now
kept_types = set()
for mod in mods:
for t in mod.types:
kept_types.add(t)
for el in mod.elements:
if el.type.startswith('type'):
for mod2 in ft.walk_modules(tree):
for mt in mod2.types:
if mt.name in el.type:
kept_types.add(mt)
# kept_types is now all types defined/referenced directly in kept_mods. But we also
# need those referenced by them.
new_set = copy.copy(kept_types)
while new_set != set():
temp_set = list(new_set)
for t in temp_set:
for el in t.elements:
if el.type.startswith('type'): # a referenced type, need to find def
for mod2 in ft.walk_modules(tree):
for mt in mod2.types:
if mt.name in el.type:
new_set.add(mt)
# take out all the original types from new_set
new_set -= kept_types
# update the kept_types with new ones
kept_types |= new_set
return kept_types
| davidovitch/f90wrap | f90wrap/transform.py | Python | gpl-2.0 | 42,373 | [
"VisIt"
] | 6d8a31ae7d43abc3dfe60811a8493edfa9e59816c58bca0d33259b0632635db8 |
"""
Traits View definition file.
The view trait of the parent class has been extracted from the model
definition file. This file can either be exec()ed or imported. See
core/base.py:Base.trait_view() for what is currently used. Using exec()
allows view changes without needing to restart Mayavi, but is slower than
importing.
"""
# Authors: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Judah De Paula <judah@enthought.com>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
from traitsui.api import Item, Group, View
view = View(Group(Item(name='filled_contours',
defined_when='show_filled_contours'),
Item(name='auto_contours'),
# One group or the other, but not both.
Group(
Item(name='contours',
style='custom',
visible_when='not auto_contours',
show_label=False),
),
Group(
Item(name='number_of_contours'),
Item(name='minimum_contour'),
Item(name='maximum_contour'),
visible_when='auto_contours',
),
Item(name='auto_update_range'),
Group(
Item(name='_data_min',
label='Data minimum'),
Item(name='_data_max',
label='Data maximum'),
visible_when='not auto_update_range',
)
)
)
| dmsurti/mayavi | mayavi/components/ui/contour.py | Python | bsd-3-clause | 1,652 | [
"Mayavi"
] | b8ca25daa4e087a96decb2c8d2842bd90108db097bcafeaf5e8bcb781f8c4292 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date, timedelta
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import SUN, MON, TUE, WED, THU, FRI, SAT
class UnitedStates(WesternCalendar, ChristianMixin):
"United States of America"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(7, 4, 'Independence Day'),
(11, 11, 'Veterans Day'),
)
@staticmethod
def is_presidential_year(year):
return (year % 4) == 0
def get_variable_days(self, year):
# usual variable days
days = super(UnitedStates, self).get_variable_days(year)
days += [
(UnitedStates.get_nth_weekday_in_month(year, 1, MON, 3),
'Martin Luther King, Jr. Day'),
(UnitedStates.get_nth_weekday_in_month(year, 2, MON, 3),
"Washington's Birthday"),
(UnitedStates.get_last_weekday_in_month(year, 5, MON),
"Memorial Day"),
(UnitedStates.get_nth_weekday_in_month(year, 9, MON),
"Labor Day"),
(UnitedStates.get_nth_weekday_in_month(year, 10, MON, 2),
"Colombus Day"),
(UnitedStates.get_nth_weekday_in_month(year, 11, THU, 4),
"Thanksgiving Day"),
]
# Inauguration day
if UnitedStates.is_presidential_year(year - 1):
inauguration_day = date(year, 1, 20)
if inauguration_day.weekday() == SUN:
inauguration_day = date(year, 1, 21)
days.append((inauguration_day, "Inauguration Day"))
        #Christmas shift: if Christmas falls on a weekend, exchanges observe
        #the following weekday as a holiday, plus the day after it
christmas = date(year, 12, 25)
if christmas.weekday() in self.get_weekend_days():
shift = self.find_following_weekday(christmas)
days.append((shift, "Christmas Shift"))
days.append((shift + timedelta(days=1), "Boxing Day Shift"))
return days
class Brazil(WesternCalendar, ChristianMixin):
"Brazil"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(4, 21, "Tiradentes' Day"),
(5, 1, "Labour Day"),
(9, 7, "Independence Day"),
(10, 12, "Our Lady of Aparecida"),
(11, 2, "All Souls' Day"),
(11, 15, "Republic Day"),
)
class BrazilSaoPauloState(Brazil):
"Brazil São Paulo State"
FIXED_HOLIDAYS = Brazil.FIXED_HOLIDAYS + (
(7, 9, "Constitutional Revolution of 1932"),
)
class BrazilSaoPauloCity(BrazilSaoPauloState):
"Brazil São Paulo City"
FIXED_HOLIDAYS = BrazilSaoPauloState.FIXED_HOLIDAYS + (
(1, 25, "Anniversary of the city of São Paulo"),
(11, 20, "Dia da Consciência Negra")
)
include_easter_sunday = True
include_corpus_christi = True
def get_carnaval(self, year):
return self.get_easter_sunday(year) - timedelta(days=47)
def get_variable_days(self, year):
days = super(BrazilSaoPauloCity, self).get_variable_days(year)
days.append((self.get_carnaval(year), "Carnaval"))
days.append((self.get_good_friday(year), "Sexta-feira da Paixão"))
return days
class Chile(WesternCalendar, ChristianMixin):
"Chile"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(5, 21, "Navy Day"),
(6, 29, "Saint Peter and Saint Paul"),
(7, 16, "Our Lady of Mount Carmel"),
(9, 18, "National holiday"),
(9, 19, "Army holiday"),
(10, 12, "Columbus Day"),
(12, 31, "Banking Holiday"),
)
include_good_friday = True
include_easter_saturday = True
include_assumption = True
include_all_saints = True
include_immaculate_conception = True
def get_variable_days(self, year):
days = super(Chile, self).get_variable_days(year)
september_17 = date(year, 9, 17)
if september_17.weekday() == MON:
days.append((september_17, '"Bridge" holiday'))
september_20 = date(year, 9, 20)
if september_20.weekday() == FRI:
days.append((september_20, '"Bridge" holiday'))
reformation_day = date(year, 10, 31)
if reformation_day.weekday() == WED:
reformation_day = date(year, 11, 2)
elif reformation_day.weekday() == TUE:
reformation_day = date(year, 10, 27)
days.append((reformation_day, "Reformation Day"))
return days
class Mexico(WesternCalendar, ChristianMixin):
"Mexico"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(9, 16, "Independence Day"),
)
def get_variable_days(self, year):
days = super(Mexico, self).get_variable_days(year)
days.append(
(Mexico.get_nth_weekday_in_month(year, 2, MON),
"Constitution Day"))
days.append(
(Mexico.get_nth_weekday_in_month(year, 3, MON, 3),
"Benito Juárez's birthday"))
days.append(
(Mexico.get_nth_weekday_in_month(year, 11, MON, 3),
"Revolution Day"))
return days
def get_calendar_holidays(self, year):
days = super(Mexico, self).get_calendar_holidays(year)
# If any statutory day is on Sunday, the monday is off
# If it's on a Saturday, the Friday is off
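        # Worked illustration (mine, not in the original comments): if
        # 16 September falls on a Sunday, Monday the 17th is appended as
        # "Independence Day substitute".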
for day, label in days:
if day.weekday() == SAT:
days.append((day - timedelta(days=1), "%s substitute" % label))
elif day.weekday() == SUN:
days.append((day + timedelta(days=1), "%s substitute" % label))
# Extra: if new year's day is a saturday, the friday before is off
next_new_year = date(year + 1, 1, 1)
        if next_new_year.weekday() == SAT:
days.append((date(year, 12, 31), "New Year Day substitute"))
return days
class Panama(WesternCalendar, ChristianMixin):
"Panama"
include_good_friday = True
include_easter_saturday = True
include_easter_sunday = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(1, 9, "Martyrs' Day"),
(5, 1, "Labour Day"),
(11, 3, "Independence Day"),
(11, 5, "Colon Day"),
(11, 10, "Shout in Villa de los Santos"),
(12, 2, "Independence from Spain"),
(12, 8, "Mothers' Day"),
)
def get_variable_days(self, year):
days = super(Panama, self).get_variable_days(year)
days.append(
(self.get_ash_wednesday(year) - timedelta(days=1), "Carnival")
)
return days
| ChrisStevens/workalendar | workalendar/america.py | Python | mit | 6,675 | [
"COLUMBUS"
] | 705c1067928c83223faa4d3482cf1c1be47b4df68ef603a3d622691c7288043a |
import sys
import mechanize
import re
import json
import time
import urllib
import dogcatcher
import HTMLParser
import os
h = HTMLParser.HTMLParser()
cdir = os.path.dirname(os.path.abspath(__file__)) + "/"
tmpdir = cdir + "tmp/"
voter_state = "MI"
source = "State"
result = [("authority_name", "first_name", "last_name", "county_name", "fips",
"street", "city", "address_state", "zip_code",
"po_street", "po_city", "po_state", "po_zip_code",
"reg_authority_name", "reg_first", "reg_last",
"reg_street", "reg_city", "reg_state", "reg_zip_code",
"reg_po_street", "reg_po_city", "reg_po_state", "reg_po_zip_code",
"reg_phone", "reg_fax", "reg_email", "reg_website", "reg_hours",
"phone", "fax", "email", "website", "hours", "voter_state", "source", "review")]
#We need to obtain a list of county clerks and of municipal clerks.
#Each county page has a list of municipal pages.
#So we acquire every county page and clean it.
#In the process of cleaning it, we extract the full list of municipal pages.
#We then access each of those pages and clean them.
#And then we're done. Whee!
#Every county is a different item in a dropdown menu, so we have to cycle through them all.
#To do so, we grab the dropdown menu, extract a list of counties, then grab a series of web pages based on that list.
#This grabs a page containing a list of MI counties and writes it to a file. Writing it isn't strictly necessary, but saves some run time in the long run.
#We save separate lists of county names and county references because the references used in the dropdown menus are different from the county's names.
county_list_re = re.compile("<option value=\"(.+?)\">.+? County</option>")
county_name_re = re.compile("<option value=\".+?\">(.+?) County</option>")
output_path = os.path.join(tmpdir, "michigan-counties.html")
url = "https://webapps.sos.state.mi.us/mivote/ClerkSearch.aspx"
data = urllib.urlopen(url).read()
output = open(output_path,"w")
output.write(data)
output.close()
county_list = county_list_re.findall(data)
county_names = county_name_re.findall(data)
#This uses the mechanize package to submit every item in county_list--the list of county names as used in the menu--and grab and save a webpage for each one.
trim_re = re.compile("(<td style=\"vertical-align:top;\">.+)\s+</tr>\s+</table>\s+</div>\s+</td>", re.DOTALL)
for county in county_list:
print county
br = mechanize.Browser() #Creates a mechanize browser object.
br.set_handle_robots(False) # ignore robots.txt
br.open(url) #Opens the page.
br.select_form(name = "aspnetForm") #The drop-down menu is titled aspnetForm.
br["ctl00$ContentPlaceHolder1$csCnty"] = [county,] #It takes an input called ctl00$ContentPlaceHolder1$csCnty.
res = br.submit() #res is the resulting page when we submit the inputs from earlier
content = res.read() #this creates a string of the page.
trimmed_content = trim_re.findall(content)[0] #this trims the page down to only what we need.
#This writes the page to a file.
file_path = tmpdir + county + "-MI-clerks.html"
output = open(file_path,"w")
output.write(trimmed_content)
output.close()
municipality_list = []
for county_id in county_list:
file_path = tmpdir + county_id + "-MI-clerks.html"
data = open(file_path).read()
county_data_re = re.compile("<td style=\"vertical-align:top;\">.+?</td>", re.DOTALL)
county = county_data_re.findall(data)[0]
county_name_re = re.compile(">(.+?)\s+County</span>")
official_name_re = re.compile("ClerkorLocationName.+?class=\"clerkText\">(.+?)</span>")
phone_re = re.compile("Ph:(.+?)</span>")
fax_re = re.compile("Fax:(.+?)</span>")
address_re = re.compile("Address\" class=\"clerkText\">(.+?)</span><br />", re.DOTALL)
csz_re = re.compile("CityStateZip.+?>(.+?)</span><br />")
city_re = re.compile("(.+?) [A-Z][A-Z]")
state_re = re.compile(" ([A-Z][A-Z]) ")
zip_re = re.compile(" (\d{5}[\d-]*)")
po_re = re.compile("(P[oO] Box .+) *", re.DOTALL)
email_re = re.compile("Email: (.+?) *<")
municipal_re = re.compile("href=\"LocalClerk\.aspx\?jd=(\d{5})")
municipality_list.extend(municipal_re.findall(data))
authority_name, first_name, last_name, county_name, town_name, fips, street, city, address_state, zip_code, po_street, po_city, po_state, po_zip_code, reg_authority_name, reg_first, reg_last, reg_street, reg_city, reg_state, reg_zip_code, reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code, reg_phone, reg_fax, reg_email, reg_website, reg_hours, phone, fax, email, website, hours, review = dogcatcher.begin(voter_state)
county_name = county_name_re.findall(county)[0]
official = official_name_re.findall(county)[0]
first_name, last_name, official_name, review = dogcatcher.make_name(official, ",", review)
email = dogcatcher.find_emails(email_re, county)
phone = dogcatcher.find_phone(phone_re, county)
fax = dogcatcher.find_phone(fax_re, county)
#This section finds the address. After finding the address, it identifies a city/state/zip (csz) combination and a PO Box number if that exists.
#It removes both the CSZ and the PO Address (if it exists) from the full address, leaving behind a street address with some garbage.
#It then cleans up the street address and pulls the city, state, and zip out of the csz, and assigns them as appropriate to the street address and state.
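#For illustration (example mine): a csz of "Lansing MI 48933" yields city "Lansing",
#state "MI", and zip "48933" under the city_re/state_re/zip_re patterns above.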
address = address_re.findall(county)[0].replace("</span><br><span ID=\"lblAddress2\" Class=\"clerkText\">","")
csz = csz_re.findall(county)[0]
try:
po_street = po_re.findall(address)[0].replace(csz,"").strip(", ")
except:
po_street = ""
street = address.replace(po_street,"").replace(csz,"")
street = street.replace("\n",", ").replace("\r","").replace(" ,",",").strip(" ,")
if po_street:
po_city = city_re.findall(csz)[0].strip()
po_state = state_re.findall(csz)[0].strip()
po_zip_code = zip_re.findall(csz)[0].strip()
if street:
city = city_re.findall(csz)[0].strip()
address_state = state_re.findall(csz)[0].strip()
zip_code = zip_re.findall(csz)[0].strip()
if county_name == "Wayne" and street == "2 Woodward Ave":
street = street + " Suite 502"
fips = dogcatcher.find_fips(county_name, voter_state)
result.append([authority_name, first_name, last_name, county_name, fips,
street, city, address_state, zip_code,
po_street, po_city, po_state, po_zip_code,
reg_authority_name, reg_first, reg_last,
reg_street, reg_city, reg_state, reg_zip_code,
reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code,
reg_phone, reg_fax, reg_email, reg_website, reg_hours,
phone, fax, email, website, hours, voter_state, source, review])
#This outputs the results to a separate text file.
dogcatcher.output(result, voter_state, cdir)
print municipality_list
#Note that in MI, we give an unusual item: town_name_full. This is the town name, but with "Township" or "City of" included.
#This is necessary because, in at least one county, there is a city and a county by the same name.
result = [("authority_name", "first_name", "last_name", "town_name", "county_name", "fips",
"street", "city", "address_state", "zip_code",
"po_street", "po_city", "po_state", "po_zip_code",
"reg_authority_name", "reg_first", "reg_last",
"reg_street", "reg_city", "reg_state", "reg_zip_code",
"reg_po_street", "reg_po_city", "reg_po_state", "reg_po_zip_code",
"reg_phone", "reg_fax", "reg_email", "reg_website", "reg_hours",
"phone", "fax", "email", "website", "hours", "voter_state", "source", "review", "town_name_full")]
town_data_re = re.compile("<span id=\"ctl00_ContentPlaceHolder1_lblCounty\".+?</td>\s", re.DOTALL)
town_name_re = re.compile("display:inline-block;\">(.+?)</span>")
county_name_re = re.compile("class=\"countyName\">(.+?)</span>")
address_re = re.compile("Address\" class=\"clerkText\">(.+?)</span><br />\s+<span id=\"ctl00_ContentPlaceHolder1_lblCityStateZip\"", re.DOTALL)
po_re = re.compile("<span ID=\"lblAddress2\" Class=\"clerkText\">(.+)")
for town_id in municipality_list:
print town_id
authority_name, first_name, last_name, county_name, town_name, fips, street, city, address_state, zip_code, po_street, po_city, po_state, po_zip_code, reg_authority_name, reg_first, reg_last, reg_street, reg_city, reg_state, reg_zip_code, reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code, reg_phone, reg_fax, reg_email, reg_website, reg_hours, phone, fax, email, website, hours, review = dogcatcher.begin(voter_state)
#The URLs are uniformly formatted; we insert every URL suffix on municipality_list into the URL format, and then grab and save a webpage based on that.
#(Writing it to a file isn't strictly necessary, but saves some time down the line.)
file_name = tmpdir + town_id + "-MI-municipal-clerks.html"
#To be used when part of the data has already been extracted.
try:
data = open(file_name).read()
town = data
except:
town_url = "https://webapps.sos.state.mi.us/mivote/LocalClerk.aspx?jd=" + town_id
data = urllib.urlopen(town_url).read()
output = open(file_name,"w")
output.write(data)
output.close()
town = town_data_re.findall(data)[0]
data = open(file_name).read()
# #To be used when collecting a fresh set of data.
# town_url = "https://webapps.sos.state.mi.us/mivote/LocalClerk.aspx?jd=" + town_id
# data = urllib.urlopen(town_url).read()
# output = open(file_name,"w")
# output.write(data)
# output.close()
# town = town_data_re.findall(data)[0]
# data = open(file_name).read()
authority_name, first_name, last_name, county_name, town_name, fips, street, city, address_state, zip_code, po_street, po_city, po_state, po_zip_code, reg_authority_name, reg_first, reg_last, reg_street, reg_city, reg_state, reg_zip_code, reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code, reg_phone, reg_fax, reg_email, reg_website, reg_hours, phone, fax, email, website, hours, review = dogcatcher.begin(voter_state)
authority_name = "Clerk"
town_name_full = town_name_re.findall(town)[0].strip()
town_name = town_name_full.replace("City of","").replace("Township","").strip()
county_name = county_name_re.findall(town)[0].replace("County","").strip()
official_name = official_name_re.findall(town)[0].partition(",")[0]
first_name, last_name, review = dogcatcher.split_name(official_name, review)
email = dogcatcher.find_emails(email_re, town)
phone = dogcatcher.find_phone(phone_re, town)
fax = dogcatcher.find_phone(fax_re, town)
#There are many known errors in both phone numbers or fax numbers. This fixes them.
#They're currently commented out because they don't work well with phone_find as written.
# if town_name == "Fayette" or phone == "357-4145":
# phone = "517-" + phone
# if "906 906" in phone:
# phone.replace("906 ","",1)
# elif county_name == "Lenawee" and town_name == "Franklin":
# fax = "517-431-2320"
# if town_name == "Fayette" or fax == "458-2390":
# fax = "517-" + fax
# elif county_name == "Lenawee" and town_name == "Franklin":
# fax = "517-431-2320"
# elif town_name == "Hamtramck":
# fax = "313-876-7703"
#A few towns don't have addresses listed; this catches them before we try to clean addresses from them.
if "Address\" class=\"clerkText\"><" in town:
address = ""
#This section finds the full address. After finding the address, it identifies a city/state/zip (csz) combination and a PO Box number if that exists.
#It removes both the CSZ and the PO Address (if it exists) from the full address, leaving behind a street address with some garbage.
#It then cleans up the street address and pulls the city, state, and zip out of the csz, and assigns them as appropriate to the street address and state.
else:
address = address_re.findall(town)[0]
csz = csz_re.findall(town)[0]
try:
po_street = po_re.findall(address)[0].strip(", ")
except:
po_street = ""
street = address.replace(po_street,"").replace("</span><br><span ID=\"lblAddress2\" Class=\"clerkText\">","")
street = street.replace("\n",", ").replace("\r","").replace(" ,",",").strip(" ,")
#I don't remember how or why I found these corrections, but they all seem to be accurate.
if town_name_full == "Jefferson Township" and county_name == "Cass":
street = "24725 Jefferson Center Street"
po_street = "P.O. Box 188"
elif town_name_full == "Au Gres Township" and county_name == "Arenac":
street = "1865 South Swenson Road"
elif town_name_full == "Crystal Township" and county_name == "Montcalm":
po_street = "PO Box 358"
elif town_name_full == "Geneva Township" and county_name == "Midland":
street = "3704 W Barden Rd"
elif town_name_full == "Whiteford Township" and county_name == "Monroe":
street = "8000 Yankee Rd, Ste 100"
elif town_name_full == "Swan Creek Township" and county_name == "Saginaw":
street = "11415 Lakefield Rd"
po_street = "P.O. Box 176"
elif town_name_full == "Star Township" and county_name == "Antrim":
po_street = "P.O. Box 94"
elif town_name_full == "Sherman Township" and county_name == "Gladwin":
street = "4013 Oberlin Rd"
elif town_name_full == "Sanilac Township" and county_name == "Sanilac":
po_street = "P.O. Box 631"
street = "20 N. Ridge Street"
elif town_name_full == "Mullett Township" and county_name == "Cheboygan":
po_street = "P.O. Box 328"
elif town_name_full == "Houghton Township" and county_name == "Keweenaw":
street = "5059 Fourth Street"
elif town_name_full == "Greenland Township" and county_name == "Ontonagon":
po_street = "P.O. Box 204"
street = "1502 Mass Avenue"
if po_street:
if street:
city = city_re.findall(csz)[0]
address_state = state_re.findall(csz)[0]
zip_code = zip_re.findall(csz)[0]
po_city = city_re.findall(csz)[0].strip()
po_state = state_re.findall(csz)[0].strip()
po_zip_code = zip_re.findall(csz)[0].strip()
else:
city = city_re.findall(csz)[0].strip()
address_state = state_re.findall(csz)[0].strip()
zip_code = zip_re.findall(csz)[0].strip()
print [address]
fips = dogcatcher.find_fips(county_name, voter_state)
result.append([authority_name, first_name, last_name, town_name, county_name, fips,
street, city, address_state, zip_code,
po_street, po_city, po_state, po_zip_code,
reg_authority_name, reg_first, reg_last,
reg_street, reg_city, reg_state, reg_zip_code,
reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code,
reg_phone, reg_fax, reg_email, reg_website, reg_hours,
phone, fax, email, website, hours, voter_state, source, review, town_name_full])
#This outputs the results to a separate text file.
dogcatcher.output(result, voter_state, cdir, "cities")
| democracyworks/dog-catcher | michigan.py | Python | mit | 14,550 | [
"CRYSTAL"
] | 4b98324201c8ec758b0c77c2e6046e430e84b301a8dc089367ddb1fd6aed5faa |
# TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']
import numpy as np
import pandas as pd
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.tools import add_constant, chain_dot, pinv_extended
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T
return sigma, cholsigmainv
class RegressionModel(base.LikelihoodModel):
"""
Base class for linear regression models. Should not be directly called.
Intended for subclassing.
"""
def __init__(self, endog, exog, **kwargs):
super(RegressionModel, self).__init__(endog, exog, **kwargs)
self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])
def initialize(self):
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None
@property
def df_model(self):
"""
The model degree of freedom, defined as the rank of the regressor
matrix minus 1 if a constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model
@df_model.setter
def df_model(self, value):
self._df_model = value
@property
def df_resid(self):
"""
The residual degree of freedom, defined as the number of observations
minus the rank of the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid
@df_resid.setter
def df_resid(self, value):
self._df_resid = value
def whiten(self, X):
raise NotImplementedError("Subclasses should implement.")
def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description required keywords for alternative covariance estimators
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
Returns
-------
A RegressionResults class instance.
See Also
---------
regression.linear_model.RegressionResults
regression.linear_model.RegressionResults.get_robustcov_results
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
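Examples
--------
A minimal sketch using OLS with the QR factorization; the data are
arbitrary:
>>> import numpy as np
>>> import statsmodels.api as sm
>>> x = sm.add_constant(np.arange(5.))
>>> y = np.array([1., 2., 2., 4., 4.])
>>> res = sm.OLS(y, x).fit(method="qr")
>>> res.params
array([ 1. ,  0.8])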
"""
if method == "pinv":
if ((not hasattr(self, 'pinv_wexog')) or
(not hasattr(self, 'normalized_cov_params')) or
(not hasattr(self, 'rank'))):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np_matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if ((not hasattr(self, 'exog_Q')) or
(not hasattr(self, 'exog_R')) or
(not hasattr(self, 'normalized_cov_params')) or
(getattr(self, 'rank', None) is None)):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np_matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
**kwargs)
return RegressionResultsWrapper(lfit)
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
#JP: this doesn't look correct for GLMAR
#SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params)
def get_distribution(self, params, scale, exog=None, dist_class=None):
"""
Returns a random number generator for the predictive distribution.
Parameters
----------
params : array-like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array-like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and
'scale' as arguments and return a random number generator
implementing an `rvs` method for simulating random values.
Defaults to Gaussian.
Returns
-------
gen
    Frozen random number generator object with mean and variance
    determined by the fitted linear model. Use the ``rvs`` method
    to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions objects``,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced.
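Examples
--------
A hedged sketch; ``res`` stands for a fitted OLS results instance:
>>> gen = res.model.get_distribution(res.params, res.scale)  # doctest: +SKIP
>>> sample = gen.rvs(int(res.nobs))  # doctest: +SKIP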
"""
fit = self.predict(params, exog)
if dist_class is None:
from scipy.stats.distributions import norm
dist_class = norm
gen = dist_class(loc=fit, scale=np.sqrt(scale))
return gen
class GLS(RegressionModel):
__doc__ = r"""
Generalized least squares model with a general covariance structure.
%(params)s
sigma : scalar or array
`sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
**Attributes**
pinv_wexog : array
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
cholsigmainv : array
The transpose of the Cholesky decomposition of the pseudoinverse.
df_model : float
p - 1, where p is the number of regressors including the intercept.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : array
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : array
`sigma` is the n x n covariance structure of the error terms.
wexog : array
Design matrix whitened by `cholsigmainv`
wendog : array
Response variable whitened by `cholsigmainv`
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
>>> print(gls_results.summary())
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
#TODO: add options igls, for iterative fgls if sigma is None
#TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
#store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, X):
"""
GLS whiten method.
Parameters
-----------
X : array-like
Data to be whitened.
Returns
-------
np.dot(cholsigmainv,X)
See Also
--------
regression.GLS
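Examples
--------
A minimal sketch with a diagonal (1d) ``sigma``; the data are
arbitrary:
>>> import numpy as np
>>> import statsmodels.api as sm
>>> mod = sm.GLS([1., 2., 3.], np.ones(3), sigma=np.array([1., 4., 9.]))
>>> mod.whiten(np.array([1., 2., 3.]))
array([ 1.,  1.,  1.])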
"""
X = np.asarray(X)
if self.sigma is None or self.sigma.shape == ():
return X
elif self.sigma.ndim == 1:
if X.ndim == 1:
return X * self.cholsigmainv
else:
return X * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, X)
def loglike(self, params):
"""
Returns the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `endog`.
Parameters
----------
params : array-like
The parameter estimates
Returns
-------
loglike : float
The value of the log-likelihood function for a GLS Model.
Notes
-----
The log-likelihood function for the normal distribution is
.. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right)
Y and Y-hat are whitened.
"""
#TODO: combine this with OLS/WLS loglike and add _det_sigma argument
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(self.sigma):
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
if self.sigma.ndim==2:
det = np.linalg.slogdet(self.sigma)
llf -= .5*det[1]
else:
llf -= 0.5*np.sum(np.log(self.sigma))
# with error covariance matrix
return llf
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
if self.sigma is None or self.sigma.shape == ():
return np.ones(self.exog.shape[0])
elif self.sigma.ndim == 1:
return self.cholsigmainv
else:
return np.diag(self.cholsigmainv)
class WLS(RegressionModel):
__doc__ = """
A regression model with diagonal but non-identity covariance structure.
The weights are presumed to be (proportional to) the inverse of the
variance of the observations. That is, if the variables are to be
transformed by 1/sqrt(W) you must supply weights = 1/W.
%(params)s
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
is 1 and WLS results are the same as OLS.
%(extra_params)s
Attributes
----------
weights : array
The stored weights supplied as an argument.
See regression.GLS
Examples
---------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
>>> results = wls_model.fit()
>>> results.params
array([ 2.91666667, 0.0952381 ])
>>> results.tvalues
array([ 2.0652652 , 0.35684428])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
>>> print(results.f_test([0, 1]))
<F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
Notes
-----
If the weights are a function of the data, then the post estimation
statistics such as fvalue and mse_model might not be correct, as the
package does not yet support no-constant regression.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
**kwargs):
weights = np.array(weights)
if weights.shape == ():
if (missing == 'drop' and 'missing_idx' in kwargs and
kwargs['missing_idx'] is not None):
# patsy may have truncated endog
weights = np.repeat(weights, len(kwargs['missing_idx']))
else:
weights = np.repeat(weights, len(endog))
# handle case that endog might be of len == 1
if len(weights) == 1:
weights = np.array([weights.squeeze()])
else:
weights = weights.squeeze()
super(WLS, self).__init__(endog, exog, missing=missing,
weights=weights, hasconst=hasconst, **kwargs)
nobs = self.exog.shape[0]
weights = self.weights
# Experimental normalization of weights
weights = weights / np.sum(weights) * nobs
if weights.size != nobs and weights.shape[0] != nobs:
raise ValueError('Weights must be scalar or same length as design')
def whiten(self, X):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights)
Parameters
----------
X : array-like
Data to be whitened
Returns
-------
sqrt(weights)*X
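Examples
--------
A minimal sketch; the weights are chosen for easy arithmetic:
>>> import numpy as np
>>> import statsmodels.api as sm
>>> mod = sm.WLS([1., 2., 3., 4.], np.ones(4), weights=[1., 4., 9., 16.])
>>> mod.whiten(np.ones(4))
array([ 1.,  2.,  3.,  4.])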
"""
#print(self.weights.var()))
X = np.asarray(X)
if X.ndim == 1:
return X * np.sqrt(self.weights)
elif X.ndim == 2:
return np.sqrt(self.weights)[:, None]*X
def loglike(self, params):
"""
Returns the value of the gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `Y`.
Parameters
----------
params : array-like
The parameter estimates.
Returns
-------
llf : float
The value of the log-likelihood function for a WLS Model.
Notes
--------
.. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}W\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)+\\frac{1}{2}\\log\\left(\\left|W\\right|\\right)
where :math:`W` is a diagonal matrix
"""
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog,params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant
llf += 0.5 * np.sum(np.log(self.weights))
return llf
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
return self.weights
class OLS(WLS):
__doc__ = """
A simple ordinary least squares model.
%(params)s
%(extra_params)s
Attributes
----------
weights : scalar
Has an attribute weights = array(1.0) due to inheritance from WLS.
See Also
--------
GLS
Examples
--------
>>> import numpy as np
>>>
>>> import statsmodels.api as sm
>>>
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>>
>>> model = sm.OLS(Y,X)
>>> results = model.fit()
>>> results.params
array([ 2.14285714, 0.25 ])
>>> results.tvalues
array([ 1.87867287, 0.98019606])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>
Notes
-----
No constant is added by the model unless you are using formulas.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
#TODO: change example to use datasets. This was the point of datasets!
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
super(OLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
if "weights" in self._init_keys:
self._init_keys.remove("weights")
def loglike(self, params, scale=None):
"""
The likelihood function for the OLS model.
Parameters
----------
params : array-like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
nobs = float(self.nobs)
resid = self.endog - np.dot(self.exog, params)
if hasattr(self, 'offset'):
resid -= self.offset
ssr = np.sum(resid**2)
if scale is None:
# profile log likelihood
llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
else:
# log-likelihood
llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
return llf
def whiten(self, Y):
"""
OLS model whitener does nothing: returns Y.
"""
return Y
def score(self, params, scale=None):
"""
Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array-like
The parameter vector at which the score function is
computed.
scale : float or None
If None, the score is computed under the profile (concentrated)
log-likelihood (profiled over the scale parameter); otherwise the
log-likelihood with the given scale value is used.
Returns
-------
The score vector.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
sdr = -self._wexog_x_wendog + xtxb
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T, params)
ssr += np.dot(params, xtxb)
return -self.nobs * sdr / ssr
else:
return -sdr / scale
def _setup_score_hess(self):
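# Cache the cross-products y'y, X'X and X'y on the whitened (and
# offset-adjusted) data, so repeated score() and hessian() calls
# avoid re-forming them.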
y = self.wendog
if hasattr(self, 'offset'):
y = y - self.offset
self._wendog_xprod = np.sum(y * y)
self._wexog_xprod = np.dot(self.wexog.T, self.wexog)
self._wexog_x_wendog = np.dot(self.wexog.T, y)
def hessian(self, params, scale=None):
"""
Evaluate the Hessian function at a given point.
Parameters
----------
params : array-like
The parameter vector at which the Hessian is computed.
scale : float or None
If None, the Hessian is computed under the profile (concentrated)
log-likelihood (profiled over the scale parameter); otherwise the
log-likelihood with the given scale value is used.
Returns
-------
The Hessian matrix.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T, params)
ssr += np.dot(params, xtxb)
ssrp = -2*self._wexog_x_wendog + 2*xtxb
hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
return -self.nobs * hm / 2
else:
return -self._wexog_xprod / scale
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
return np.ones(self.exog.shape[0])
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
r"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : string
Only the 'elastic_net' approach is currently implemented.
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt: scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is a
ridge fit, if 1 it is a lasso fit.
start_params : array-like
Starting values for ``params``.
profile_scale : bool
If True the penalized fit is computed using the profile
(concentrated) log-likelihood for the Gaussian model.
Otherwise the fit uses the residual sum of squares.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
distributed : bool
If True, the model uses distributed methods for fitting,
will raise an error if True and partitions is None.
generator : function
generator used to partition the model, allows for handling
of out of memory/parallel computing.
partitions : scalar
The number of partitions desired for the distributed
estimation.
threshold : scalar or array-like
The threshold below which coefficients are zeroed out,
only used for distributed estimation
Returns
-------
An array of coefficients, or a RegressionResults object of the
same type returned by ``fit``.
Notes
-----
The elastic net approach closely follows that implemented in
the glmnet package in R. The penalty is a combination of L1
and L2 penalties.
The function that is minimized is:
.. math::
0.5*RSS/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
References
----------
Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
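Examples
--------
A hedged sketch of a lasso fit (``alpha`` and the data are arbitrary):
>>> import numpy as np
>>> import statsmodels.api as sm
>>> x = sm.add_constant(np.arange(10.))
>>> y = np.array([0., 1., 3., 2., 4., 6., 5., 7., 9., 8.])
>>> res = sm.OLS(y, x).fit_regularized(alpha=0.1, L1_wt=1.0)
>>> res.params  # doctest: +SKIP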
"""
from statsmodels.base.elastic_net import fit_elasticnet
if L1_wt == 0:
return self._fit_ridge(alpha)
# In the future we could add support for other penalties, e.g. SCAD.
if method != "elastic_net":
raise ValueError("method for fit_regularized must be elastic_net")
# Set default parameters.
defaults = {"maxiter" : 50, "cnvrg_tol" : 1e-10,
"zero_tol" : 1e-10}
defaults.update(kwargs)
# If a scale parameter is passed in, the non-profile
# likelihood (residual sum of squares divided by -2) is used,
# otherwise the profile likelihood is used.
if profile_scale:
loglike_kwds = {}
score_kwds = {}
hess_kwds = {}
else:
loglike_kwds = {"scale": 1}
score_kwds = {"scale": 1}
hess_kwds = {"scale": 1}
return fit_elasticnet(self, method=method,
alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
loglike_kwds=loglike_kwds,
score_kwds=score_kwds,
hess_kwds=hess_kwds,
refit=refit,
check_step=False,
**defaults)
def _fit_ridge(self, alpha):
"""
Fit a linear model using ridge regression.
Parameters
----------
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
Notes
-----
Equivalent to fit_regularized with L1_wt = 0 (but implemented
more efficiently).
"""
u, s, vt = np.linalg.svd(self.exog, 0)
v = vt.T
s2 = s*s + alpha * self.nobs
params = np.dot(u.T, self.endog) * s / s2
params = np.dot(v, params)
from statsmodels.base.elastic_net import RegularizedResults
return RegularizedResults(self, params)
class GLSAR(GLS):
__doc__ = """
A regression model with an AR(p) covariance structure.
%(params)s
rho : int
Order of the autoregressive covariance
%(extra_params)s
Examples
--------
>>> import statsmodels.api as sm
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> Y = [1,3,4,5,8,10,9]
>>> model = sm.GLSAR(Y, X, rho=2)
>>> for i in range(6):
... results = model.fit()
... print("AR coefficients: {0}".format(model.rho))
... rho, sigma = sm.regression.yule_walker(results.resid,
... order=model.order)
... model = sm.GLSAR(Y, X, rho)
...
AR coefficients: [ 0. 0.]
AR coefficients: [-0.52571491 -0.84496178]
AR coefficients: [-0.6104153 -0.86656458]
AR coefficients: [-0.60439494 -0.857867 ]
AR coefficients: [-0.6048218 -0.85846157]
AR coefficients: [-0.60479146 -0.85841922]
>>> results.params
array([-0.66661205, 1.60850853])
>>> results.tvalues
array([ -2.10304127, 21.8047269 ])
>>> print(results.t_test([1, 0]))
<T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>
Or, equivalently
>>> model2 = sm.GLSAR(Y, X, rho=2)
>>> res = model2.iterative_fit(maxiter=6)
>>> model2.rho
array([-0.60479146, -0.85841922])
Notes
-----
GLSAR is considered to be experimental.
The linear autoregressive process of order p--AR(p)--is defined as:
TODO
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):
#this looks strange, interpreting rho as order if it is int
if isinstance(rho, int):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0,1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
if exog is None:
#JP this looks wrong, should be a regression on constant
#results for rho estimate now identical to yule-walker on y
#super(AR, self).__init__(endog, add_constant(endog))
super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),
missing=missing, **kwargs)
else:
super(GLSAR, self).__init__(endog, exog, missing=missing,
**kwargs)
def iterative_fit(self, maxiter=3, rtol=1e-4, **kwds):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors, AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : integer, optional
the number of iterations
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if
max(abs(last - current) / abs(last)) < rtol
"""
# TODO: update this after going through example.
converged = False
i = -1 # need to initialize for maxiter < 1 (skip loop)
history = {'params': [], 'rho':[self.rho]}
for i in range(maxiter - 1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
history['params'].append(results.params)
if i == 0:
last = results.params
else:
diff = np.max(np.abs(last - results.params) / np.abs(last))
if diff < rtol:
converged = True
break
last = results.params
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
history['rho'].append(self.rho)
# why not another call to self.initialize
# Use kwarg to insert history
if not converged and maxiter > 0:
# maxiter <= 0 just does OLS
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
# if converged then this is a duplicate fit, because we didn't update rho
results = self.fit(history=history, **kwds)
results.iter = i + 1
# add last fit to history, not if duplicate fit
if not converged:
results.history['params'].append(results.params)
results.iter += 1
results.converged = converged
return results
def whiten(self, X):
"""
Whiten a series of columns according to an AR(p)
covariance structure. This drops initial p observations.
Parameters
----------
X : array-like
The data to be whitened.
Returns
-------
whitened array
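Examples
--------
A minimal sketch of the AR(1) filter with rho = 0.5; the first
observation is dropped:
>>> import numpy as np
>>> import statsmodels.api as sm
>>> model = sm.GLSAR([1., 2., 3., 4.], np.ones(4), rho=np.array([0.5]))
>>> model.whiten(np.array([1., 2., 3., 4.]))
array([ 1.5,  2. ,  2.5])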
"""
#TODO: notation for AR process
X = np.asarray(X, np.float64)
_X = X.copy()
#the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]
return _X[self.order:]
def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
"""
Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
Unbiased or maximum-likelihood estimator (mle)
See, for example:
http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
Parameters
----------
X : array-like
1d array
order : integer, optional
The order of the autoregressive process. Default is 1.
method : string, optional
Method can be "unbiased" or "mle" and this determines denominator in
estimate of autocorrelation function (ACF) at lag k. If "mle", the
denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
The default is unbiased.
df : integer, optional
Specifies the degrees of freedom. If `df` is supplied, then it is assumed
the X has `df` degrees of freedom rather than `n`. Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is False.
demean : bool
True, the mean is subtracted from `X` before estimation.
Returns
-------
rho
The autoregressive coefficients
sigma
TODO
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma = sm.regression.yule_walker(data.endog,
... order=4, method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
#TODO: define R better, look back at notes and technical notes on YW.
#First link here is useful
#http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = str(method).lower()
if method not in ["unbiased", "mle"]:
raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
X = np.array(X, dtype=np.float64)
if demean:
X -= X.mean() # automatically demean's X
n = df or X.shape[0]
if method == "unbiased": # this is df_resid ie., n - p
denom = lambda k: n - k
else:
denom = lambda k: n
if X.ndim > 1 and X.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (X**2).sum() / denom(0)
for k in range(1,order+1):
r[k] = (X[0:-k]*X[k:]).sum() / denom(k)
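# Solve the Yule-Walker system R * rho = r[1:], where R is the
# Toeplitz matrix built from the autocovariances r[0], ..., r[order-1].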
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:]*rho).sum()
if inv:
return rho, np.sqrt(sigmasq), np.linalg.inv(R)
else:
return rho, np.sqrt(sigmasq)
class RegressionResults(base.LikelihoodModelResults):
r"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Returns
-------
**Attributes**
aic
Akaike's information criteria. For a model with a constant
:math:`-2llf + 2(df\_model + 1)`. For a model without a constant
:math:`-2llf + 2(df\_model)`.
bic
Bayes' information criteria. For a model with a constant
:math:`-2llf + \log(n)(df\_model+1)`. For a model without a constant
:math:`-2llf + \log(n)(df\_model)`
bse
The standard errors of the parameter estimates.
pinv_wexog
See specific model class docstring
centered_tss
The total (weighted) sum of squares centered about the mean.
cov_HC0
Heteroscedasticity robust covariance matrix. See HC0_se below.
cov_HC1
Heteroscedasticity robust covariance matrix. See HC1_se below.
cov_HC2
Heteroscedasticity robust covariance matrix. See HC2_se below.
cov_HC3
Heteroscedasticity robust covariance matrix. See HC3_se below.
cov_type
Parameter covariance estimator used for standard errors and t-stats
df_model
Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
ess
Explained sum of squares. If a constant is present, the centered
total sum of squares minus the sum of squared residuals. If there is
no constant, the uncentered total sum of squares is used.
fvalue
F-statistic of the fully specified model. Calculated as the mean
squared error of the model divided by the mean squared error of the
residuals.
f_pvalue
p-value of the F-statistic
fittedvalues
The predicted values for the original (unwhitened) design.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
history
Estimation history for iterative estimators
HC0_se
White's (1980) heteroskedasticity robust standard errors.
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
where e_i = resid[i].
HC0_se is a cached property.
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is just
resid**2.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag(n/(n-p)*HC_0))
HC1_se is a cached property.
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
n/(n-p)*resid**2.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
HC2_se is a cached property.
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii).
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)^(2)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
HC3_se is a cached property.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii)^(2).
model
A pointer to the model instance on which `fit` was called.
mse_model
Mean squared error of the model. This is the explained sum of squares
divided by the model degrees of freedom.
mse_resid
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
mse_total
Total mean squared error. Defined as the centered total sum of squares
divided by the sum of the model and residual degrees of freedom if a
constant is present, and the uncentered total sum of squares otherwise.
nobs
Number of observations n.
normalized_cov_params
See specific model class docstring
params
The linear coefficients that minimize the least squares criterion. This
is usually called Beta for the classical linear model.
pvalues
The two-tailed p values for the t-stats of the params.
resid
The residuals of the model.
resid_pearson
`wresid` normalized to have unit variance.
rsquared
R-squared of a model with an intercept. This is defined here as
1 - `ssr`/`centered_tss` if the constant is included in the model and
1 - `ssr`/`uncentered_tss` if the constant is omitted.
rsquared_adj
Adjusted R-squared. This is defined here as
1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included
and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.
scale
A scale factor for the covariance matrix.
Default value is ssr/(n-p). Note that the square root of `scale` is
often called the standard error of the regression.
ssr
Sum of squared (whitened) residuals.
uncentered_tss
Uncentered sum of squares. Sum of the squared values of the
(whitened) endogenous response variable.
wresid
The residuals of the transformed/whitened regressand and regressor(s)
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
super(RegressionResults, self).__init__(model, params,
normalized_cov_params,
scale)
self._cache = resettable_cache()
if hasattr(model, 'wexog_singular_values'):
self._wexog_singular_values = model.wexog_singular_values
else:
self._wexog_singular_values = None
self.df_model = model.df_model
self.df_resid = model.df_resid
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
if use_t is None:
self.use_t = True # TODO: class default
else:
if cov_kwds is None:
cov_kwds = {}
if 'use_t' in cov_kwds:
# TODO: we want to get rid of 'use_t' in cov_kwds
use_t_2 = cov_kwds.pop('use_t')
if use_t is None:
use_t = use_t_2
# TODO: warn or not?
self.get_robustcov_results(cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
for key in kwargs:
setattr(self, key, kwargs[key])
def __str__(self):
    return str(self.summary())
def conf_int(self, alpha=.05, cols=None):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval.
i.e., the default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
return self.model.wendog - self.model.predict(self.params,
self.model.wexog)
@cache_readonly
def resid(self):
return self.model.endog - self.model.predict(self.params,
self.model.exog)
#TODO: fix writable example
@cache_writable()
def scale(self):
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
wresid = self.wresid
return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
model = self.model
weights = getattr(model, 'weights', None)
if weights is not None:
return np.sum(weights*(model.endog - np.average(model.endog,
weights=weights))**2)
else: # this is probably broken for GLS
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)
@cache_readonly
def mse_model(self):
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
# TODO: What if model includes implicit constant, e.g. all dummies but no constant regressor?
# TODO: Restats as LM test by projecting orthogonalizing to constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = ft.pvalue
return ft.fvalue
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
#TODO: make these properties reset bse
def _HCCM(self, scale):
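# pinv_wexog is (X'X)^(-1) X', so this forms the sandwich
# (X'X)^(-1) X' diag(scale) X (X'X)^(-1).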
H = np.dot(self.model.pinv_wexog,
scale[:,None]*self.model.pinv_wexog.T)
return H
@cache_readonly
def cov_HC0(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
See statsmodels.RegressionResults
"""
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale=(self.wresid/(1-h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
@cache_readonly
def HC0_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC3))
@cache_readonly
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
An array wresid/sqrt(scale)
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# don't divide if scale is zero close to numerical precision
from warnings import warn
warn("All residuals are 0, cannot compute normed residuals.",
RuntimeWarning)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
One model nests another if the regressors in the smaller model are
spanned by the regressors in the larger model and the regressand is
identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:,None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of regressors
return np.allclose(score_l2,0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""Use Lagrange Multiplier test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether to demean the scores based on the residuals
from the restricted model. If True, the covariance of the scores
is used and the LM test is identical to the large sample version
of the LR test.
Returns
-------
lm_value : float
test statistic, chi2 distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
TODO: explain LM test
"""
import statsmodels.stats.sandwich_covariance as sw
from numpy.linalg import inv
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:,None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:,None]
demean = False
if demean:
scores = scores - scores.mean(0)[None,:]
# Form matters here. If homoskedastic, this can be sigma^2 (X'X)^-1.
# If heteroskedastic, the form below is fine.
# If HAC, then the HAC estimator must be used.
# If clustered, the cluster estimator should be used.
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
XpX = np.dot(wexog.T,wexog) / n
Sinv = inv(sigma2 * XpX)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
Sinv = inv(np.dot(scores.T,scores) / n)
elif cov_type == 'HAC':
print("HAC")
maxlags = self.cov_kwds['maxlags']
Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
#cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
Sinv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * chain_dot(s,Sinv,s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
"""use F test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
f_value : float
test statistic, F distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two models.
This is not a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results under the assumption of homoscedasticity
and no autocorrelation (sphericity).
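Examples
--------
A hedged sketch; ``res_full`` and ``res_restr`` stand for fitted
results of a full model and a model nested within it:
>>> f_value, p_value, df_diff = res_full.compare_f_test(res_restr)  # doctest: +SKIP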
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
likelihood ratio, chisquare distributed with df_diff degrees of
freedom
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
The exact likelihood ratio is valid for homoskedastic data, and is
defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\\mathcal{L}` is the likelihood of the model. With :math:`D`
distributed as chisquare with df equal to difference in number of
parameters or equivalently difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the residuals from
null model and the regressors from the alternative model and :math:`S`
is the covariance of the scores, :math:`s_{i}`. The covariance of the
scores is estimated using the same estimator as in the alternative model.
This test compares the loglikelihood of the two models.
This may not be a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results without taking unspecified
heteroscedasticity or correlation into account.
TODO: put into separate function, needs tests
"""
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : string
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`true` if the cov_type is nonrobust, and `false` in all other cases.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' and optional keyword argument 'scale' which uses
a predefined scale estimate with default equal to one.
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
- `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
- `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
If `use_correction = 'cluster'` (default), then the same
small sample correction as in the case of 'covtype='cluster''
is used.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
- 'hac-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
The data needs to be sorted in this case, the time series for
each panel unit or cluster need to be stacked. The membership to
a timeseries of an individual or group can be either specified by
group indicators or by increasing time periods.
keywords
- either `groups` or `time` : array_like (required)
`groups` : indicator for groups
`time` : index of time periods
- `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
Reminder:
`use_correction` in "hac-groupsum" and "hac-panel" is not bool,
needs to be in [False, 'hac', 'cluster']
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
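Examples
--------
A hedged sketch; ``res`` stands for a fitted OLS results instance:
>>> robust_res = res.get_robustcov_results(cov_type='HC1')  # doctest: +SKIP
>>> robust_res.bse  # doctest: +SKIP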
"""
import statsmodels.stats.sandwich_covariance as sw
#normalize names
if cov_type == 'nw-panel':
cov_type = 'hac-panel'
if cov_type == 'nw-groupsum':
cov_type = 'hac-groupsum'
if 'kernel' in kwds:
kwds['weights_func'] = kwds.pop('kernel')
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user didn't explicitely set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = ('Standard Errors are based on ' +
'fixed scale')
res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
                raise ValueError('heteroscedasticity robust covariance ' +
                                 'does not use keywords')
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'robust ' + '(' + cov_type + ')')
# TODO cannot access cov without calling se first
getattr(self, cov_type.upper() + '_se')
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type.lower() == 'hac':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'and autocorrelation robust (HAC) using %d lags and %s small ' +
'sample correction') % (maxlags, ['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
        res.cov_kwds['description'] = ('Standard Errors are robust to ' +
                    'cluster correlation ' + '(' + cov_type + ')')
elif cov_type.lower() == 'hac-panel':
#cluster robust standard errors
res.cov_kwds['time'] = time = kwds.get('time', None)
res.cov_kwds['groups'] = groups = kwds.get('groups', None)
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if groups is not None:
tt = (np.nonzero(groups[:-1] != groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
        res.cov_kwds['description'] = ('Standard Errors are robust to ' +
                    'cluster correlation ' + '(' + cov_type + ')')
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = (
'Driscoll and Kraay Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwds):
return pred.get_prediction(self, exog=exog, transform=transform,
weights=weights, row_labels=row_labels, **kwds)
get_prediction.__doc__ = pred.get_prediction.__doc__
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
        ----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `var_##` for ## in 0, ..., p-1, where p is the number
            of regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
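        Examples
        --------
        A minimal, hedged usage sketch with made-up data:
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> y = np.random.randn(50)
        >>> X = sm.add_constant(np.random.randn(50, 2))
        >>> print(sm.OLS(y, X).fit().summary())  # doctest: +SKIP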
"""
#TODO: import where we need it (for now), add as cached attributes
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
#TODO not used yet
#diagn_left_header = ['Models stats']
#diagn_right_header = ['Residual stats']
#TODO: requiring list/iterable is a bit annoying
#need more control over formatting
#TODO: default don't work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
('Df Model:', None), #[self.df_model])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue] ),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
#add warnings/notes, added to text format only
etext =[]
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: #TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = ["[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
smry.add_extra_txt(etext)
return smry
#top = summary_top(self, gleft=topleft, gright=diagn_left, #[],
# yname=yname, xname=xname,
# title=self.model.__class__.__name__ + ' ' +
# "Regression Results")
#par = summary_params(self, yname=yname, xname=xname, alpha=.05,
# use_t=False)
#
#diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="Linear Model")
#
#return summary_return([top, par, diagn], return_fmt=return_fmt)
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function to summarize the regression results
Parameters
        ----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Diagnostics
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest,
durbin_watson)
from statsmodels.compat.collections import OrderedDict
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
eigvals = np.sort(eigvals) #in increasing order
diagnostic = OrderedDict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
# Warnings
        if eigvals[-1] < 1e-10:
            warn = ("The smallest eigenvalue is %6.3g. This might indicate "
                    "that there are strong multicollinearity problems or "
                    "that the design matrix is singular." % eigvals[-1])
            smry.add_text(warn)
        if condno > 1000:
            warn = ("* The condition number is large (%.3g). This might "
                    "indicate strong multicollinearity or other numerical "
                    "problems." % condno)
            smry.add_text(warn)
return smry
class OLSResults(RegressionResults):
"""
    Results class for an OLS model.
Most of the methods and attributes are inherited from RegressionResults.
The special methods that are only available for OLS are:
- get_influence
- outlier_test
- el_test
- conf_int_el
See Also
--------
RegressionResults
"""
def get_influence(self):
"""
get an instance of Influence with influence and outlier measures
Returns
-------
infl : Influence instance
the instance has methods to calculate the main influence and
outlier measures for the OLS regression
See also
--------
statsmodels.stats.outliers_influence.OLSInfluence
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self)
def outlier_test(self, method='bonf', alpha=.05):
"""
Test observations for outliers according to method
Parameters
----------
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
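        Examples
        --------
        A hedged sketch; ``results`` is assumed to be a fitted OLS results
        instance. Each row of the returned table is one observation, with
        the studentized residual, unadjusted p-value, and corrected p-value:
        >>> table = results.outlier_test(method='bonf', alpha=.05)  # doctest: +SKIP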
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha)
def el_test(self, b0_vals, param_nums, return_weights=0,
ret_params=0, method='nm',
stochastic_exog=1, return_params=0):
"""
Tests single or joint hypotheses of the regression parameters using
Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested
param_nums : 1darray
The parameter number to be tested
        return_weights : bool
            If true, returns the weights that optimize the likelihood
            ratio at b0_vals. Default is False.
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. Default is False
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
        stochastic_exog : bool
            When True, the exogenous variables are assumed to be stochastic.
            When the regressors are nonstochastic, moment conditions are
            placed on the exogenous variables. Confidence intervals for
            stochastic regressors are at least as large as those for
            non-stochastic regressors. Default is True.
Returns
-------
res : tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
        >>> fitted.params
        array([-39.91967442,   0.7156402 ,   1.29528612,  -0.15212252])
        >>> fitted.rsquared
        0.91357690446068196
        >>> # Test that the slope on the first variable is 0
        >>> fitted.el_test([0], [1])
        (27.248146353888796, 1.7894660442330235e-07)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress([],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,
maxiter=10000, full_output=1, disp=0,
args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,
method='nm', stochastic_exog=1):
"""
Computes the confidence interval for the parameter given by param_num
using Empirical Likelihood
Parameters
----------
        param_num : int
The parameter for which the confidence interval is desired
sig : float
The significance level. Default is .05
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
Returns
-------
ci : tuple
The confidence interval
See Also
--------
el_test
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical
value.
The function returns the results of each iteration of brentq at
each value of beta.
The current function value of the last printed optimization
should be the critical value at the desired significance level.
For alpha=.05, the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to
do el_test([lower_limit], [param_num])
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
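        Examples
        --------
        A hedged sketch; ``results`` is assumed to be a fitted OLS results
        instance and parameter 1 the slope of interest:
        >>> lower, upper = results.conf_int_el(1, sig=.05)  # doctest: +SKIP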
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0]-r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'chisq' : 'columns',
'sresid' : 'rows',
'weights' : 'rows',
'wresid' : 'rows',
'bcov_unscaled' : 'cov',
'bcov_scaled' : 'cov',
'HC0_se' : 'columns',
'HC1_se' : 'columns',
'HC2_se' : 'columns',
'HC3_se' : 'columns',
'norm_resid' : 'rows',
}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RegressionResultsWrapper,
RegressionResults)
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_results = OLS(data.endog, data.exog).fit() #results
gls_results = GLS(data.endog, data.exog).fit() #results
print(ols_results.summary())
    # The `returns` keyword belonged to an older summary API and is no
    # longer accepted by `summary`; the calls are kept for reference only.
    #tables = ols_results.summary(returns='tables')
    #csv = ols_results.summary(returns='csv')
"""
Summary of Regression Results
=======================================
| Dependent Variable: ['y']|
| Model: OLS|
| Method: Least Squares|
| Date: Tue, 29 Jun 2010|
| Time: 22:32:21|
| # obs: 16.0|
| Df residuals: 9.0|
| Df model: 6.0|
===========================================================================
| coefficient std. error t-statistic prob.|
---------------------------------------------------------------------------
| x1 15.0619 84.9149 0.1774 0.8631|
| x2 -0.0358 0.0335 -1.0695 0.3127|
| x3 -2.0202 0.4884 -4.1364 0.002535|
| x4 -1.0332 0.2143 -4.8220 0.0009444|
| x5 -0.0511 0.2261 -0.2261 0.8262|
| x6 1829.1515 455.4785 4.0159 0.003037|
| const -3482258.6346 890420.3836 -3.9108 0.003560|
===========================================================================
| Models stats Residual stats |
---------------------------------------------------------------------------
| R-squared: 0.995479 Durbin-Watson: 2.55949 |
| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |
| F-statistic: 330.285 Prob(Omnibus): 0.687765 |
| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |
| Log likelihood: -109.617 Prob(JB): 0.838294 |
| AIC criterion: 233.235 Skew: 0.419984 |
| BIC criterion: 238.643 Kurtosis: 2.43373 |
---------------------------------------------------------------------------
"""
| bert9bert/statsmodels | statsmodels/regression/linear_model.py | Python | bsd-3-clause | 103,184 | [
"Gaussian"
] | 63fee05f3692770b52f13c423920014e061b0b73ce198172c4b14d5835483d28 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
""" Script to test the precise aeif behaviours """
import argparse
from itertools import chain
import time
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib import rcParams
import nest
from nest.raster_plot import from_device
nest.Install("nngt_module")
#-----------------------------------------------------------------------------#
# Parser
#------------------------
#
parser = argparse.ArgumentParser(description="Script to compare the grid-precise and usual models.", usage='%(prog)s [options]')
parser.add_argument("-i", "--individual", action="store", default=True,
help="Compare the individual-neuron dynamics.")
parser.add_argument("-nn", "--no_network", action="store_true",
help="Compare network dynamics.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-nt", "--notime", action="store_true",
help="Do not compare runtime.")
group.add_argument("-s", "--size", action="store", type=int, default=5000,
help="Compare for a given network size.")
## parse
args = parser.parse_args()
args.no_network = True
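# NOTE: the assignment above force-disables the network comparison no matter
# what -nn was set to, so only the individual-neuron comparison below runs.
# A hypothetical invocation with the flags defined above:
#   python compare_aeif_gp.py -s 2000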
#-----------------------------------------------------------------------------#
# Parameters
#------------------------
#
#~ models = [ "aeif_cond_exp", "aeif_cond_alpha_mod", "gp_aeif_cond_exp", "ps_aeif_cond_exp" ]
models = [ "gp_aeif_psc_exp", "ps_aeif_psc_exp" ]
#~ models = [ "gp_aeif_cond_exp", "ps_aeif_cond_exp" ]
num_neurons = len(models)
tpl_ignore = ( "V_m", "w", "vp", "global_id", "thread_local_id", "thread", "model", "local_id", "t_spike" )
#-----------------------------------------------------------------------------#
# Individual dynamics
#------------------------
#
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads": 1, "overwrite_files":True})
if args.individual:
r_resolution = 0.01
nest.SetKernelStatus({"resolution":r_resolution})
d_step_current = 100.
r_min_voltage = -70.
# compare the precise implementation and the others
voltage_precise = [ None for _ in range(num_neurons) ]
# create AdExp neurons
di_param = {
'V_reset': -48.,
'V_peak': 0.0,
'V_th': -50.,
'I_e': 0.0,
'g_L': 12.,
'tau_w': 130.,
'E_L': -60.,
'Delta_T': 2.,
'a': -11.,
'b': 30.,
'C_m': 100.,
'V_m': -60.
}
# models
lst_neurons = [ nest.Create(model,params=di_param) for model in models ]
poisson = nest.Create("poisson_generator",1,{"rate":10000.})
parrot = nest.Create("parrot_neuron")
nest.Connect(poisson,parrot)
step_gen = nest.Create("step_current_generator",1,{"amplitude_times": [50.,1500.], "amplitude_values":[d_step_current,0.]})
multimeter = nest.Create("multimeter",num_neurons)
nest.SetStatus(multimeter, {"withtime":True, "interval":r_resolution, "record_from":["V_m","w", "I_ex"], "to_file":True})
for i,neuron in enumerate(lst_neurons):
print(nest.GetStatus(neuron)[0]["t_ref"])
nest.Connect(step_gen,neuron)
nest.Connect(multimeter[i],neuron[0])
nest.Connect(parrot,neuron, syn_spec={'weight':80.})
nest.Simulate(1600.0)
# plot
fig, (ax1, ax2, ax3) = plt.subplots(3,1,sharex=True)
# get the neuron's membrane potential
for i in range(num_neurons):
dmm = nest.GetStatus(multimeter)[i]
da_voltage = dmm["events"]["V_m"]
voltage_precise[i] = da_voltage
if i == num_neurons-1:
for j in range(num_neurons-1):
voltage_precise[j] -= da_voltage
da_adapt = dmm["events"]["w"]
da_syn = dmm["events"]["I_ex"]
da_time = dmm["events"]["times"]
ax1.plot(da_time,da_voltage,c=cm.hot(0.8*i/float(num_neurons)), label=models[i])
ax1.set_ylabel('Voltage (mV)')
ax2.plot(da_time,da_adapt,c=cm.hot(0.8*i/float(num_neurons)), label=models[i])
ax2.set_ylabel('Current (pA)')
ax3.plot(da_time,da_syn,c=cm.hot(0.8*i/float(num_neurons)), label=models[i])
ax3.set_xlabel('Time (ms)')
        ax3.set_ylabel('Synaptic current (pA)')
plt.legend(loc=4)
#~ lst_di_param = [ nest.GetStatus(neuron)[0] for neuron in lst_neurons ]
#~ for di in lst_di_param:
#~ b_equal = True
#~ for key,val in lst_di_param[-1].iteritems():
#~ if key not in tpl_ignore:
#~ b_equal *= (val == di[key])
#~ if not b_equal:
#~ print(key,val,di[key])
#~ print(b_equal)
fig2, axes = plt.subplots(num_neurons,sharex=True)
for i,varray in enumerate(voltage_precise):
axes[i].plot(varray)
axes[i].set_title(models[i])
#-----------------------------------------------------------------------------#
# Compare network dynamics
#------------------------
#
if not args.no_network:
# time the simulations for each neural model and network size
sim_time = 1000.
lst_network_sizes = [args.size] if args.notime else np.arange(1000, 17000, 5000)
num_runs = len(lst_network_sizes)
lst_times = [ np.zeros(num_runs) for _ in range(len(models)) ]
lst_spikes = [ np.zeros(num_runs) for _ in range(len(models)) ]
# fraction of inhibitory neurons
ifrac = 0.2
# average degree
avg_deg = 100
graph, gids = None, None
for i,size in enumerate(lst_network_sizes):
for j,model in enumerate(models):
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads": 9})
# synaptic weight
weight = 30. if "exp" in model else 20.
inhib_start = int(size*(1-ifrac))
gids = nest.Create(model, size)
nest.Connect(gids[:inhib_start], gids, conn_spec={'rule': 'fixed_indegree', 'indegree': int(avg_deg/2), 'autapses': False}, syn_spec={'weight':weight})
nest.Connect(gids[inhib_start:], gids, conn_spec={'rule': 'fixed_indegree', 'indegree': int(avg_deg/2), 'autapses': False}, syn_spec={'weight':-weight})
# in nest
dc = nest.Create("dc_generator", params={"amplitude": 800.})
sd = nest.Create("spike_detector", params={"withtime": True, "withgid": True})
nest.Connect(dc, gids[:int(inhib_start/3)])
nest.Connect(gids,sd)
start = time.time()
nest.Simulate(sim_time)
lst_times[j][i] = time.time() - start
lst_spikes[j][i] = len(nest.GetStatus(sd)[0]["events"]["senders"]) / sim_time
#~ from_device(sd, title="Raster for {} neurons of type {}".format(size, model))
fig, (ax1, ax2) = plt.subplots(2,1)
for i, model in enumerate(models):
ax1.plot(lst_network_sizes, lst_times[i], c=cm.hot(i/float(num_neurons)), label=model)
ax1.set_xlabel("Network size")
ax1.set_ylabel("Runtime for a 1s simulation.")
ax2.scatter(lst_spikes[i], lst_times[i], c=cm.hot(i/float(num_neurons)), label=model)
ax2.set_xlabel("Number of spikes generated")
ax2.set_ylabel("Runtime")
ax1.legend(loc=2)
ax2.legend(loc=2)
plt.show()
| Silmathoron/nest-models | test/compare_aeif_gp.py | Python | gpl-2.0 | 7,109 | [
"NEURON"
] | 727654dbe75b0a63f4959f45dcb904ee3f2722a2a413e8bcb7acfd3c0b2d21d4 |
#!/usr/bin/env python
"""
Install.py tool to download, unpack, build, and link to the plumed2 library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus, fullpath, geturl, checkmd5sum
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
# settings
version = "2.5.1"
mode = "static"
# help message
HELP = """
Syntax from src dir: make lib-plumed args="-b"
or: make lib-plumed args="-b -v 2.4.3"
or: make lib-plumed args="-p /usr/local/plumed2 -m shared"
Syntax from lib dir: python Install.py -b -v 2.4.3
or: python Install.py -b
or: python Install.py -p /usr/local/plumed2 -m shared
Example:
make lib-plumed args="-b" # download/build in lib/plumed/plumed2
make lib-plumed args="-p $HOME/plumed2 -m shared" # use existing Plumed2 installation in $HOME/plumed2
"""
# known checksums for different PLUMED versions. used to validate the download.
checksums = { \
'2.4.2' : '88188743a6e03ef076e5377d03ebb0e7', \
'2.4.3' : 'b1be7c48971627febc11c61b70767fc5', \
'2.4.4' : '71ed465bdc7c2059e282dbda8d564e71', \
'2.5.0' : '6224cd089493661e19ceacccd35cf911', \
'2.5.1' : 'c2a7b519e32197a120cdf47e0f194f81', \
}
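# A minimal sketch of how checksums like the ones above can be computed
# locally with only the standard library (the filename is hypothetical):
#
#   import hashlib
#   with open("plumed-src-2.5.1.tgz", "rb") as fp:
#       print(hashlib.md5(fp.read()).hexdigest())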
# parse and process arguments
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
help="download and build the plumed2 library")
pgroup.add_argument("-p", "--path",
help="specify folder of existing plumed2 installation")
parser.add_argument("-v", "--version", default=version, choices=checksums.keys(),
help="set version of plumed to download and build (default: %s)" % version)
parser.add_argument("-m", "--mode", default=mode, choices=['static', 'shared', 'runtime'],
help="set plumed linkage mode: static (default), shared, or runtime")
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.build and not args.path:
parser.print_help()
sys.exit(HELP)
buildflag = args.build
pathflag = args.path is not None
plumedpath = args.path
mode = args.mode
homepath = fullpath('.')
homedir = "%s/plumed2" % (homepath)
if pathflag:
if not os.path.isdir(plumedpath):
sys.exit("Plumed2 path %s does not exist" % plumedpath)
homedir = fullpath(plumedpath)
if not os.path.isdir(os.path.join(homedir, 'include', 'plumed', 'core')):
sys.exit("No Plumed2 installation found at %s" % plumedpath)
# download and unpack plumed2 tarball
if buildflag:
url = "https://github.com/plumed/plumed2/releases/download/v%s/plumed-src-%s.tgz" % (version, version)
  filename = "plumed-src-%s.tar.gz" % version
print("Downloading plumed ...")
geturl(url, filename)
# verify downloaded archive integrity via md5 checksum, if known.
if version in checksums:
if not checkmd5sum(checksums[version], filename):
sys.exit("Checksum for plumed2 library does not match")
print("Unpacking plumed2 source tarball ...")
if os.path.exists("%s/plumed-%s" % (homepath, version)):
shutil.rmtree("%s/plumed-%s" % (homepath, version))
if os.path.exists(homedir):
shutil.rmtree(homedir)
cmd = 'cd "%s"; tar -xzvf %s' % (homepath, filename)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
os.remove(os.path.join(homepath, filename))
# build plumed
print("Building plumed ...")
n_cpus = get_cpus()
cmd = 'cd %s/plumed-%s; ./configure --prefix=%s --enable-modules=all --enable-static-patch ; make -j%d ; make install' % (homepath, version, homedir, n_cpus)
try:
txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as e:
print("Make failed with:\n %s" % e.output.decode('UTF-8'))
sys.exit(1)
# create 2 links in lib/plumed to plumed2 installation dir
print("Creating links to plumed2 include and lib files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
os.remove("liblink")
os.symlink(os.path.join(homedir, 'include'), 'includelink')
libpath = os.path.join(homedir, 'lib64')
if not os.path.exists(libpath):
libpath = os.path.join(homedir, 'lib')
os.symlink(libpath, 'liblink')
if os.path.isfile("Makefile.lammps.%s" % mode):
print("Creating Makefile.lammps")
plumedinc = os.path.join('liblink', 'plumed', 'src', 'lib', 'Plumed.inc.' + mode)
lines1 = open(plumedinc, 'r').readlines()
lines2 = open("Makefile.lammps.%s" % mode, 'r').readlines()
fp = open("Makefile.lammps", 'w')
fp.write("PLUMED_LIBDIR=" + os.path.join(homedir, "lib\n"))
for line in lines1:
fp.write(line)
for line in lines2:
fp.write(line)
fp.close()
| timattox/lammps_USER-DPD | lib/plumed/Install.py | Python | gpl-2.0 | 5,096 | [
"LAMMPS"
] | b6883fbd256404857836e9550346ad45f112c4c205444c16b4337c0e2524acd5 |
import numpy as np
from traits.api import HasTraits, Instance, Button, \
on_trait_change
from traitsui.api import View, Item, HSplit, Group
from mayavi import mlab
from mayavi.core.ui.api import MlabSceneModel, SceneEditor
class MyDialog(HasTraits):
scene1 = Instance(MlabSceneModel, ())
scene2 = Instance(MlabSceneModel, ())
button1 = Button('Redraw')
button2 = Button('Redraw')
@on_trait_change('button1')
def redraw_scene1(self):
self.redraw_scene(self.scene1)
@on_trait_change('button2')
def redraw_scene2(self):
self.redraw_scene(self.scene2)
def redraw_scene(self, scene):
        # Notice how each mlab call points explicitly to the figure it
# applies to.
mlab.clf(figure=scene.mayavi_scene)
x, y, z, s = np.random.random((4, 100))
mlab.points3d(x, y, z, s, figure=scene.mayavi_scene)
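        # Hedged note: in this multi-scene pattern, any further mlab call
        # should also name its target figure explicitly, e.g. (hypothetical):
        #   mlab.title('random points', figure=scene.mayavi_scene)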
# The layout of the dialog created
view = View(HSplit(
Group(
Item('scene1',
editor=SceneEditor(), height=250,
width=300),
'button1',
show_labels=False,
),
Group(
Item('scene2',
editor=SceneEditor(), height=250,
width=300, show_label=False),
'button2',
show_labels=False,
),
),
resizable=True,
)
m = MyDialog()
m.configure_traits() | HPCGISLab/STDataViz | WorkingVersion/LibTry/MayaviGUI/mlabMultiScene.py | Python | bsd-3-clause | 1,603 | [
"Mayavi"
] | 7a5f7ec42a74654abac515d9376116ad75240bc4633b65383c5ea86dc6026438 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bulkloader Transform Helper functions.
A collection of helper functions for bulkloading data, typically referenced
from a bulkloader.yaml file.
"""
import base64
import datetime
import os
import re
import tempfile
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.ext.bulkload import bulkloader_errors
CURRENT_PROPERTY = None
KEY_TYPE_NAME = 'name'
KEY_TYPE_ID = 'ID'
# Decorators
def none_if_empty(fn):
"""A wrapper for a value to return None if it's empty. Useful on import.
Can be used in config files (e.g. "transform.none_if_empty(int)" or
as a decorator.
Args:
fn: Single argument transform function.
Returns:
Wrapped function.
"""
def wrapper(value):
if value == '' or value is None:
return None
return fn(value)
return wrapper
def empty_if_none(fn):
"""A wrapper for a value to return '' if it's None. Useful on export.
Can be used in config files (e.g. "transform.empty_if_none(unicode)" or
as a decorator.
Args:
fn: Single argument transform function.
Returns:
Wrapped function.
"""
def wrapper(value):
if value is None:
return ''
return fn(value)
return wrapper
# Key helpers.
def create_foreign_key(kind, key_is_id=False):
"""A method to make one-level Key objects.
These are typically used in ReferenceProperty in Python, where the reference
value is a key with kind (or model) name name.
This helper method does not support keys with parents. Use create_deep_key
instead to create keys with parents.
Args:
kind: The kind name of the reference as a string.
key_is_id: If true, convert the key into an integer to be used as an id.
If false, leave the key in the input format (typically a string).
Returns:
Single argument method which parses a value into a Key of kind entity_kind.
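  Example (kind and value are hypothetical):
    to_author_key = create_foreign_key('Author')
    key = to_author_key('jane-doe')  # Key with kind 'Author', name 'jane-doe'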
"""
def generate_foreign_key_lambda(value):
if key_is_id:
value = int(value)
return datastore.Key.from_path(kind, value)
return generate_foreign_key_lambda
def create_deep_key(*path_info):
"""A method to make multi-level Key objects.
Generates multi-level key from multiple fields in the input dictionary.
This is typically used for Keys for entities which have variable parent keys,
e.g. ones with owned relationships. It can used for both __key__ and
references.
Use create_foreign_key as a simpler way to create single level keys.
Args:
path_info: List of tuples, describing (kind, property, is_id=False).
kind: The kind name.
property: The external property in the current import dictionary, or
transform.CURRENT_PROPERTY for the value passed to the transform.
is_id: Converts value to int and treats as numeric ID if True, otherwise
the value is a string name. Default is False.
Example:
create_deep_key(('rootkind', 'rootcolumn'),
('childkind', 'childcolumn', True),
('leafkind', transform.CURRENT_PROPERTY))
Returns:
Transform method which parses the info from the current neutral dictionary
into a Key with parents as described by path_info.
"""
validated_path_info = []
for level_info in path_info:
if len(level_info) == 3:
key_is_id = level_info[2]
elif len(level_info) == 2:
key_is_id = False
else:
raise bulkloader_errors.InvalidConfiguration(
'Each list in create_deep_key must specify exactly 2 or 3 '
'parameters, (kind, property, is_id=False). You specified: %s' %
repr(path_info))
kind_name = level_info[0]
property_name = level_info[1]
validated_path_info.append((kind_name, property_name, key_is_id))
def create_deep_key_lambda(value, bulkload_state):
path = []
for kind_name, property_name, key_is_id in validated_path_info:
if property_name is CURRENT_PROPERTY:
name_or_id = value
else:
name_or_id = bulkload_state.current_dictionary[property_name]
if key_is_id:
name_or_id = int(name_or_id)
path += [kind_name, name_or_id]
return datastore.Key.from_path(*path)
return create_deep_key_lambda
def _key_id_or_name_n(key, index):
"""Internal helper function for key id and name transforms.
Args:
key: A datastore key.
index: The depth in the key to return; 0 is root, -1 is leaf.
Returns:
The id or name of the nth deep sub key in key.
"""
if not key:
return None
path = key.to_path()
if not path:
return None
path_index = (index * 2) + 1
return path[path_index]
def key_id_or_name_as_string_n(index):
"""Pull out the nth (0-based) key id or name from a key which has parents.
If a key is present, return its id or name as a string.
Note that this loses the distinction between integer IDs and strings
which happen to look like integers. Use key_type to distinguish them.
This is a useful complement to create_deep_key.
Args:
index: The depth of the id or name to extract. Zero is the root key.
Negative one is the leaf key.
Returns:
Function extracting the name or ID of the key at depth index, as a unicode
string. Returns '' if key is empty (unsaved), otherwise raises IndexError
if the key is not as deep as described.
"""
def transform_function(key):
id_or_name = _key_id_or_name_n(key, index)
if not id_or_name:
return u''
return unicode(id_or_name)
return transform_function
# Commonly used helper which returns the value of the leaf key.
key_id_or_name_as_string = key_id_or_name_as_string_n(-1)
def key_type_n(index):
"""Pull out the nth (0-based) key type from a key which has parents.
This is most useful when paired with key_id_or_name_as_string_n.
This is a useful complement to create_deep_key.
Args:
index: The depth of the id or name to extract. Zero is the root key.
Negative one is the leaf key.
Returns:
Method returning the type ('ID' or 'name') of the key at depth index.
Returns '' if key is empty (unsaved), otherwise raises IndexError
if the key is not as deep as described.
"""
def transform_function(key):
id_or_name = _key_id_or_name_n(key, index)
if id_or_name is None:
return ''
if isinstance(id_or_name, basestring):
return KEY_TYPE_NAME
return KEY_TYPE_ID
return transform_function
# Commonly used helper which returns the type of the leaf key.
key_type = key_type_n(-1)
def key_kind_n(index):
"""Pull out the nth (0-based) key kind from a key which has parents.
This is a useful complement to create_deep_key.
Args:
index: The depth of the id or name to extract. Zero is the root key.
Negative one is the leaf key.
Returns:
Function returning the kind of the key at depth index, or raising
IndexError if the key is not as deep as described.
"""
@empty_if_none
def transform_function(key):
path = key.to_path()
path_index = (index * 2)
return unicode(path[path_index])
return transform_function
# Commonly used helper which returns the kind of the leaf key.
key_kind = key_kind_n(-1)
# Blob and ByteString helpers.
@none_if_empty
def blobproperty_from_base64(value):
"""Return a datastore blob property containing the base64 decoded value."""
decoded_value = base64.b64decode(value)
return datastore_types.Blob(decoded_value)
@none_if_empty
def bytestring_from_base64(value):
"""Return a datastore bytestring property from a base64 encoded value."""
decoded_value = base64.b64decode(value)
return datastore_types.ByteString(decoded_value)
def blob_to_file(filename_hint_propertyname=None,
directory_hint=''):
"""Write the blob contents to a file, and replace them with the filename.
Args:
filename_hint_propertyname: If present, the filename will begin with
the contents of this value in the entity being exported.
directory_hint: If present, the files will be stored in this directory.
Returns:
A function which writes the input blob to a file.
"""
directory = []
def transform_function(value, bulkload_state):
if not directory:
parent_dir = os.path.dirname(bulkload_state.filename)
directory.append(os.path.join(parent_dir, directory_hint))
if directory[0] and not os.path.exists(directory[0]):
os.makedirs(directory[0])
filename_hint = 'blob_'
suffix = ''
filename = ''
if filename_hint_propertyname:
filename_hint = bulkload_state.current_entity[filename_hint_propertyname]
filename = os.path.join(directory[0], filename_hint)
if os.path.exists(filename):
filename = ''
(filename_hint, suffix) = os.path.splitext(filename_hint)
if not filename:
filename = tempfile.mktemp(suffix, filename_hint, directory[0])
f = open(filename, 'wb')
f.write(value)
f.close()
return filename
return transform_function
# Formatted string helpers: Extract, convert to boolean, date, or list.
def import_date_time(format, _strptime=None):
"""A wrapper around strptime. Also returns None if the input is empty.
Args:
format: Format string for strptime.
Returns:
Single argument method which parses a string into a datetime using format.
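  Example (the format string is chosen for illustration):
    parse = import_date_time('%Y-%m-%d')
    parse('2012-03-04')  # -> datetime.datetime(2012, 3, 4, 0, 0)
    parse('')            # -> None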
"""
if not _strptime:
_strptime = datetime.datetime.strptime
def import_date_time_lambda(value):
if not value:
return None
return _strptime(value, format)
return import_date_time_lambda
def export_date_time(format):
"""A wrapper around strftime. Also returns '' if the input is None.
Args:
format: Format string for strftime.
Returns:
Single argument method which convers a datetime into a string using format.
"""
def export_date_time_lambda(value):
if not value:
return ''
return datetime.datetime.strftime(value, format)
return export_date_time_lambda
def regexp_extract(pattern):
"""Return first group in the value matching the pattern.
Args:
pattern: A regular expression to match on with at least one group.
Returns:
A single argument method which returns the first group matched,
or None if no match or no group was found.
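  Example (the pattern is illustrative):
    get_digits = regexp_extract(r'item-(\d+)')
    get_digits('item-42')  # -> '42'
    get_digits('other')    # -> None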
"""
def regexp_extract_lambda(value):
if not value:
return None
matches = re.match(pattern, value)
if not matches:
return None
return matches.group(1)
return regexp_extract_lambda
def regexp_bool(regexp, flags=0):
"""Return a boolean if the expression matches with re.match.
Note that re.match anchors at the start but not end of the string.
Args:
regexp: String, regular expression.
flags: Optional flags to pass to re.match.
Returns:
Method which returns a Boolean if the expression matches.
"""
def transform_function(value):
return bool(re.match(regexp, value, flags))
return transform_function
def split_string(delimiter):
  """Split a string using the delimiter into a list.
  This is just a wrapper for string.split.
  Args:
    delimiter: The delimiter to split the string on.
  Returns:
    Method which splits the string into a list along the delimiter.
  """
  def split_string_lambda(value):
    return value.split(delimiter)
  return split_string_lambda
def join_list(delimiter):
  """Join a list into a string using the delimiter.
  This is just a wrapper for string.join.
  Args:
    delimiter: The delimiter to use when joining the string.
  Returns:
    Method which joins the list into a string with the delimiter.
  """
  def join_string_lambda(value):
    return delimiter.join(value)
  return join_string_lambda
def list_from_multiproperty(*external_names):
"""Create a list from multiple properties.
Args:
external_names: List of the properties to use.
Returns:
Transform function which returns a list of the properties in external_names.
"""
def list_from_multiproperty_lambda(unused_value, bulkload_state):
result = []
for external_name in external_names:
value = bulkload_state.current_dictionary.get(external_name)
if value:
result.append(value)
return result
return list_from_multiproperty_lambda
def property_from_list(index):
"""Return the Nth item from a list, or '' if the list is shorter.
Args:
index: Item in the list to return.
Returns:
Function returning the item from a list, or '' if the list is too short.
"""
@empty_if_none
def property_from_list_lambda(values):
if len(values) > index:
return values[index]
return ''
return property_from_list_lambda
# SimpleXML list Helpers
def list_from_child_node(xpath, suppress_blank=False):
"""Return a list property from child nodes of the current xml node.
This applies only the simplexml helper, as it assumes __node__, the current
ElementTree node corresponding to the import record.
Sample usage for structure:
<Visit>
<VisitActivities>
<Activity>A1</Activity>
<Activity>A2</Activity>
</VisitActivities>
</Visit>
property: activities
external_name: VisitActivities # Ignored on import, used on export.
import_transform: list_from_xml_node('VisitActivities/Activity')
export_transform: child_node_from_list('Activity')
Args:
xpath: XPath to run on the current node.
    suppress_blank: if True, nodes with no text will be skipped.
Returns:
Transform function which works as described in the args.
"""
def list_from_child_node_lambda(unused_value, bulkload_state):
result = []
for node in bulkload_state.current_dictionary['__node__'].findall(xpath):
if node.text:
result.append(node.text)
elif not suppress_blank:
result.append('')
return result
return list_from_child_node_lambda
def child_node_from_list(child_node_name):
"""Return a value suitable for generating an XML child node on export.
The return value is a list of tuples which the simplexml connector will
use to build a child node.
See also list_from_child_node
Args:
child_node_name: The name to use for each child node.
Returns:
Transform function which works as described in the args.
"""
def child_node_from_list_lambda(values):
return [(child_node_name, value) for value in values]
return child_node_from_list_lambda
| octavioturra/aritial | google_appengine/google/appengine/ext/bulkload/transform.py | Python | apache-2.0 | 14,943 | [
"VisIt"
] | ead9098c59b0bfb436163e231e3feb2b388c71004ba9dbbfb2aef6a6c2ccfe8b |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
import sys
# Add path to Bio
sys.path.append('../../..')
"""Perform single-point crossovers between the genomes of two organisms.
This module performs single-point crossover between two genomes.
SinglePointCrossover:
genome 1 -- A B C*D E F
genome 2 -- a b c*d e f
new genome 1 -- A B C d e f
new genome 2 -- a b c D E F
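A hedged usage sketch (org_1 and org_2 are assumed to be existing
Organism instances; do_crossover is inherited from the GeneralPoint
base class):
    crossover = SinglePointCrossover(crossover_prob=1.0)
    new_org_1, new_org_2 = crossover.do_crossover(org_1, org_2)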
"""
# standard modules
from Bio.GA.Crossover.GeneralPoint import TwoCrossover
class SinglePointCrossover(TwoCrossover):
"""Perform point crossover between genomes at some defined rate.
This performs a crossover between two genomes at some defined
frequency. Length of genome is preserved, as the crossover
point is the same for either genome.
"""
def __init__(self, crossover_prob=.1):
"""Initialize to do crossovers at the specified probability.
"""
TwoCrossover.__init__(self, 1, crossover_prob)
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/GA/Crossover/Point.py | Python | gpl-2.0 | 1,060 | [
"Biopython"
] | 1ad64c7da8f2565bd0f6661a8e7c4e7a5348ea7116728c4c155616c38185a5a8 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from photologue.views import PhotoListView, GalleryListView, GalleryDetailView
from forocacao.app.views import HomeView
urlpatterns = [
# django smart selects
url(r'^chaining/', include('smart_selects.urls')),
#url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
#url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("forocacao.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'', include("forocacao.app.urls", namespace="app")),
url(r'^photologue/', include('photologue.urls', namespace='photologue')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
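# Named URLs from the namespaced includes above can be reversed as usual.
# A hedged sketch (the URL name 'home' in the "app" namespace is hypothetical):
#   from django.core.urlresolvers import reverse
#   reverse('app:home')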
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these urls in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
| javierwilson/forocacao | config/urls.py | Python | bsd-3-clause | 1,579 | [
"VisIt"
] | 31e692104d8de59370c6f8c2d93f3544654632c3d83f83e14448748e6e5a7f90 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
import threading
import urllib.parse
import re
import datetime
from bs4 import BeautifulSoup
import pymongo.errors
from . import errors
from . import common
from . import engine
# Technically I've read that many operations are thread-safe on Python's
# list implementation, so this may not be necessary, but I think I'd rather
# err on the side of caution at least for now
class SharedList(object):
def __init__(self, lst):
self.mutex = threading.Lock()
self.lst = lst
return
def __contains__(self, val):
return val in self.lst
def __iter__(self):
return self.lst.__iter__()
def pop(self):
self.mutex.acquire()
try:
val = self.lst.pop()
self.mutex.release()
return val
except:
if self.mutex.locked():
self.mutex.release()
return None
def append(self, val):
self.mutex.acquire()
try:
self.lst.append(val)
self.mutex.release()
return True
except:
if self.mutex.locked():
self.mutex.release()
return None
def __len__(self):
return len(self.lst)
def extend(self, lst):
self.mutex.acquire()
try:
self.lst.extend(lst)
self.mutex.release()
return True
except:
if self.mutex.locked():
self.mutex.release()
            return None
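# A minimal usage sketch for SharedList (URLs are illustrative):
#   urls = SharedList(["http://example.com/a"])
#   urls.append("http://example.com/b")
#   url = urls.pop()  # -> a URL, or None if the pop failed
#   "http://example.com/a" in urls  # -> True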
class EngineWrapper(threading.Thread):
def __init__(self, parent, group = None, name = None,
args = (), kwargs = None):
super(EngineWrapper, self).__init__(group = group, name = name,
args = args, kwargs = kwargs)
self.parent = parent
self.eng = parent.eng.clone()
self.to_visit = parent.to_visit
self.stop = parent.stop
self.delay = parent.delay
def run(self):
while self.to_visit or not self.stop.is_set():
# There are more sites to visit
if self.to_visit:
url = self.to_visit.pop()
site = self.eng.get_page_source(url)
if url and site:
try:
self.parent.notify(site)
# give the dbs a sec to catch up
except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
time.sleep(self.delay)
# The parent needs more time to generate more sites.
# Wait the set delay
else:
time.sleep(self.delay)
return
class SearchCrawler(threading.Thread):
def __init__(self, kwds = [], dbhandler = None, eng = engine.DefaultEngine(),
max_threads = 10, delay = 1, group = None, name = None,
args = (), kwargs = None):
super(SearchCrawler, self).__init__(group = group, name = name,
args = args, kwargs = kwargs)
self.max_threads = max_threads
self.eng = eng
self.dbhandler = dbhandler
self.stop = threading.Event()
self.to_visit = SharedList([])
self.delay = delay
self.kwds = kwds
self.children = []
return
def next_page(self, soup):
        # MasterError is assumed to live in the imported errors module
        raise errors.MasterError("next_page has not been implemented for this class")
def get_listings(self, soup):
        raise errors.MasterError("get_listings has not been implemented for this class")
def notify(self, message):
if isinstance(message, common.Website):
threading.Thread(target=self.dbhandler.dump(message))
return True
else:
return False
def start_threads(self):
for x in range(0, self.max_threads):
t = EngineWrapper(self)
self.children.append(t)
t.start()
def run(self):
        raise errors.MasterError("run has not been implemented for this class")
class BackpageCrawler(SearchCrawler):
def __init__(self, site, kwds = [], dbhandler = None, area = "atlanta",
eng = engine.DefaultEngine(), max_threads = 10, delay = 1):
self.baseurl = "".join(["http://", area, ".backpage.com/", site, "/"])
if kwds:
keywords = " ".join(kwds)
self.url = "?".join([self.baseurl, keywords])
else:
self.url = self.baseurl
super(BackpageCrawler, self).__init__(kwds, dbhandler, eng, max_threads, delay)
def next_page(self, soup):
links = soup.find_all("a", href=True)
for link in links:
innerHTML = link.decode_contents(formatter = "html")
if innerHTML == "Next":
return link["href"]
return None
def get_listings(self, soup):
links = soup.find_all("a", href=True)
valid = []
for link in links:
# remove some non-ad links
if link.has_attr("class"):
continue
href = str(urllib.parse.urljoin(self.baseurl, link["href"]))
# remove urls that are not on the same site
if not re.search(self.baseurl, href):
continue
try:
b_isindb = self.dbhandler.find_by_id(href)
if not href in self.to_visit and not b_isindb:
valid.append(href)
if len(valid) > 100:
self.to_visit.extend(valid)
valid.clear()
except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
# try again
self.get_listings(soup)
self.to_visit.extend(valid)
return
def run(self):
self.start_threads()
time.sleep(self.delay)
url = self.url
while url:
site = self.eng.get_page_source(url)
if site:
soup = BeautifulSoup(site.source, "lxml")
self.get_listings(soup)
url = self.next_page(soup)
else:
url = None
self.stop.set()
for t in self.children:
t.join()
| geezhawk/palantiri | src/core/crawler.py | Python | mpl-2.0 | 6,339 | [
"VisIt"
] | 5188c1aa2292a9acb6cbc501b28667980bc56ebed9fac21f9ad7d39622d017e8 |
""" ProxyRepository class is a front-end to the proxy repository Database
"""
__RCSID__ = "$Id$"
import time
import random
import types
import hashlib
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Security.X509Request import X509Request
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security.MyProxy import MyProxy
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
class ProxyDB( DB ):
NOTIFICATION_TIMES = [ 2592000, 1296000 ]
def __init__( self,
useMyProxy = False ):
DB.__init__( self, 'ProxyDB', 'Framework/ProxyDB' )
random.seed()
self.__defaultRequestLifetime = 300 # 5min
self.__defaultTokenLifetime = 86400 * 7 # 1 week
self.__defaultTokenMaxUses = 50
self.__useMyProxy = useMyProxy
self._minSecsToAllowStore = 3600
self.__notifClient = NotificationClient()
retVal = self.__initializeDB()
if not retVal[ 'OK' ]:
raise Exception( "Can't create tables: %s" % retVal[ 'Message' ] )
self.purgeExpiredProxies( sendNotifications = False )
self.__checkDBVersion()
def getMyProxyServer( self ):
return gConfig.getValue( "/DIRAC/VOPolicy/MyProxyServer" , "myproxy.cern.ch" )
def getMyProxyMaxLifeTime( self ):
return gConfig.getValue( "/DIRAC/VOPolicy/MyProxyMaxDelegationTime", 168 ) * 3600
def __initializeDB( self ):
"""
Create the tables
"""
retVal = self._query( "show tables" )
if not retVal[ 'OK' ]:
return retVal
tablesInDB = [ t[0] for t in retVal[ 'Value' ] ]
tablesD = {}
if 'ProxyDB_Requests' not in tablesInDB:
tablesD[ 'ProxyDB_Requests' ] = { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
'UserDN' : 'VARCHAR(255) NOT NULL',
'Pem' : 'BLOB',
'ExpirationTime' : 'DATETIME'
},
'PrimaryKey' : 'Id'
}
if 'ProxyDB_Proxies' not in tablesInDB:
tablesD[ 'ProxyDB_Proxies' ] = { 'Fields' : { 'UserName' : 'VARCHAR(64) NOT NULL',
'UserDN' : 'VARCHAR(255) NOT NULL',
'UserGroup' : 'VARCHAR(255) NOT NULL',
'Pem' : 'BLOB',
'ExpirationTime' : 'DATETIME',
'PersistentFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "True"',
},
'PrimaryKey' : [ 'UserDN', 'UserGroup' ]
}
if 'ProxyDB_VOMSProxies' not in tablesInDB:
tablesD[ 'ProxyDB_VOMSProxies' ] = { 'Fields' : { 'UserName' : 'VARCHAR(64) NOT NULL',
'UserDN' : 'VARCHAR(255) NOT NULL',
'UserGroup' : 'VARCHAR(255) NOT NULL',
'VOMSAttr' : 'VARCHAR(255) NOT NULL',
'Pem' : 'BLOB',
'ExpirationTime' : 'DATETIME',
},
                                           'PrimaryKey' : [ 'UserDN', 'UserGroup', 'VOMSAttr' ]
}
if 'ProxyDB_Log' not in tablesInDB:
tablesD[ 'ProxyDB_Log' ] = { 'Fields' : { 'ID': 'BIGINT NOT NULL AUTO_INCREMENT',
'IssuerDN' : 'VARCHAR(255) NOT NULL',
'IssuerGroup' : 'VARCHAR(255) NOT NULL',
'TargetDN' : 'VARCHAR(255) NOT NULL',
'TargetGroup' : 'VARCHAR(255) NOT NULL',
'Action' : 'VARCHAR(128) NOT NULL',
'Timestamp' : 'DATETIME',
},
'PrimaryKey': 'ID',
'Indexes' : { 'Timestamp' : [ 'Timestamp' ]}
}
if 'ProxyDB_Tokens' not in tablesInDB:
tablesD[ 'ProxyDB_Tokens' ] = { 'Fields' : { 'Token' : 'VARCHAR(64) NOT NULL',
'RequesterDN' : 'VARCHAR(255) NOT NULL',
'RequesterGroup' : 'VARCHAR(255) NOT NULL',
'ExpirationTime' : 'DATETIME NOT NULL',
'UsesLeft' : 'SMALLINT UNSIGNED DEFAULT 1',
},
'PrimaryKey' : 'Token'
}
if 'ProxyDB_ExpNotifs' not in tablesInDB:
tablesD[ 'ProxyDB_ExpNotifs' ] = { 'Fields' : { 'UserDN' : 'VARCHAR(255) NOT NULL',
'UserGroup' : 'VARCHAR(255) NOT NULL',
'LifeLimit' : 'INTEGER UNSIGNED DEFAULT 0',
'ExpirationTime' : 'DATETIME NOT NULL',
},
'PrimaryKey' : [ 'UserDN', 'UserGroup' ]
}
return self._createTables( tablesD )
def __addUserNameToTable( self, tableName ):
result = self._update( "ALTER TABLE `%s` ADD COLUMN UserName VARCHAR(64) NOT NULL" % tableName )
if not result[ 'OK' ]:
return result
result = self._query( "SELECT DISTINCT UserName, UserDN FROM `%s`" % tableName )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
for userName, userDN in data:
if not userName:
result = Registry.getUsernameForDN( userDN )
if not result[ 'OK' ]:
self.log.error( "Could not retrieve username for DN", userDN )
continue
userName = result[ 'Value' ]
try:
userName = self._escapeString( userName )[ 'Value' ]
userDN = self._escapeString( userDN )[ 'Value' ]
except KeyError:
self.log.error( "Could not escape username or DN", "%s %s" % ( userName, userDN ) )
continue
result = self._update( "UPDATE `%s` SET UserName=%s WHERE UserDN=%s" % ( tableName, userName, userDN ) )
if not result[ 'OK' ]:
self.log.error( "Could update username for DN", "%s: %s" % ( userDN, result[ 'Message' ] ) )
continue
self.log.info( "UserDN %s has user %s" % ( userDN, userName ) )
return S_OK()
def __checkDBVersion( self ):
for tableName in ( "ProxyDB_Proxies", "ProxyDB_VOMSProxies" ):
result = self._query( "describe `%s`" % tableName )
if not result[ 'OK' ]:
return result
if 'UserName' not in [ row[0] for row in result[ 'Value' ] ]:
self.log.notice( "Username missing in table %s schema. Adding it" % tableName )
result = self.__addUserNameToTable( tableName )
if not result[ 'OK' ]:
          return result
    return S_OK()
def generateDelegationRequest( self, proxyChain, userDN ):
"""
Generate a request and store it for a given proxy Chain
"""
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return retVal
connObj = retVal[ 'Value' ]
retVal = proxyChain.generateProxyRequest()
if not retVal[ 'OK' ]:
return retVal
request = retVal[ 'Value' ]
retVal = request.dumpRequest()
if not retVal[ 'OK' ]:
return retVal
reqStr = retVal[ 'Value' ]
retVal = request.dumpPKey()
if not retVal[ 'OK' ]:
return retVal
allStr = reqStr + retVal[ 'Value' ]
try:
sUserDN = self._escapeString( userDN )[ 'Value' ]
sAllStr = self._escapeString( allStr )[ 'Value' ]
except KeyError:
return S_ERROR( "Cannot escape DN" )
cmd = "INSERT INTO `ProxyDB_Requests` ( Id, UserDN, Pem, ExpirationTime )"
cmd += " VALUES ( 0, %s, %s, TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() ) )" % ( sUserDN,
sAllStr,
int( self.__defaultRequestLifetime ) )
retVal = self._update( cmd, conn = connObj )
if not retVal[ 'OK' ]:
return retVal
#99% of the times we will stop here
if 'lastRowId' in retVal:
return S_OK( { 'id' : retVal['lastRowId'], 'request' : reqStr } )
#If the lastRowId hack does not work. Get it by hand
retVal = self._query( "SELECT Id FROM `ProxyDB_Requests` WHERE Pem='%s'" % reqStr )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
if len( data ) == 0:
return S_ERROR( "Insertion of the request in the db didn't work as expected" )
retVal = proxyChain.getDIRACGroup()
if retVal[ 'OK' ] and retVal[ 'Value' ]:
userGroup = retVal[ 'Value' ]
else:
userGroup = "unset"
self.logAction( "request upload", userDN, userGroup, userDN, "any" )
#Here we go!
return S_OK( { 'id' : data[0][0], 'request' : reqStr } )
def retrieveDelegationRequest( self, requestId, userDN ):
"""
Retrieve a request from the DB
"""
try:
sUserDN = self._escapeString( userDN )[ 'Value' ]
except KeyError:
return S_ERROR( "Cannot escape DN" )
cmd = "SELECT Pem FROM `ProxyDB_Requests` WHERE Id = %s AND UserDN = %s" % ( requestId,
sUserDN )
retVal = self._query( cmd )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
if len( data ) == 0:
return S_ERROR( "No requests with id %s" % requestId )
request = X509Request()
retVal = request.loadAllFromString( data[0][0] )
if not retVal[ 'OK' ]:
return retVal
return S_OK( request )
def purgeExpiredRequests( self ):
"""
Purge expired requests from the db
"""
cmd = "DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()"
return self._update( cmd )
def deleteRequest( self, requestId ):
"""
Delete a request from the db
"""
cmd = "DELETE FROM `ProxyDB_Requests` WHERE Id=%s" % requestId
return self._update( cmd )
def completeDelegation( self, requestId, userDN, delegatedPem ):
"""
Complete a delegation and store it in the db
"""
retVal = self.retrieveDelegationRequest( requestId, userDN )
if not retVal[ 'OK' ]:
return retVal
request = retVal[ 'Value' ]
chain = X509Chain( keyObj = request.getPKey() )
retVal = chain.loadChainFromString( delegatedPem )
if not retVal[ 'OK' ]:
return retVal
retVal = chain.isValidProxy( ignoreDefault = True )
noGroupFlag = False
if not retVal[ 'OK' ]:
if retVal['Message'] == "Proxy does not have an explicit group":
noGroupFlag = True
else:
return retVal
result = chain.isVOMS()
if result[ 'OK' ] and result[ 'Value' ]:
return S_ERROR( "Proxies with VOMS extensions are not allowed to be uploaded" )
retVal = request.checkChain( chain )
if not retVal[ 'OK' ]:
return retVal
if not retVal[ 'Value' ]:
return S_ERROR( "Received chain does not match request: %s" % retVal[ 'Message' ] )
retVal = chain.getDIRACGroup()
if not retVal[ 'OK' ]:
return retVal
userGroup = retVal[ 'Value' ]
if not userGroup:
userGroup = Registry.getDefaultUserGroup()
retVal = Registry.getGroupsForDN( userDN )
if not retVal[ 'OK' ]:
return retVal
if not userGroup in retVal[ 'Value' ]:
return S_ERROR( "%s group is not valid for %s" % ( userGroup, userDN ) )
# For proxies without embedded DIRAC group only one default is allowed
# Cleaning all the proxies for this DN if any before uploading the new one.
if noGroupFlag:
retVal = self.deleteProxy( userDN )
if not retVal[ 'OK' ]:
return retVal
retVal = self.storeProxy( userDN, userGroup, chain )
if not retVal[ 'OK' ]:
return retVal
retVal = self.deleteRequest( requestId )
if not retVal[ 'OK' ]:
return retVal
return S_OK()
def storeProxy( self, userDN, userGroup, chain ):
""" Store user proxy into the Proxy repository for a user specified by his
DN and group.
"""
retVal = Registry.getUsernameForDN( userDN )
if not retVal[ 'OK' ]:
return retVal
userName = retVal[ 'Value' ]
#Get remaining secs
retVal = chain.getRemainingSecs()
if not retVal[ 'OK' ]:
return retVal
remainingSecs = retVal[ 'Value' ]
if remainingSecs < self._minSecsToAllowStore:
return S_ERROR( "Cannot store proxy, remaining secs %s is less than %s" % ( remainingSecs, self._minSecsToAllowStore ) )
#Compare the DNs
retVal = chain.getIssuerCert()
if not retVal[ 'OK' ]:
return retVal
proxyIdentityDN = retVal[ 'Value' ].getSubjectDN()[ 'Value' ]
if not userDN == proxyIdentityDN:
msg = "Mismatch in the user DN"
vMsg = "Proxy says %s and credentials are %s" % ( proxyIdentityDN, userDN )
self.log.error( msg, vMsg )
return S_ERROR( "%s. %s" % ( msg, vMsg ) )
#Check the groups
retVal = chain.getDIRACGroup()
if not retVal[ 'OK' ]:
return retVal
proxyGroup = retVal[ 'Value' ]
if not proxyGroup:
proxyGroup = Registry.getDefaultUserGroup()
if not userGroup == proxyGroup:
msg = "Mismatch in the user group"
vMsg = "Proxy says %s and credentials are %s" % ( proxyGroup, userGroup )
self.log.error( msg, vMsg )
return S_ERROR( "%s. %s" % ( msg, vMsg ) )
#Check if its limited
if chain.isLimitedProxy()['Value']:
return S_ERROR( "Limited proxies are not allowed to be stored" )
dLeft = remainingSecs / 86400
hLeft = remainingSecs / 3600 - dLeft * 24
mLeft = remainingSecs / 60 - hLeft * 60 - dLeft * 1440
sLeft = remainingSecs - hLeft * 3600 - mLeft * 60 - dLeft * 86400
self.log.info( "Storing proxy for credentials %s (%d:%02d:%02d:%02d left)" % ( proxyIdentityDN, dLeft, hLeft, mLeft, sLeft ) )
try:
sUserDN = self._escapeString( userDN )[ 'Value' ]
sUserGroup = self._escapeString( userGroup )[ 'Value' ]
except KeyError:
return S_ERROR( "Cannot escape DN" )
# Check what we have already got in the repository
cmd = "SELECT TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ), Pem FROM `ProxyDB_Proxies` WHERE UserDN=%s AND UserGroup=%s" % ( sUserDN, sUserGroup )
result = self._query( cmd )
if not result['OK']:
return result
# check if there is a previous ticket for the DN
data = result[ 'Value' ]
sqlInsert = True
if len( data ) > 0:
sqlInsert = False
pem = data[0][1]
if pem:
remainingSecsInDB = data[0][0]
if remainingSecs <= remainingSecsInDB:
self.log.info( "Proxy stored is longer than uploaded, omitting.", "%s in uploaded, %s in db" % ( remainingSecs, remainingSecsInDB ) )
return S_OK()
pemChain = chain.dumpAllToString()['Value']
dValues = { 'UserName' : self._escapeString( userName )[ 'Value' ],
'UserDN' : sUserDN,
'UserGroup' : sUserGroup,
'Pem' : self._escapeString( pemChain )[ 'Value' ],
'ExpirationTime' : 'TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )' % int( remainingSecs ),
'PersistentFlag' : "'False'" }
if sqlInsert:
sqlFields = []
sqlValues = []
for key in dValues:
sqlFields.append( key )
sqlValues.append( dValues[ key ] )
cmd = "INSERT INTO `ProxyDB_Proxies` ( %s ) VALUES ( %s )" % ( ", ".join( sqlFields ), ", ".join( sqlValues ) )
else:
sqlSet = []
sqlWhere = []
for k in dValues:
if k in ( 'UserDN', 'UserGroup' ):
sqlWhere.append( "%s = %s" % ( k, dValues[k] ) )
else:
sqlSet.append( "%s = %s" % ( k, dValues[k] ) )
cmd = "UPDATE `ProxyDB_Proxies` SET %s WHERE %s" % ( ", ".join( sqlSet ), " AND ".join( sqlWhere ) )
self.logAction( "store proxy", userDN, userGroup, userDN, userGroup )
return self._update( cmd )
def purgeExpiredProxies( self, sendNotifications = True ):
"""
Purge expired requests from the db
"""
purged = 0
for tableName in ( "ProxyDB_Proxies", "ProxyDB_VOMSProxies" ):
cmd = "DELETE FROM `%s` WHERE ExpirationTime < UTC_TIMESTAMP()" % tableName
result = self._update( cmd )
if not result[ 'OK' ]:
return result
purged += result[ 'Value' ]
self.log.info( "Purged %s expired proxies from %s" % ( result[ 'Value' ], tableName ) )
if sendNotifications:
result = self.sendExpirationNotifications()
if not result[ 'OK' ]:
return result
return S_OK( purged )
def deleteProxy( self, userDN, userGroup='any' ):
""" Remove proxy of the given user from the repository
"""
try:
userDN = self._escapeString( userDN )[ 'Value' ]
if userGroup != 'any':
userGroup = self._escapeString( userGroup )[ 'Value' ]
except KeyError:
return S_ERROR( "Invalid DN or group" )
req = "DELETE FROM `ProxyDB_Proxies` WHERE UserDN=%s" % userDN
if userGroup != 'any':
req += " AND UserGroup=%s" % userGroup
return self._update( req )
def __getPemAndTimeLeft( self, userDN, userGroup = False, vomsAttr = False ):
try:
sUserDN = self._escapeString( userDN )[ 'Value' ]
if userGroup:
sUserGroup = self._escapeString( userGroup )[ 'Value' ]
if vomsAttr:
sVomsAttr = self._escapeString( vomsAttr )[ 'Value' ]
except KeyError:
return S_ERROR( "Invalid DN or group" )
if not vomsAttr:
table = "`ProxyDB_Proxies`"
else:
table = "`ProxyDB_VOMSProxies`"
cmd = "SELECT Pem, TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) from %s" % table
cmd += "WHERE UserDN=%s AND TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > 0" % ( sUserDN )
if userGroup:
cmd += " AND UserGroup=%s" % sUserGroup
if vomsAttr:
cmd += " AND VOMSAttr=%s" % sVomsAttr
retVal = self._query( cmd )
if not retVal['OK']:
return retVal
data = retVal[ 'Value' ]
for record in data:
if record[0]:
return S_OK( ( record[0], record[1] ) )
if userGroup:
userMask = "%s@%s" % ( userDN, userGroup )
else:
userMask = userDN
return S_ERROR( "%s has no proxy registered" % userMask )
def renewFromMyProxy( self, userDN, userGroup, lifeTime = False, chain = False ):
if not lifeTime:
lifeTime = 43200
if not self.__useMyProxy:
return S_ERROR( "myproxy is disabled" )
#Get the chain
if not chain:
retVal = self.__getPemAndTimeLeft( userDN, userGroup )
if not retVal[ 'OK' ]:
return retVal
pemData = retVal[ 'Value' ][0]
chain = X509Chain()
retVal = chain.loadProxyFromString( pemData )
if not retVal[ 'OK' ]:
return retVal
originChainLifeTime = chain.getRemainingSecs()[ 'Value' ]
maxMyProxyLifeTime = self.getMyProxyMaxLifeTime()
    #If the chain already covers 0.8 of the max MyProxy lifetime, don't ask MyProxy
if originChainLifeTime > maxMyProxyLifeTime * 0.8:
self.log.error( "Skipping myproxy download",
"user %s %s chain has %s secs and requested %s secs" % ( userDN,
userGroup,
originChainLifeTime,
maxMyProxyLifeTime ) )
return S_OK( chain )
lifeTime *= 1.3
if lifeTime > maxMyProxyLifeTime:
lifeTime = maxMyProxyLifeTime
self.log.error( "Renewing proxy from myproxy", "user %s %s for %s secs" % ( userDN, userGroup, lifeTime ) )
myProxy = MyProxy( server = self.getMyProxyServer() )
retVal = myProxy.getDelegatedProxy( chain, lifeTime )
if not retVal[ 'OK' ]:
return retVal
mpChain = retVal[ 'Value' ]
retVal = mpChain.getRemainingSecs()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't retrieve remaining secs from renewed proxy: %s" % retVal[ 'Message' ] )
mpChainSecsLeft = retVal['Value']
if mpChainSecsLeft < originChainLifeTime:
self.log.info( "Chain downloaded from myproxy has less lifetime than the one stored in the db",
"\n Downloaded from myproxy: %s secs\n Stored in DB: %s secs" % ( mpChainSecsLeft, originChainLifeTime ) )
return S_OK( chain )
retVal = mpChain.getDIRACGroup()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't retrieve DIRAC Group from renewed proxy: %s" % retVal[ 'Message' ] )
chainGroup = retVal['Value']
if chainGroup != userGroup:
return S_ERROR( "Mismatch between renewed proxy group and expected: %s vs %s" % ( userGroup, chainGroup ) )
retVal = self.storeProxy( userDN, userGroup, mpChain )
if not retVal[ 'OK' ]:
self.log.error( "Cannot store proxy after renewal", retVal[ 'Message' ] )
retVal = myProxy.getServiceDN()
if not retVal[ 'OK' ]:
hostDN = userDN
else:
hostDN = retVal[ 'Value' ]
self.logAction( "myproxy renewal", hostDN, "host", userDN, userGroup )
return S_OK( mpChain )
def getProxy( self, userDN, userGroup, requiredLifeTime = False ):
""" Get proxy string from the Proxy Repository for use with userDN
in the userGroup
"""
retVal = self.__getPemAndTimeLeft( userDN, userGroup )
if not retVal[ 'OK' ]:
return retVal
pemData = retVal[ 'Value' ][0]
timeLeft = retVal[ 'Value' ][1]
chain = X509Chain()
retVal = chain.loadProxyFromString( pemData )
if not retVal[ 'OK' ]:
return retVal
if requiredLifeTime:
if timeLeft < requiredLifeTime:
retVal = self.renewFromMyProxy( userDN, userGroup, lifeTime = requiredLifeTime, chain = chain )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't get a proxy for %s seconds: %s" % ( requiredLifeTime, retVal[ 'Message' ] ) )
chain = retVal[ 'Value' ]
#Proxy is invalid for some reason, let's delete it
if not chain.isValidProxy()['Value']:
self.deleteProxy( userDN, userGroup )
return S_ERROR( "%s@%s has no proxy registered" % ( userDN, userGroup ) )
return S_OK( ( chain, timeLeft ) )
def __getVOMSAttribute( self, userGroup, requiredVOMSAttribute = False ):
if requiredVOMSAttribute:
return S_OK( { 'attribute' : requiredVOMSAttribute, 'VOMSVO' : Registry.getVOMSVOForGroup( userGroup ) } )
csVOMSMapping = Registry.getVOMSAttributeForGroup( userGroup )
if not csVOMSMapping:
return S_ERROR( "No mapping defined for group %s in the CS" % userGroup )
return S_OK( { 'attribute' : csVOMSMapping, 'VOMSVO' : Registry.getVOMSVOForGroup( userGroup ) } )
def getVOMSProxy( self, userDN, userGroup, requiredLifeTime = False, requestedVOMSAttr = False ):
""" Get proxy string from the Proxy Repository for use with userDN
in the userGroup and VOMS attr
"""
retVal = self.__getVOMSAttribute( userGroup, requestedVOMSAttr )
if not retVal[ 'OK' ]:
return retVal
vomsAttr = retVal[ 'Value' ][ 'attribute' ]
vomsVO = retVal[ 'Value' ][ 'VOMSVO' ]
#Look in the cache
retVal = self.__getPemAndTimeLeft( userDN, userGroup, vomsAttr )
if retVal[ 'OK' ]:
pemData = retVal[ 'Value' ][0]
vomsTime = retVal[ 'Value' ][1]
chain = X509Chain()
retVal = chain.loadProxyFromString( pemData )
if retVal[ 'OK' ]:
retVal = chain.getRemainingSecs()
if retVal[ 'OK' ]:
remainingSecs = retVal[ 'Value' ]
if requiredLifeTime and requiredLifeTime <= vomsTime and requiredLifeTime <= remainingSecs:
return S_OK( ( chain, min( vomsTime, remainingSecs ) ) )
retVal = self.getProxy( userDN, userGroup, requiredLifeTime )
if not retVal[ 'OK' ]:
return retVal
chain, secsLeft = retVal[ 'Value' ]
if requiredLifeTime and requiredLifeTime > secsLeft:
return S_ERROR( "Stored proxy is not long lived enough" )
vomsMgr = VOMS()
retVal = vomsMgr.getVOMSAttributes( chain )
if retVal[ 'OK' ]:
attrs = retVal[ 'Value' ]
if len( attrs ) > 0:
if attrs[0] != vomsAttr:
return S_ERROR( "Stored proxy has already a different VOMS attribute %s than requested %s" % ( vomsAttr, attrs[0] ) )
else:
result = self.__storeVOMSProxy( userDN, userGroup, vomsAttr, chain )
if not result[ 'OK' ]:
return result
secsLeft = result[ 'Value' ]
if requiredLifeTime and requiredLifeTime <= secsLeft:
return S_OK( ( chain, secsLeft ) )
return S_ERROR( "Stored proxy has already a different VOMS attribute and is not long lived enough" )
    retVal = vomsMgr.setVOMSAttributes( chain, vomsAttr, vo = vomsVO )
if not retVal[ 'OK' ]:
return S_ERROR( "Cannot append voms extension: %s" % retVal[ 'Message' ] )
chain = retVal[ 'Value' ]
result = self.__storeVOMSProxy( userDN, userGroup, vomsAttr, chain )
if not result[ 'OK' ]:
return result
secsLeft = result[ 'Value' ]
return S_OK( ( chain, secsLeft ) )
def __storeVOMSProxy( self, userDN, userGroup, vomsAttr, chain ):
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return retVal
connObj = retVal[ 'Value' ]
retVal1 = VOMS().getVOMSProxyInfo( chain, 'actimeleft' )
retVal2 = VOMS().getVOMSProxyInfo( chain, 'timeleft' )
if not retVal1[ 'OK' ]:
return retVal1
if not retVal2[ 'OK' ]:
return retVal2
try:
vomsSecsLeft1 = int( retVal1[ 'Value' ].strip() )
vomsSecsLeft2 = int( retVal2[ 'Value' ].strip() )
vomsSecsLeft = min( vomsSecsLeft1, vomsSecsLeft2 )
except Exception, e:
return S_ERROR( "Can't parse VOMS time left: %s" % str( e ) )
secsLeft = min( vomsSecsLeft, chain.getRemainingSecs()[ 'Value' ] )
pemData = chain.dumpAllToString()[ 'Value' ]
result = Registry.getUsernameForDN( userDN )
if not result[ 'OK' ]:
userName = ""
else:
userName = result[ 'Value' ]
try:
sUserName = self._escapeString( userName )[ 'Value' ]
sUserDN = self._escapeString( userDN )[ 'Value' ]
sUserGroup = self._escapeString( userGroup )[ 'Value' ]
sVomsAttr = self._escapeString( vomsAttr )[ 'Value' ]
sPemData = self._escapeString( pemData )[ 'Value' ]
except KeyError:
return S_ERROR( "Could not escape some data" )
cmd = "REPLACE INTO `ProxyDB_VOMSProxies` ( UserName, UserDN, UserGroup, VOMSAttr, Pem, ExpirationTime ) VALUES "
cmd += "( %s, %s, %s, %s, %s, TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() ) )" % ( sUserName, sUserDN, sUserGroup,
sVomsAttr, sPemData, secsLeft )
result = self._update( cmd, conn = connObj )
if not result[ 'OK' ]:
return result
return S_OK( secsLeft )
def getRemainingTime( self, userDN, userGroup ):
"""
Returns the remaining time the proxy is valid
"""
try:
userDN = self._escapeString( userDN )[ 'Value' ]
userGroup = self._escapeString( userGroup )[ 'Value' ]
except KeyError:
return S_ERROR( "Invalid DN or group" )
cmd = "SELECT TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) FROM `ProxyDB_Proxies`"
retVal = self._query( "%s WHERE UserDN = %s AND UserGroup = %s" % ( cmd, userDN, userGroup ) )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
if not data:
return S_OK( 0 )
return S_OK( int( data[0][0] ) )
def getUsers( self, validSecondsLeft = 0, dnMask = False, groupMask = False, userMask = False ):
""" Get all the distinct users from the Proxy Repository. Optionally, only users
with valid proxies within the given validity period expressed in seconds
"""
cmd = "SELECT UserName, UserDN, UserGroup, ExpirationTime, PersistentFlag FROM `ProxyDB_Proxies`"
sqlCond = []
if validSecondsLeft:
try:
validSecondsLeft = int( validSecondsLeft )
except ValueError:
return S_ERROR( "Seconds left has to be an integer" )
sqlCond.append( "TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > %d" % validSecondsLeft )
for field, mask in ( ( 'UserDN', dnMask ), ( 'UserGroup', groupMask ), ( 'UserName', userMask ) ):
if not mask:
continue
if type( mask ) not in ( types.ListType, types.TupleType ):
mask = [ mask ]
mask = [ self._escapeString( entry )[ 'Value' ] for entry in mask ]
sqlCond.append( "%s in ( %s )" % ( field, ", ".join( mask ) ) )
if sqlCond:
cmd += " WHERE %s" % " AND ".join( sqlCond )
retVal = self._query( cmd )
if not retVal[ 'OK' ]:
return retVal
data = []
for record in retVal[ 'Value' ]:
data.append( { 'Name': record[0],
'DN' : record[1],
'group' : record[2],
'expirationtime' : record[3],
'persistent' : record[4] == 'True' } )
return S_OK( data )
def getCredentialsAboutToExpire( self, requiredSecondsLeft, onlyPersistent = True ):
cmd = "SELECT UserDN, UserGroup, ExpirationTime, PersistentFlag FROM `ProxyDB_Proxies`"
cmd += " WHERE TIMESTAMPDIFF( SECOND, ExpirationTime, UTC_TIMESTAMP() ) < %d and TIMESTAMPDIFF( SECOND, ExpirationTime, UTC_TIMESTAMP() ) > 0" % requiredSecondsLeft
if onlyPersistent:
cmd += " AND PersistentFlag = 'True'"
return self._query( cmd )
def setPersistencyFlag( self, userDN, userGroup, persistent = True ):
""" Set the proxy PersistentFlag to the flag value
"""
try:
sUserDN = self._escapeString( userDN )[ 'Value' ]
sUserGroup = self._escapeString( userGroup )[ 'Value' ]
except KeyError:
return S_ERROR( "Can't escape something" )
if persistent:
sqlFlag = "True"
else:
sqlFlag = "False"
retVal = self._query( "SELECT PersistentFlag FROM `ProxyDB_Proxies` WHERE UserDN=%s AND UserGroup=%s" % ( sUserDN, sUserGroup ) )
sqlInsert = True
if retVal[ 'OK' ]:
data = retVal[ 'Value' ]
if len( data ) > 0:
sqlInsert = False
if data[0][0] == sqlFlag:
return S_OK()
if sqlInsert:
#If it's not in the db and we're removing the persistency then do nothing
if not persistent:
return S_OK()
cmd = "INSERT INTO `ProxyDB_Proxies` ( UserDN, UserGroup, Pem, ExpirationTime, PersistentFlag ) VALUES "
cmd += "( %s, %s, '', UTC_TIMESTAMP(), 'True' )" % ( sUserDN, sUserGroup )
else:
cmd = "UPDATE `ProxyDB_Proxies` SET PersistentFlag='%s' WHERE UserDN=%s AND UserGroup=%s" % ( sqlFlag,
sUserDN,
sUserGroup )
retVal = self._update( cmd )
if not retVal[ 'OK' ]:
return retVal
return S_OK()
def getProxiesContent( self, selDict, sortList, start = 0, limit = 0 ):
"""
Function to get the contents of the db
parameters are a filter to the db
"""
fields = ( "UserName", "UserDN", "UserGroup", "ExpirationTime", "PersistentFlag" )
cmd = "SELECT %s FROM `ProxyDB_Proxies`" % ", ".join( fields )
sqlWhere = [ "Pem is not NULL" ]
for field in selDict:
if field not in fields:
continue
fVal = selDict[field]
if type( fVal ) in ( types.DictType, types.TupleType, types.ListType ):
sqlWhere.append( "%s in (%s)" % ( field, ", ".join( [ self._escapeString( str( value ) )[ 'Value' ] for value in fVal ] ) ) )
else:
sqlWhere.append( "%s = %s" % ( field, self._escapeString( str( fVal ) )[ 'Value' ] ) )
sqlOrder = []
if sortList:
for sort in sortList:
if len( sort ) == 1:
sort = ( sort, "DESC" )
elif len( sort ) > 2:
return S_ERROR( "Invalid sort %s" % sort )
if sort[0] not in fields:
return S_ERROR( "Invalid sorting field %s" % sort[0] )
if sort[1].upper() not in ( "ASC", "DESC" ):
return S_ERROR( "Invalid sorting order %s" % sort[1] )
sqlOrder.append( "%s %s" % ( sort[0], sort[1] ) )
if sqlWhere:
cmd = "%s WHERE %s" % ( cmd, " AND ".join( sqlWhere ) )
if sqlOrder:
cmd = "%s ORDER BY %s" % ( cmd, ", ".join( sqlOrder ) )
if limit:
try:
start = int( start )
limit = int( limit )
except ValueError:
return S_ERROR( "start and limit have to be integers" )
cmd += " LIMIT %d,%d" % ( start, limit )
retVal = self._query( cmd )
if not retVal[ 'OK' ]:
return retVal
data = []
for record in retVal[ 'Value' ]:
record = list( record )
if record[4] == 'True':
record[4] = True
else:
record[4] = False
data.append( record )
totalRecords = len( data )
cmd = "SELECT COUNT( UserGroup ) FROM `ProxyDB_Proxies`"
if sqlWhere:
cmd = "%s WHERE %s" % ( cmd, " AND ".join( sqlWhere ) )
retVal = self._query( cmd )
if retVal[ 'OK' ]:
totalRecords = retVal[ 'Value' ][0][0]
return S_OK( { 'ParameterNames' : fields, 'Records' : data, 'TotalRecords' : totalRecords } )
def logAction( self, action, issuerDN, issuerGroup, targetDN, targetGroup ):
"""
Add an action to the log
"""
try:
sAction = self._escapeString( action )[ 'Value' ]
sIssuerDN = self._escapeString( issuerDN )[ 'Value' ]
sIssuerGroup = self._escapeString( issuerGroup )[ 'Value' ]
sTargetDN = self._escapeString( targetDN )[ 'Value' ]
sTargetGroup = self._escapeString( targetGroup )[ 'Value' ]
except KeyError:
return S_ERROR( "Can't escape from death" )
cmd = "INSERT INTO `ProxyDB_Log` ( Action, IssuerDN, IssuerGroup, TargetDN, TargetGroup, Timestamp ) VALUES "
cmd += "( %s, %s, %s, %s, %s, UTC_TIMESTAMP() )" % ( sAction, sIssuerDN, sIssuerGroup, sTargetDN, sTargetGroup )
retVal = self._update( cmd )
if not retVal[ 'OK' ]:
self.log.error( "Can't add a proxy action log: ", retVal[ 'Message' ] )
def purgeLogs( self ):
"""
Purge expired requests from the db
"""
cmd = "DELETE FROM `ProxyDB_Log` WHERE TIMESTAMPDIFF( SECOND, Timestamp, UTC_TIMESTAMP() ) > 15552000"
return self._update( cmd )
def getLogsContent( self, selDict, sortList, start = 0, limit = 0 ):
"""
Function to get the contents of the logs table
parameters are a filter to the db
"""
fields = ( "Action", "IssuerDN", "IssuerGroup", "TargetDN", "TargetGroup", "Timestamp" )
cmd = "SELECT %s FROM `ProxyDB_Log`" % ", ".join( fields )
if selDict:
qr = []
if 'beforeDate' in selDict:
qr.append( "Timestamp < %s" % self._escapeString( selDict[ 'beforeDate' ] )[ 'Value' ] )
del( selDict[ 'beforeDate' ] )
if 'afterDate' in selDict:
qr.append( "Timestamp > %s" % self._escapeString( selDict[ 'afterDate' ] )[ 'Value' ] )
del( selDict[ 'afterDate' ] )
for field in selDict:
qr.append( "(%s)" % " OR ".join( [ "%s=%s" % ( field, self._escapeString( str( value ) )[ 'Value' ] ) for value in selDict[field] ] ) )
whereStr = " WHERE %s" % " AND ".join( qr )
cmd += whereStr
else:
whereStr = ""
if sortList:
cmd += " ORDER BY %s" % ", ".join( [ "%s %s" % ( sort[0], sort[1] ) for sort in sortList ] )
if limit:
cmd += " LIMIT %d,%d" % ( start, limit )
retVal = self._query( cmd )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
totalRecords = len( data )
cmd = "SELECT COUNT( Timestamp ) FROM `ProxyDB_Log`"
cmd += whereStr
retVal = self._query( cmd )
if retVal[ 'OK' ]:
totalRecords = retVal[ 'Value' ][0][0]
return S_OK( { 'ParameterNames' : fields, 'Records' : data, 'TotalRecords' : totalRecords } )
def generateToken( self, requesterDN, requesterGroup, numUses = 1, lifeTime = 0, retries = 10 ):
"""
Generate and return a token and the number of uses for the token
"""
if not lifeTime:
lifeTime = gConfig.getValue( "/DIRAC/VOPolicy/TokenLifeTime", self.__defaultTokenLifetime )
maxUses = gConfig.getValue( "/DIRAC/VOPolicy/TokenMaxUses", self.__defaultTokenMaxUses )
numUses = max( 1, min( numUses, maxUses ) )
m = hashlib.md5()
rndData = "%s.%s.%s.%s" % ( time.time(), random.random(), numUses, lifeTime )
m.update( rndData )
token = m.hexdigest()
fieldsSQL = ", ".join( ( "Token", "RequesterDN", "RequesterGroup", "ExpirationTime", "UsesLeft" ) )
valuesSQL = ", ".join( ( self._escapeString( token )['Value'],
self._escapeString( requesterDN )['Value'],
self._escapeString( requesterGroup )['Value'],
"TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )" % int( lifeTime ),
str( numUses ) ) )
insertSQL = "INSERT INTO `ProxyDB_Tokens` ( %s ) VALUES ( %s )" % ( fieldsSQL, valuesSQL )
result = self._update( insertSQL )
if result[ 'OK' ]:
return S_OK( ( token, numUses ) )
if result[ 'Message' ].find( "uplicate entry" ) > -1:
if retries:
        return self.generateToken( requesterDN, requesterGroup, numUses, lifeTime, retries - 1 )
return S_ERROR( "Max retries reached for token generation. Aborting" )
return result
def purgeExpiredTokens( self ):
delSQL = "DELETE FROM `ProxyDB_Tokens` WHERE ExpirationTime < UTC_TIMESTAMP() OR UsesLeft < 1"
return self._update( delSQL )
def useToken( self, token, requesterDN, requesterGroup ):
sqlCond = " AND ".join( ( "UsesLeft > 0",
"Token=%s" % self._escapeString( token )['Value'],
"RequesterDN=%s" % self._escapeString( requesterDN )['Value'],
"RequesterGroup=%s" % self._escapeString( requesterGroup )['Value'],
"ExpirationTime >= UTC_TIMESTAMP()" ) )
updateSQL = "UPDATE `ProxyDB_Tokens` SET UsesLeft = UsesLeft - 1 WHERE %s" % sqlCond
result = self._update( updateSQL )
if not result[ 'OK' ]:
return result
return S_OK( result[ 'Value' ] > 0 )
def __cleanExpNotifs( self ):
cmd = "DELETE FROM `ProxyDB_ExpNotifs` WHERE ExpirationTime < UTC_TIMESTAMP()"
return self._update( cmd )
def sendExpirationNotifications( self ):
result = self.__cleanExpNotifs()
if not result[ 'OK' ]:
return result
cmd = "SELECT UserDN, UserGroup, LifeLimit FROM `ProxyDB_ExpNotifs`"
result = self._query( cmd )
if not result[ 'OK' ]:
return result
notifDone = dict( [ ( ( row[0], row[1] ), row[2] ) for row in result[ 'Value' ] ] )
notifLimits = sorted( [ int( x ) for x in self.getCSOption( "NotificationTimes", ProxyDB.NOTIFICATION_TIMES ) ] )
sqlSel = "UserDN, UserGroup, TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime )"
sqlCond = "TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) < %d" % max( notifLimits )
cmd = "SELECT %s FROM `ProxyDB_Proxies` WHERE %s" % ( sqlSel, sqlCond )
result = self._query( cmd )
if not result[ 'OK' ]:
return result
pilotProps = ( Properties.GENERIC_PILOT, Properties.PILOT )
data = result[ 'Value' ]
sent = []
for row in data:
userDN, group, lTime = row
#If it's a pilot proxy, skip it
if Registry.groupHasProperties( group, pilotProps ):
continue
      #If it doesn't have the AutoUploadProxy option, skip it
if not Registry.getGroupOption( group, "AutoUploadProxy", False ):
continue
notKey = ( userDN, group )
for notifLimit in notifLimits:
if notifLimit < lTime:
#Not yet in this notification limit
continue
if notKey in notifDone and notifDone[ notKey ] <= notifLimit:
#Already notified for this notification limit
break
if not self._notifyProxyAboutToExpire( userDN, group, lTime, notifLimit ):
#Cannot send notification, retry later
break
try:
sUserDN = self._escapeString( userDN )[ 'Value' ]
sGroup = self._escapeString( group )[ 'Value' ]
except KeyError:
return S_ERROR( "OOPS" )
if notKey not in notifDone:
values = "( %s, %s, %d, TIMESTAMPADD( SECOND, %s, UTC_TIMESTAMP() ) )" % ( sUserDN, sGroup, notifLimit, lTime )
cmd = "INSERT INTO `ProxyDB_ExpNotifs` ( UserDN, UserGroup, LifeLimit, ExpirationTime ) VALUES %s" % values
result = self._update( cmd )
if not result[ 'OK' ]:
gLogger.error( "Could not mark notification as sent", result[ 'Message' ] )
else:
values = "LifeLimit = %d, ExpirationTime = TIMESTAMPADD( SECOND, %s, UTC_TIMESTAMP() )" % ( notifLimit, lTime )
cmd = "UPDATE `ProxyDB_ExpNotifs` SET %s WHERE UserDN = %s AND UserGroup = %s" % ( values, sUserDN, sGroup )
result = self._update( cmd )
if not result[ 'OK' ]:
gLogger.error( "Could not mark notification as sent", result[ 'Message' ] )
sent.append( ( userDN, group, lTime ) )
notifDone[ notKey ] = notifLimit
return S_OK( sent )
def _notifyProxyAboutToExpire( self, userDN, userGroup, lTime, notifLimit ):
result = Registry.getUsernameForDN( userDN )
if not result[ 'OK' ]:
return False
userName = result[ 'Value' ]
userEMail = Registry.getUserOption( userName, "Email", "" )
if not userEMail:
gLogger.error( "Could not discover user email", userName )
return False
daysLeft = int( lTime / 86400 )
msgSubject = "Your proxy uploaded to DIRAC will expire in %d days" % daysLeft
msgBody = """\
Dear %s,
The proxy you uploaded to DIRAC will expire in approximately %d days. The proxy
information is:
DN: %s
Group: %s
If you plan to keep using these credentials, please upload a newer proxy to
DIRAC by executing:
$ dirac-proxy-init -UP -g %s
If you have been issued a different certificate, please make sure you have a
proxy uploaded with that certificate.
Cheers,
DIRAC's Proxy Manager
""" % ( userName, daysLeft, userDN, userGroup, userGroup )
result = self.__notifClient.sendMail( userEMail, msgSubject, msgBody, fromAddress = 'proxymanager@diracgrid.org' )
if not result[ 'OK' ]:
gLogger.error( "Could not send email", result[ 'Message' ] )
return False
return True
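
# Illustrative sketch, defined at module level and not used by ProxyDB itself:
# generateToken above derives its token as the hex MD5 digest of a string
# mixing the clock, a random number and the token parameters. Reproducing the
# derivation with the modules already imported at the top of this file shows
# why the result always fits the VARCHAR(64) Token column; the helper name is
# an assumption of this sketch.
def _exampleTokenDigest( numUses = 1, lifeTime = 86400 * 7 ):
  rndData = "%s.%s.%s.%s" % ( time.time(), random.random(), numUses, lifeTime )
  m = hashlib.md5()
  m.update( rndData )
  return m.hexdigest() # 32 hex characters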
| marcelovilaca/DIRAC | FrameworkSystem/DB/ProxyDB.py | Python | gpl-3.0 | 43,566 | [
"DIRAC"
] | 82ca59a49bc1c6c10fecc5bae82dda166baebcc5f845db76a04fab018eb68c3c |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.tests.base import GeoNodeBaseTestSupport
import os
import json
import base64
import urllib2
import logging
import gisdata
import contextlib
from django.conf import settings
from django.http import HttpRequest
from django.core.urlresolvers import reverse
from tastypie.test import ResourceTestCaseMixin
from django.contrib.auth import get_user_model
from guardian.shortcuts import get_anonymous_user, assign_perm, remove_perm
from geonode import geoserver
from geonode.base.populate_test_data import all_public
from geonode.people.models import Profile
from geonode.people.utils import get_valid_user
from geonode.layers.models import Layer
from geonode.groups.models import Group
from geonode.utils import check_ogc_backend
from geonode.tests.utils import check_layer
from geonode.decorators import on_ogc_backend
from geonode.geoserver.helpers import gs_slurp
from geonode.geoserver.upload import geoserver_upload
from geonode.layers.populate_layers_data import create_layer_data
from .utils import (purge_geofence_all,
get_users_with_perms,
get_geofence_rules_count,
get_highest_priority,
set_geofence_all,
set_geowebcache_invalidate_cache,
sync_geofence_with_guardian,
sync_resources_with_guardian)
logger = logging.getLogger(__name__)
def _log(msg, *args):
logger.info(msg, *args)
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class SecurityTest(GeoNodeBaseTestSupport):
type = 'layer'
"""
Tests for the Geonode security app.
"""
def setUp(self):
super(SecurityTest, self).setUp()
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_login_middleware(self):
"""
Tests the Geonode login required authentication middleware.
"""
from geonode.security.middleware import LoginRequiredMiddleware
middleware = LoginRequiredMiddleware()
white_list = [
reverse('account_ajax_login'),
reverse('account_confirm_email', kwargs=dict(key='test')),
reverse('account_login'),
reverse('account_reset_password'),
reverse('forgot_username'),
reverse('layer_acls'),
reverse('layer_resolve_user'),
]
black_list = [
reverse('account_signup'),
reverse('document_browse'),
reverse('maps_browse'),
reverse('layer_browse'),
reverse('layer_detail', kwargs=dict(layername='geonode:Test')),
reverse('layer_remove', kwargs=dict(layername='geonode:Test')),
reverse('profile_browse'),
]
request = HttpRequest()
request.user = get_anonymous_user()
        # Requests should be redirected to the `redirect_to` path when an
        # un-authenticated user attempts to visit a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
if response:
self.assertEqual(response.status_code, 302)
self.assertTrue(
response.get('Location').startswith(
middleware.redirect_to))
# The middleware should return None when an un-authenticated user
# attempts to visit a white-listed url.
for path in white_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(
response,
msg="Middleware activated for white listed path: {0}".format(path))
self.client.login(username='admin', password='admin')
admin = get_user_model().objects.get(username='admin')
self.assertTrue(admin.is_authenticated())
request.user = admin
# The middleware should return None when an authenticated user attempts
# to visit a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(response)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_session_ctrl_middleware(self):
"""
Tests the Geonode session control authentication middleware.
"""
from geonode.security.middleware import SessionControlMiddleware
middleware = SessionControlMiddleware()
request = HttpRequest()
self.client.login(username='admin', password='admin')
admin = get_user_model().objects.get(username='admin')
self.assertTrue(admin.is_authenticated())
request.user = admin
request.path = reverse('layer_browse')
middleware.process_request(request)
response = self.client.get(request.path)
self.assertEqual(response.status_code, 200)
# Simulating Token expired (or not set)
request.session = {}
request.session['access_token'] = None
middleware.process_request(request)
response = self.client.get('/admin')
self.assertEqual(response.status_code, 302)
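
# A minimal sketch of the behaviour exercised by the test above, assuming the
# same old-style Django middleware contract (process_request returns None to
# let a request through, or a response to short-circuit it). This is an
# illustration, not GeoNode's actual LoginRequiredMiddleware; WHITE_LIST and
# REDIRECT_TO are placeholders for this sketch.
from django.http import HttpResponseRedirect


class ExampleLoginRequiredMiddleware(object):
    WHITE_LIST = ('/account/login/',)
    REDIRECT_TO = '/account/login/'

    def process_request(self, request):
        if request.user.is_authenticated() or request.path in self.WHITE_LIST:
            # returning None lets the request through, which is what the
            # white-list assertions above check for
            return None
        return HttpResponseRedirect(self.REDIRECT_TO)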
class SecurityViewsTests(ResourceTestCaseMixin, GeoNodeBaseTestSupport):
def setUp(self):
super(SecurityViewsTests, self).setUp()
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
settings.OGC_SERVER['default']['GEOFENCE_SECURITY_ENABLED'] = True
self.user = 'admin'
self.passwd = 'admin'
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_attributes_sats_refresh(self):
layers = Layer.objects.all()[:2].values_list('id', flat=True)
test_layer = Layer.objects.get(id=layers[0])
self.client.login(username='admin', password='admin')
layer_attributes = test_layer.attributes
self.assertIsNotNone(layer_attributes)
test_layer.attribute_set.all().delete()
test_layer.save()
data = {
'uuid': test_layer.uuid
}
resp = self.client.post(reverse('attributes_sats_refresh'), data)
self.assertHttpOK(resp)
self.assertEquals(layer_attributes.count(), test_layer.attributes.count())
from geonode.geoserver.helpers import set_attributes_from_geoserver
test_layer.attribute_set.all().delete()
test_layer.save()
set_attributes_from_geoserver(test_layer, overwrite=True)
self.assertEquals(layer_attributes.count(), test_layer.attributes.count())
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_invalidate_tiledlayer_cache(self):
layers = Layer.objects.all()[:2].values_list('id', flat=True)
test_layer = Layer.objects.get(id=layers[0])
self.client.login(username='admin', password='admin')
data = {
'uuid': test_layer.uuid
}
resp = self.client.post(reverse('invalidate_tiledlayer_cache'), data)
self.assertHttpOK(resp)
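
# Hedged sketch of the kind of GeoWebCache REST call that
# set_geowebcache_invalidate_cache (imported above) performs under the hood.
# The masstruncate endpoint and the truncateLayer payload follow the public
# GWC REST convention; treat the exact path and payload as assumptions of
# this sketch rather than code taken from GeoNode.
def example_truncate_gwc_layer(base_url, user, passwd, layer_alternate):
    import requests
    from requests.auth import HTTPBasicAuth
    payload = ("<truncateLayer><layerName>%s</layerName></truncateLayer>"
               % layer_alternate)
    return requests.post(base_url + 'gwc/rest/masstruncate',
                         data=payload,
                         headers={'Content-type': 'text/xml'},
                         auth=HTTPBasicAuth(user, passwd))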
class BulkPermissionsTests(ResourceTestCaseMixin, GeoNodeBaseTestSupport):
def setUp(self):
super(BulkPermissionsTests, self).setUp()
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
settings.OGC_SERVER['default']['GEOFENCE_SECURITY_ENABLED'] = True
self.user = 'admin'
self.passwd = 'admin'
self.list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'layers'})
self.bulk_perms_url = reverse('bulk_permissions')
all_public()
self.perm_spec = {
"users": {"admin": ["view_resourcebase"]}, "groups": {}}
def test_set_bulk_permissions(self):
"""Test that after restrict view permissions on two layers
bobby is unable to see them"""
geofence_rules_count = 0
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
purge_geofence_all()
# Reset GeoFence Rules
geofence_rules_count = get_geofence_rules_count()
self.assertEquals(geofence_rules_count, 0)
layers = Layer.objects.all()[:2].values_list('id', flat=True)
layers_id = map(lambda x: str(x), layers)
test_perm_layer = Layer.objects.get(id=layers[0])
self.client.login(username='admin', password='admin')
resp = self.client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
data = {
'permissions': json.dumps(self.perm_spec),
'resources': layers_id
}
resp = self.client.post(self.bulk_perms_url, data)
self.assertHttpOK(resp)
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
# Check GeoFence Rules have been correctly created
geofence_rules_count = get_geofence_rules_count()
_log("1. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 4)
set_geofence_all(test_perm_layer)
geofence_rules_count = get_geofence_rules_count()
_log("2. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 5)
self.client.logout()
self.client.login(username='bobby', password='bob')
resp = self.client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 7)
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
perms = get_users_with_perms(test_perm_layer)
_log("3. perms: %s " % perms)
sync_geofence_with_guardian(test_perm_layer, perms, user='bobby')
# Check GeoFence Rules have been correctly created
geofence_rules_count = get_geofence_rules_count()
_log("4. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 5)
# Validate maximum priority
geofence_rules_highest_priority = get_highest_priority()
_log("5. geofence_rules_highest_priority: %s " % geofence_rules_highest_priority)
self.assertTrue(geofence_rules_highest_priority > 0)
# Try GWC Invalidation
# - it should not work here since the layer has not been uploaded to GeoServer
set_geowebcache_invalidate_cache(test_perm_layer.alternate)
url = settings.OGC_SERVER['default']['LOCATION']
user = settings.OGC_SERVER['default']['USER']
passwd = settings.OGC_SERVER['default']['PASSWORD']
import requests
from requests.auth import HTTPBasicAuth
r = requests.get(url + 'gwc/rest/seed/%s.json' % test_perm_layer.alternate,
auth=HTTPBasicAuth(user, passwd))
self.assertEquals(r.status_code, 400)
geofence_rules_count = 0
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
purge_geofence_all()
# Reset GeoFence Rules
geofence_rules_count = get_geofence_rules_count()
self.assertEquals(geofence_rules_count, 0)
def test_bobby_cannot_set_all(self):
"""Test that Bobby can set the permissions only only on the ones
for which he has the right"""
layer = Layer.objects.all()[0]
self.client.login(username='admin', password='admin')
# give bobby the right to change the layer permissions
assign_perm('change_resourcebase', Profile.objects.get(username='bobby'), layer.get_self_resource())
self.client.logout()
self.client.login(username='bobby', password='bob')
layer2 = Layer.objects.all()[1]
data = {
'permissions': json.dumps({"users": {"bobby": ["view_resourcebase"]}, "groups": {}}),
'resources': [layer.id, layer2.id]
}
resp = self.client.post(self.bulk_perms_url, data)
self.assertTrue(layer2.title in json.loads(resp.content)['not_changed'])
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_perm_specs_synchronization(self):
"""Test that Layer is correctly synchronized with guardian:
1. Set permissions to all users
2. Set permissions to a single user
3. Set permissions to a group of users
4. Try to sync a layer from GeoServer
"""
layer = Layer.objects.all()[0]
self.client.login(username='admin', password='admin')
# Reset GeoFence Rules
purge_geofence_all()
geofence_rules_count = get_geofence_rules_count()
self.assertEquals(geofence_rules_count, 0)
perm_spec = {'users': {'AnonymousUser': []}}
layer.set_permissions(perm_spec)
geofence_rules_count = get_geofence_rules_count()
_log("1. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 0)
perm_spec = {
"users": {"admin": ["view_resourcebase"]}, "groups": {}}
layer.set_permissions(perm_spec)
geofence_rules_count = get_geofence_rules_count()
_log("2. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 2)
perm_spec = {'users': {"admin": ['change_layer_data']}}
layer.set_permissions(perm_spec)
geofence_rules_count = get_geofence_rules_count()
_log("3. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 2)
perm_spec = {'groups': {'bar': ['view_resourcebase']}}
layer.set_permissions(perm_spec)
geofence_rules_count = get_geofence_rules_count()
_log("4. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 2)
perm_spec = {'groups': {'bar': ['change_resourcebase']}}
layer.set_permissions(perm_spec)
geofence_rules_count = get_geofence_rules_count()
_log("5. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 0)
# Reset GeoFence Rules
purge_geofence_all()
geofence_rules_count = get_geofence_rules_count()
self.assertEquals(geofence_rules_count, 0)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_layer_upload_with_time(self):
""" Try uploading a layer and verify that the user can administrate
his own layer despite not being a site administrator.
"""
try:
            # log in as bobby, a regular (non-admin) user
self.assertTrue(self.client.login(username='bobby', password='bob'))
# grab bobby
bobby = get_user_model().objects.get(username="bobby")
anonymous_group, created = Group.objects.get_or_create(name='anonymous')
# Upload to GeoServer
saved_layer = geoserver_upload(
Layer(),
os.path.join(
gisdata.GOOD_DATA,
'time/'
"boxes_with_date.shp"),
bobby,
'boxes_with_date_by_bobby',
overwrite=True
)
# Test that layer owner can wipe GWC Cache
ignore_errors = False
skip_unadvertised = False
skip_geonode_registered = False
remove_deleted = True
verbosity = 2
owner = bobby
workspace = 'geonode'
filter = None
store = None
permissions = {
'users': {"bobby": ['view_resourcebase', 'change_layer_data']},
'groups': {anonymous_group: ['view_resourcebase']},
}
gs_slurp(
ignore_errors,
verbosity=verbosity,
owner=owner,
workspace=workspace,
store=store,
filter=filter,
skip_unadvertised=skip_unadvertised,
skip_geonode_registered=skip_geonode_registered,
remove_deleted=remove_deleted,
permissions=permissions,
execute_signals=True)
saved_layer = Layer.objects.get(title='boxes_with_date_by_bobby')
check_layer(saved_layer)
from lxml import etree
from geonode.geoserver.helpers import get_store
from geonode.geoserver.signals import gs_catalog
self.assertIsNotNone(saved_layer)
workspace, name = saved_layer.alternate.split(':')
self.assertIsNotNone(workspace)
self.assertIsNotNone(name)
ws = gs_catalog.get_workspace(workspace)
self.assertIsNotNone(ws)
store = get_store(gs_catalog, saved_layer.store, workspace=ws)
self.assertIsNotNone(store)
url = settings.OGC_SERVER['default']['LOCATION']
user = settings.OGC_SERVER['default']['USER']
passwd = settings.OGC_SERVER['default']['PASSWORD']
rest_path = 'rest/workspaces/geonode/datastores/{lyr_name}/featuretypes/{lyr_name}.xml'.\
format(lyr_name=name)
import requests
from requests.auth import HTTPBasicAuth
r = requests.get(url + rest_path,
auth=HTTPBasicAuth(user, passwd))
self.assertEquals(r.status_code, 200)
_log(r.text)
featureType = etree.ElementTree(etree.fromstring(r.text))
metadata = featureType.findall('./[metadata]')
self.assertEquals(len(metadata), 0)
payload = """<featureType>
<metadata>
<entry key="elevation">
<dimensionInfo>
<enabled>false</enabled>
</dimensionInfo>
</entry>
<entry key="time">
<dimensionInfo>
<enabled>true</enabled>
<attribute>date</attribute>
<presentation>LIST</presentation>
<units>ISO8601</units>
<defaultValue/>
<nearestMatchEnabled>false</nearestMatchEnabled>
</dimensionInfo>
</entry>
</metadata></featureType>"""
r = requests.put(url + rest_path,
data=payload,
headers={
'Content-type': 'application/xml'
},
auth=HTTPBasicAuth(user, passwd))
self.assertEquals(r.status_code, 200)
r = requests.get(url + rest_path,
auth=HTTPBasicAuth(user, passwd))
self.assertEquals(r.status_code, 200)
_log(r.text)
featureType = etree.ElementTree(etree.fromstring(r.text))
metadata = featureType.findall('./[metadata]')
_log(etree.tostring(metadata[0], encoding='utf8', method='xml'))
self.assertEquals(len(metadata), 1)
saved_layer.set_default_permissions()
from geonode.geoserver.views import get_layer_capabilities
capab = get_layer_capabilities(saved_layer, tolerant=True)
self.assertIsNotNone(capab)
wms_capabilities_url = reverse('capabilities_layer', args=[saved_layer.id])
wms_capabilities_resp = self.client.get(wms_capabilities_url)
self.assertTrue(wms_capabilities_resp.status_code, 200)
all_times = None
if wms_capabilities_resp.status_code >= 200 and wms_capabilities_resp.status_code < 400:
wms_capabilities = wms_capabilities_resp.getvalue()
if wms_capabilities:
namespaces = {'wms': 'http://www.opengis.net/wms',
'xlink': 'http://www.w3.org/1999/xlink',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
e = etree.fromstring(wms_capabilities)
for atype in e.findall(
"./[wms:Name='%s']/wms:Dimension[@name='time']" % (saved_layer.alternate), namespaces):
dim_name = atype.get('name')
if dim_name:
dim_name = str(dim_name).lower()
if dim_name == 'time':
dim_values = atype.text
if dim_values:
all_times = dim_values.split(",")
break
self.assertIsNotNone(all_times)
self.assertEquals(all_times,
['2000-03-01T00:00:00.000Z', '2000-03-02T00:00:00.000Z',
'2000-03-03T00:00:00.000Z', '2000-03-04T00:00:00.000Z',
'2000-03-05T00:00:00.000Z', '2000-03-06T00:00:00.000Z',
'2000-03-07T00:00:00.000Z', '2000-03-08T00:00:00.000Z',
'2000-03-09T00:00:00.000Z', '2000-03-10T00:00:00.000Z',
'2000-03-11T00:00:00.000Z', '2000-03-12T00:00:00.000Z',
'2000-03-13T00:00:00.000Z', '2000-03-14T00:00:00.000Z',
'2000-03-15T00:00:00.000Z', '2000-03-16T00:00:00.000Z',
'2000-03-17T00:00:00.000Z', '2000-03-18T00:00:00.000Z',
'2000-03-19T00:00:00.000Z', '2000-03-20T00:00:00.000Z',
'2000-03-21T00:00:00.000Z', '2000-03-22T00:00:00.000Z',
'2000-03-23T00:00:00.000Z', '2000-03-24T00:00:00.000Z',
'2000-03-25T00:00:00.000Z', '2000-03-26T00:00:00.000Z',
'2000-03-27T00:00:00.000Z', '2000-03-28T00:00:00.000Z',
'2000-03-29T00:00:00.000Z', '2000-03-30T00:00:00.000Z',
'2000-03-31T00:00:00.000Z', '2000-04-01T00:00:00.000Z',
'2000-04-02T00:00:00.000Z', '2000-04-03T00:00:00.000Z',
'2000-04-04T00:00:00.000Z', '2000-04-05T00:00:00.000Z',
'2000-04-06T00:00:00.000Z', '2000-04-07T00:00:00.000Z',
'2000-04-08T00:00:00.000Z', '2000-04-09T00:00:00.000Z',
'2000-04-10T00:00:00.000Z', '2000-04-11T00:00:00.000Z',
'2000-04-12T00:00:00.000Z', '2000-04-13T00:00:00.000Z',
'2000-04-14T00:00:00.000Z', '2000-04-15T00:00:00.000Z',
'2000-04-16T00:00:00.000Z', '2000-04-17T00:00:00.000Z',
'2000-04-18T00:00:00.000Z', '2000-04-19T00:00:00.000Z',
'2000-04-20T00:00:00.000Z', '2000-04-21T00:00:00.000Z',
'2000-04-22T00:00:00.000Z', '2000-04-23T00:00:00.000Z',
'2000-04-24T00:00:00.000Z', '2000-04-25T00:00:00.000Z',
'2000-04-26T00:00:00.000Z', '2000-04-27T00:00:00.000Z',
'2000-04-28T00:00:00.000Z', '2000-04-29T00:00:00.000Z',
'2000-04-30T00:00:00.000Z', '2000-05-01T00:00:00.000Z',
'2000-05-02T00:00:00.000Z', '2000-05-03T00:00:00.000Z',
'2000-05-04T00:00:00.000Z', '2000-05-05T00:00:00.000Z',
'2000-05-06T00:00:00.000Z', '2000-05-07T00:00:00.000Z',
'2000-05-08T00:00:00.000Z', '2000-05-09T00:00:00.000Z',
'2000-05-10T00:00:00.000Z', '2000-05-11T00:00:00.000Z',
'2000-05-12T00:00:00.000Z', '2000-05-13T00:00:00.000Z',
'2000-05-14T00:00:00.000Z', '2000-05-15T00:00:00.000Z',
'2000-05-16T00:00:00.000Z', '2000-05-17T00:00:00.000Z',
'2000-05-18T00:00:00.000Z', '2000-05-19T00:00:00.000Z',
'2000-05-20T00:00:00.000Z', '2000-05-21T00:00:00.000Z',
'2000-05-22T00:00:00.000Z', '2000-05-23T00:00:00.000Z',
'2000-05-24T00:00:00.000Z', '2000-05-25T00:00:00.000Z',
'2000-05-26T00:00:00.000Z', '2000-05-27T00:00:00.000Z',
'2000-05-28T00:00:00.000Z', '2000-05-29T00:00:00.000Z',
'2000-05-30T00:00:00.000Z', '2000-05-31T00:00:00.000Z',
'2000-06-01T00:00:00.000Z', '2000-06-02T00:00:00.000Z',
'2000-06-03T00:00:00.000Z', '2000-06-04T00:00:00.000Z',
'2000-06-05T00:00:00.000Z', '2000-06-06T00:00:00.000Z',
'2000-06-07T00:00:00.000Z', '2000-06-08T00:00:00.000Z'])
saved_layer.set_default_permissions()
url = reverse('layer_metadata', args=[saved_layer.service_typename])
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
finally:
# Clean up and completely delete the layer
try:
saved_layer.delete()
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.helpers import cleanup
cleanup(saved_layer.name, saved_layer.uuid)
except BaseException:
pass
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_layer_permissions(self):
try:
# Test permissions on a layer
# grab bobby
bobby = get_user_model().objects.get(username="bobby")
layers = Layer.objects.all()[:2].values_list('id', flat=True)
test_perm_layer = Layer.objects.get(id=layers[0])
thefile = os.path.join(
gisdata.VECTOR_DATA,
'san_andres_y_providencia_poi.shp')
layer = geoserver_upload(
test_perm_layer,
thefile,
bobby,
'san_andres_y_providencia_poi',
overwrite=True
)
self.assertIsNotNone(layer)
# Reset GeoFence Rules
purge_geofence_all()
geofence_rules_count = get_geofence_rules_count()
self.assertTrue(geofence_rules_count == 0)
ignore_errors = False
skip_unadvertised = False
skip_geonode_registered = False
remove_deleted = True
verbosity = 2
owner = get_valid_user('admin')
workspace = 'geonode'
filter = None
store = None
permissions = {'users': {"admin": ['change_layer_data']}}
gs_slurp(
ignore_errors,
verbosity=verbosity,
owner=owner,
console=StreamToLogger(logger, logging.INFO),
workspace=workspace,
store=store,
filter=filter,
skip_unadvertised=skip_unadvertised,
skip_geonode_registered=skip_geonode_registered,
remove_deleted=remove_deleted,
permissions=permissions,
execute_signals=True)
layer = Layer.objects.get(title='san_andres_y_providencia_poi')
check_layer(layer)
geofence_rules_count = get_geofence_rules_count()
_log("0. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 2)
# Set the layer private for not authenticated users
layer.set_permissions({'users': {'AnonymousUser': []}})
url = 'http://localhost:8080/geoserver/geonode/ows?' \
'LAYERS=geonode%3Asan_andres_y_providencia_poi&STYLES=' \
'&FORMAT=image%2Fpng&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap' \
'&SRS=EPSG%3A4326' \
'&BBOX=-81.394599749999,13.316009005566,' \
'-81.370560451855,13.372728455566' \
'&WIDTH=217&HEIGHT=512'
# test view_resourcebase permission on anonymous user
request = urllib2.Request(url)
response = urllib2.urlopen(request)
self.assertTrue(
response.info().getheader('Content-Type'),
'application/vnd.ogc.se_xml;charset=UTF-8'
)
# test WMS with authenticated user that has not view_resourcebase:
# the layer must be not accessible (response is xml)
request = urllib2.Request(url)
base64string = base64.encodestring(
'%s:%s' % ('bobby', 'bob')).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
response = urllib2.urlopen(request)
self.assertTrue(
response.info().getheader('Content-Type'),
'application/vnd.ogc.se_xml;charset=UTF-8'
)
# test WMS with authenticated user that has view_resourcebase: the layer
# must be accessible (response is image)
assign_perm('view_resourcebase', bobby, layer.get_self_resource())
request = urllib2.Request(url)
base64string = base64.encodestring(
'%s:%s' % ('bobby', 'bob')).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
response = urllib2.urlopen(request)
self.assertTrue(response.info().getheader('Content-Type'), 'image/png')
# test change_layer_data
# would be nice to make a WFS/T request and test results, but this
# would work only on PostGIS layers
# test change_layer_style
url = 'http://localhost:8080/geoserver/rest/workspaces/geonode/styles/san_andres_y_providencia_poi.xml'
sld = """<?xml version="1.0" encoding="UTF-8"?>
<sld:StyledLayerDescriptor xmlns:sld="http://www.opengis.net/sld"
xmlns:gml="http://www.opengis.net/gml" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.0.0"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<sld:NamedLayer>
<sld:Name>geonode:san_andres_y_providencia_poi</sld:Name>
<sld:UserStyle>
<sld:Name>san_andres_y_providencia_poi</sld:Name>
<sld:Title>san_andres_y_providencia_poi</sld:Title>
<sld:IsDefault>1</sld:IsDefault>
<sld:FeatureTypeStyle>
<sld:Rule>
<sld:PointSymbolizer>
<sld:Graphic>
<sld:Mark>
<sld:Fill>
<sld:CssParameter name="fill">#8A7700
</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#bbffff
</sld:CssParameter>
</sld:Stroke>
</sld:Mark>
<sld:Size>10</sld:Size>
</sld:Graphic>
</sld:PointSymbolizer>
</sld:Rule>
</sld:FeatureTypeStyle>
</sld:UserStyle>
</sld:NamedLayer>
</sld:StyledLayerDescriptor>"""
# user without change_layer_style cannot edit it
self.assertTrue(self.client.login(username='bobby', password='bob'))
response = self.client.put(url, sld, content_type='application/vnd.ogc.sld+xml')
self.assertEquals(response.status_code, 404)
# user with change_layer_style can edit it
assign_perm('change_layer_style', bobby, layer)
perm_spec = {
'users': {
'bobby': ['view_resourcebase',
'change_resourcebase', ]
}
}
layer.set_permissions(perm_spec)
response = self.client.put(url, sld, content_type='application/vnd.ogc.sld+xml')
finally:
try:
layer.delete()
except BaseException:
pass
class PermissionsTest(GeoNodeBaseTestSupport):
"""Tests GeoNode permissions
"""
perm_spec = {
"users": {
"admin": [
"change_resourcebase",
"change_resourcebase_permissions",
"view_resourcebase"]},
"groups": {}}
# Permissions Tests
# Users
# - admin (pk=2)
# - bobby (pk=1)
def setUp(self):
super(PermissionsTest, self).setUp()
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
settings.OGC_SERVER['default']['GEOFENCE_SECURITY_ENABLED'] = True
self.user = 'admin'
self.passwd = 'admin'
create_layer_data()
self.anonymous_user = get_anonymous_user()
def test_layer_set_default_permissions(self):
"""Verify that Layer.set_default_permissions is behaving as expected
"""
# Get a Layer object to work with
layer = Layer.objects.all()[0]
# Set the default permissions
layer.set_default_permissions()
# Test that the anonymous user can read
self.assertTrue(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that the owner user can read
self.assertTrue(
layer.owner.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that the owner user can download it
self.assertTrue(
layer.owner.has_perm(
'download_resourcebase',
layer.get_self_resource()))
# Test that the owner user can edit metadata
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase_metadata',
layer.get_self_resource()))
# Test that the owner user can edit data if is vector type
if layer.storeType == 'dataStore':
self.assertTrue(
layer.owner.has_perm(
'change_layer_data',
layer))
# Test that the owner user can edit styles
self.assertTrue(
layer.owner.has_perm(
'change_layer_style',
layer))
# Test that the owner can manage the layer
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase',
layer.get_self_resource()))
self.assertTrue(
layer.owner.has_perm(
'delete_resourcebase',
layer.get_self_resource()))
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase_permissions',
layer.get_self_resource()))
self.assertTrue(
layer.owner.has_perm(
'publish_resourcebase',
layer.get_self_resource()))
def test_set_layer_permissions(self):
"""Verify that the set_layer_permissions view is behaving as expected
"""
# Get a layer to work with
layer = Layer.objects.all()[0]
# FIXME Test a comprehensive set of permissions specifications
# Set the Permissions
layer.set_permissions(self.perm_spec)
# Test that the permissions for the anonymous user are set
self.assertFalse(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that previous permissions for users other than the ones specified in
# the perm_spec (and the layer's owner) were removed
current_perms = layer.get_all_level_info()
self.assertEqual(len(current_perms['users'].keys()), 2)
# Test that the User permissions specified in the perm_spec were
# applied properly
for username, perm in self.perm_spec['users'].items():
user = get_user_model().objects.get(username=username)
self.assertTrue(user.has_perm(perm, layer.get_self_resource()))
def test_ajax_layer_permissions(self):
"""Verify that the ajax_layer_permissions view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].id
invalid_layer_id = 9999999
# Test that an invalid layer id is handled properly
response = self.client.post(
reverse(
'resource_permissions', args=(
invalid_layer_id,)), data=json.dumps(
self.perm_spec), content_type="application/json")
self.assertEquals(response.status_code, 404)
# Test that GET returns permissions
response = self.client.get(
reverse(
'resource_permissions',
args=(
valid_layer_typename,
)))
assert('permissions' in response.content)
# Test that a user is required to have maps.change_layer_permissions
# First test un-authenticated
response = self.client.post(
reverse(
'resource_permissions', args=(
valid_layer_typename,)), data=json.dumps(
self.perm_spec), content_type="application/json")
self.assertEquals(response.status_code, 401)
# Next Test with a user that does NOT have the proper perms
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'resource_permissions', args=(
valid_layer_typename,)), data=json.dumps(
self.perm_spec), content_type="application/json")
self.assertEquals(response.status_code, 200)
# Login as a user with the proper permission and test the endpoint
logged_in = self.client.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'resource_permissions', args=(
valid_layer_typename,)), data=json.dumps(
self.perm_spec), content_type="application/json")
# Test that the method returns 200
self.assertEquals(response.status_code, 200)
# Test that the permissions specification is applied
# Should we do this here, or assume the tests in
# test_set_layer_permissions will handle for that?
def test_perms_info(self):
""" Verify that the perms_info view is behaving as expected
"""
# Test with a Layer object
layer = Layer.objects.all()[0]
layer.set_default_permissions()
# Test that the anonymous user can read
self.assertTrue(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that layer owner can edit layer
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase',
layer.get_self_resource()))
# Test with a Map object
# TODO
# now we test permissions, first on an authenticated user and then on the
# anonymous user
# 1. view_resourcebase
# 2. change_resourcebase
# 3. delete_resourcebase
# 4. change_resourcebase_metadata
# 5. change_resourcebase_permissions
# 6. change_layer_data
# 7. change_layer_style
def test_not_superuser_permissions(self):
geofence_rules_count = 0
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
purge_geofence_all()
# Reset GeoFence Rules
geofence_rules_count = get_geofence_rules_count()
self.assertTrue(geofence_rules_count == 0)
# grab bobby
bob = get_user_model().objects.get(username='bobby')
# grab a layer
layer = Layer.objects.all()[0]
layer.set_default_permissions()
# verify bobby has view/change permissions on it but not manage
self.assertTrue(
bob.has_perm(
'change_resourcebase_permissions',
layer.get_self_resource()))
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
# Check GeoFence Rules have been correctly created
geofence_rules_count = get_geofence_rules_count()
_log("1. geofence_rules_count: %s " % geofence_rules_count)
self.assertTrue(geofence_rules_count == 1)
self.assertTrue(self.client.login(username='bobby', password='bob'))
# 1. view_resourcebase
# 1.1 has view_resourcebase: verify that bobby can access the layer
# detail page
self.assertTrue(
bob.has_perm(
'view_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_detail', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 1.2 has not view_resourcebase: verify that bobby cannot access the
# layer detail page
remove_perm('view_resourcebase', bob, layer.get_self_resource())
anonymous_group = Group.objects.get(name='anonymous')
remove_perm('view_resourcebase', anonymous_group, layer.get_self_resource())
response = self.client.get(reverse('layer_detail', args=(layer.alternate,)))
self.assertTrue(response.status_code in (401, 403))
# 2. change_resourcebase
# 2.1 has not change_resourcebase: verify that bobby cannot access the
# layer replace page
response = self.client.get(reverse('layer_replace', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 2.2 has change_resourcebase: verify that bobby can access the layer
# replace page
assign_perm('change_resourcebase', bob, layer.get_self_resource())
self.assertTrue(
bob.has_perm(
'change_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_replace', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 3. delete_resourcebase
# 3.1 has not delete_resourcebase: verify that bobby cannot access the
# layer delete page
response = self.client.get(reverse('layer_remove', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 3.2 has delete_resourcebase: verify that bobby can access the layer
# delete page
assign_perm('delete_resourcebase', bob, layer.get_self_resource())
self.assertTrue(
bob.has_perm(
'delete_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_remove', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 4. change_resourcebase_metadata
# 4.1 has not change_resourcebase_metadata: verify that bobby cannot
# access the layer metadata page
response = self.client.get(reverse('layer_metadata', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 4.2 has change_resourcebase_metadata: verify that bobby can access the
# layer metadata page
assign_perm('change_resourcebase_metadata', bob, layer.get_self_resource())
self.assertTrue(
bob.has_perm(
'change_resourcebase_metadata',
layer.get_self_resource()))
response = self.client.get(reverse('layer_metadata', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
perms = get_users_with_perms(layer)
_log("2. perms: %s " % perms)
sync_geofence_with_guardian(layer, perms, user=bob, group=anonymous_group)
# Check GeoFence Rules have been correctly created
geofence_rules_count = get_geofence_rules_count()
_log("3. geofence_rules_count: %s " % geofence_rules_count)
self.assertEquals(geofence_rules_count, 1)
# 5. change_resourcebase_permissions
# should be impossible for the user without change_resourcebase_permissions
# to change permissions as the permission form is not available in the
# layer detail page?
# 6. change_layer_data
# must be done in integration test sending a WFS-T request with CURL
# 7. change_layer_style
# 7.1 has not change_layer_style: verify that bobby cannot access
# the layer style page
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
# Only for geoserver backend
response = self.client.get(reverse('layer_style_manage', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 7.2 has change_layer_style: verify that bobby can access the
# change layer style page
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
# Only for geoserver backend
assign_perm('change_layer_style', bob, layer)
self.assertTrue(
bob.has_perm(
'change_layer_style',
layer))
response = self.client.get(reverse('layer_style_manage', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
geofence_rules_count = 0
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
purge_geofence_all()
# Reset GeoFence Rules
geofence_rules_count = get_geofence_rules_count()
self.assertEquals(geofence_rules_count, 0)
def test_anonymous_permissions(self):
# grab a layer
layer = Layer.objects.all()[0]
layer.set_default_permissions()
# 1. view_resourcebase
# 1.1 has view_resourcebase: verify that anonymous user can access
# the layer detail page
self.assertTrue(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_detail', args=(layer.alternate,)))
self.assertEquals(response.status_code, 200)
# 1.2 has not view_resourcebase: verify that anonymous user can not
# access the layer detail page
remove_perm('view_resourcebase', self.anonymous_user, layer.get_self_resource())
anonymous_group = Group.objects.get(name='anonymous')
remove_perm('view_resourcebase', anonymous_group, layer.get_self_resource())
response = self.client.get(reverse('layer_detail', args=(layer.alternate,)))
self.assertTrue(response.status_code in (302, 403))
# 2. change_resourcebase
# 2.1 has not change_resourcebase: verify that anonymous user cannot
# access the layer replace page but redirected to login
response = self.client.get(reverse('layer_replace', args=(layer.alternate,)))
self.assertTrue(response.status_code in (302, 403))
# 3. delete_resourcebase
# 3.1 has not delete_resourcebase: verify that anonymous user cannot
# access the layer delete page but redirected to login
response = self.client.get(reverse('layer_remove', args=(layer.alternate,)))
self.assertTrue(response.status_code in (302, 403))
# 4. change_resourcebase_metadata
# 4.1 has not change_resourcebase_metadata: verify that anonymous user
# cannot access the layer metadata page but redirected to login
response = self.client.get(reverse('layer_metadata', args=(layer.alternate,)))
self.assertTrue(response.status_code in (302, 403))
# 5 N/A? 6 is an integration test...
# 7. change_layer_style
# 7.1 has not change_layer_style: verify that anonymous user cannot access
# the layer style page but redirected to login
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
# Only for geoserver backend
response = self.client.get(reverse('layer_style_manage', args=(layer.alternate,)))
self.assertTrue(response.status_code in (302, 403))
class GisBackendSignalsTests(ResourceTestCaseMixin, GeoNodeBaseTestSupport):
def setUp(self):
super(GisBackendSignalsTests, self).setUp()
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
settings.OGC_SERVER['default']['GEOFENCE_SECURITY_ENABLED'] = True
self.user = 'admin'
self.passwd = 'admin'
self.list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'layers'})
self.bulk_perms_url = reverse('bulk_permissions')
all_public()
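# all_public() resets every resource to public access, giving the tests a known starting state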
self.perm_spec = {
"users": {"admin": ["view_resourcebase"]}, "groups": {}}
def test_save_and_delete_signals(self):
"""Test that GeoServer Signals methods work as espected"""
layers = Layer.objects.all()[:2].values_list('id', flat=True)
test_perm_layer = Layer.objects.get(id=layers[0])
self.client.login(username='admin', password='admin')
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.signals import (geoserver_pre_delete,
geoserver_post_save,
geoserver_post_save_local)
# Handle Layer Save and Upload Signals
geoserver_post_save(test_perm_layer, sender=Layer, created=True)
geoserver_post_save_local(test_perm_layer)
# Check instance bbox and links
self.assertIsNotNone(test_perm_layer.bbox)
self.assertIsNotNone(test_perm_layer.srid)
self.assertIsNotNone(test_perm_layer.link_set)
self.assertEquals(len(test_perm_layer.link_set.all()), 18)
# Layer Manipulation
from geonode.geoserver.upload import geoserver_upload
from geonode.geoserver.signals import gs_catalog
from geonode.geoserver.helpers import (check_geoserver_is_up,
get_sld_for,
fixup_style,
set_layer_style,
get_store,
set_attributes_from_geoserver,
set_styles,
create_gs_thumbnail,
cleanup)
check_geoserver_is_up()
admin_user = get_user_model().objects.get(username="admin")
saved_layer = geoserver_upload(
test_perm_layer,
os.path.join(
gisdata.VECTOR_DATA,
"san_andres_y_providencia_poi.shp"),
admin_user,
test_perm_layer.name,
overwrite=True
)
self.assertIsNotNone(saved_layer)
_log(saved_layer)
workspace, name = test_perm_layer.alternate.split(':')
self.assertIsNotNone(workspace)
self.assertIsNotNone(name)
ws = gs_catalog.get_workspace(workspace)
self.assertIsNotNone(ws)
store = get_store(gs_catalog, name, workspace=ws)
_log("1. ------------ %s " % store)
self.assertIsNotNone(store)
# Save layer attributes
set_attributes_from_geoserver(test_perm_layer)
# Save layer styles
set_styles(test_perm_layer, gs_catalog)
# set SLD
sld = test_perm_layer.default_style.sld_body if test_perm_layer.default_style else None
if sld:
_log("2. ------------ %s " % sld)
set_layer_style(test_perm_layer, test_perm_layer.alternate, sld)
fixup_style(gs_catalog, test_perm_layer.alternate, None)
self.assertIsNone(get_sld_for(gs_catalog, test_perm_layer))
_log("3. ------------ %s " % get_sld_for(gs_catalog, test_perm_layer))
create_gs_thumbnail(test_perm_layer, overwrite=True)
self.assertIsNotNone(test_perm_layer.get_thumbnail_url())
self.assertTrue(test_perm_layer.has_thumbnail())
# Handle Layer Delete Signals
geoserver_pre_delete(test_perm_layer, sender=Layer)
# Check instance has been removed from GeoServer also
from geonode.geoserver.views import get_layer_capabilities
self.assertIsNone(get_layer_capabilities(test_perm_layer))
# Cleaning Up
test_perm_layer.delete()
cleanup(test_perm_layer.name, test_perm_layer.uuid)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
class SecurityRulesTest(ResourceTestCaseMixin, GeoNodeBaseTestSupport):
"""
Test resources synchronization with Guardian and dirty states cleaning
"""
def setUp(self):
super(SecurityRulesTest, self).setUp()
# Layer upload
layer_upload_url = reverse('layer_upload')
self.client.login(username="admin", password="admin")
input_paths, suffixes = self._get_input_paths()
input_files = [open(fp, 'rb') for fp in input_paths]
files = dict(zip(['{}_file'.format(s) for s in suffixes], input_files))
files['base_file'] = files.pop('shp_file')
with contextlib.nested(*input_files):
files['permissions'] = '{}'
files['charset'] = 'utf-8'
files['layer_title'] = 'test layer'
resp = self.client.post(layer_upload_url, data=files)
# Check the response is OK
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
lname = data['url'].split(':')[-1]
self._l = Layer.objects.get(name=lname)
def _get_input_paths(self):
base_name = 'single_point'
suffixes = 'shp shx dbf prj'.split(' ')
base_path = gisdata.GOOD_DATA
paths = [os.path.join(base_path, 'vector', '{}.{}'.format(base_name, suffix)) for suffix in suffixes]
return paths, suffixes,
def test_sync_resources_with_guardian_delay_false(self):
with self.settings(DELAYED_SECURITY_SIGNALS=False):
# Set geofence (and so the dirty state)
set_geofence_all(self._l)
# Retrieve the same layer
dirty_layer = Layer.objects.get(pk=self._l.id)
# Check dirty state (should be False, since security signals are not delayed)
self.assertFalse(dirty_layer.dirty_state)
# Call sync resources
sync_resources_with_guardian()
clean_layer = Layer.objects.get(pk=self._l.id)
# Check dirty state
self.assertFalse(clean_layer.dirty_state)
def test_sync_resources_with_guardian_delay_true(self):
with self.settings(DELAYED_SECURITY_SIGNALS=True):
# Set geofence (and so the dirty state)
set_geofence_all(self._l)
# Retrieve the same layer
dirty_layer = Layer.objects.get(pk=self._l.id)
# Check dirty state (True)
self.assertTrue(dirty_layer.dirty_state)
# Call sync resources
sync_resources_with_guardian()
clean_layer = Layer.objects.get(pk=self._l.id)
# Check dirty state
self.assertFalse(clean_layer.dirty_state)
| ppasq/geonode | geonode/security/tests.py | Python | gpl-3.0 | 57,433 | [
"VisIt"
] | 2d24670fa908089867012730ad55b1d65e248b01b2f3886ab9a1abf6a42fefd0 |
'''
user interface for viewing genesis simulation results
'''
import sys
import os
import csv
import time
import matplotlib
# check if Xserver is connected
# havedisplay = "DISPLAY" in os.environ
# if not havedisplay:
# # re-check
# exitval = os.system('python -c "import matplotlib.pyplot as plt; plt.figure()"')
# havedisplay = (exitval == 0)
# if not havedisplay:
# # force matplotlib not to use the Xwindows backend; plots may still be saved to e.g. *.png
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import *
from ocelot.adaptors.genesis import *
from ocelot.common.globals import * # import of constants such as "h_eV_s" and "speed_of_light"
from ocelot.common.math_op import * # import of mathematical functions
from ocelot.utils.xfel_utils import *
# from pylab import rc, rcParams #tmp
from matplotlib import rc, rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
fntsz = 4
params = {'backend': 'ps', 'axes.labelsize': 3 * fntsz, 'font.size': 3 * fntsz, 'legend.fontsize': 4 * fntsz, 'xtick.labelsize': 4 * fntsz, 'ytick.labelsize': 4 * fntsz, 'text.usetex': False}
rcParams.update(params)
# plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
# rcParams["savefig.directory"] = os.chdir(os.path.dirname(__file__)) but __file__ appears to be genesis_plot
def plot_gen_out_all_paral(exp_dir, stage=1, savefig='png', debug=1):
print('start')
from ocelot.utils.xfel_utils import background
i = 0
dir = exp_dir + 'run_' + str(i) + '/'
while(os.path.exists(dir)):
print(i)
file = dir + 'run.' + str(i) + '.s'+str(stage)+'.gout'
if os.path.isfile(file):
print('good',i)
background('''plot_gen_out_all("'''+file+'''", choice=(1,1,1,1,0,0,0,0,0,0,0),debug='''+str(debug)+''')''')
i += 1
dir = exp_dir + 'run_' + str(i) + '/'
print(dir)
return
# plot_gen_stat(proj_dir=exp_dir, run_inp=[], stage_inp=[], param_inp=[], s_param_inp=['p_int','energy','r_size_weighted'], z_param_inp=[], dfl_param_inp=[], s_inp=['max'], z_inp=[0,'end'], savefig=1, saveval=1, showfig=0, debug=0)
def plot_gen_out_all(handle=None, savefig='png', showfig=False, choice=(1, 1, 1, 1, 6.05, 1, 0, 0, 0, 0, 0, 1), vartype_dfl=complex128, debug=1):
'''
plots all possible output from the genesis output
handle is either:
a GenesisOutput object
a path to a genesis output file
a path to a folder with genesis output files
choice=(1,1,1,1,[],1,0,0,0,0,0,0)
0 1 2 3 4 5 6 7 8 9 10 11
0 - electron evolution
1 - radiation evolution
2 - profile at z=0m
3 - profile at the end
4 - profile every m meters
5 - dfl at the end, space-time domain
6 - inv. space-time domain
7 - space-frequency domain
8 - inv. space-frequency domain
9 - dpa as edist at the end, smeared
10 - dpa as edist at the end, not smeared
11 - wigner distribution at the end
# accepts a "GenesisOutput" object or a file/directory path as a string
# plots e-beam evolution, radiation evolution, and the initial and final simulation windows
# if a folder path is provided, all *.gout and *.out files in it are plotted
'''
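# a minimal usage sketch (the path below is hypothetical):
# plot_gen_out_all('results/run_0/run.0.s1.gout', choice='all', savefig='png', showfig=False)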
if debug > 0:
print(' plotting genesis output')
plotting_time = time.time()
# plt.ioff()
if savefig == True:
savefig = 'png'
if choice == 'all':
choice = (1, 1, 1, 1, 6.05, 1, 1, 1, 1, 1, 1, 1)
elif choice == 'gen':
choice = (1, 1, 1, 1, 6.05, 0, 0, 0, 0, 0, 0, 0)
if len(choice) > 12:
choice = choice[:12]
elif len(choice) < 12:
choice += tuple((zeros(12 - len(choice)).astype(int)))
if os.path.isdir(str(handle)):
handles = []
for root, dirs, files in os.walk(handle):
for name in files:
if name.endswith('.gout') or name.endswith('.out'):
handles.append(os.path.join(root, name))
if debug > 0:
print('\n plotting all files in ' + str(handle))
else:
handles = [handle]
for handle in handles:
if os.path.isfile(str(handle)):
handle = read_out_file(handle, read_level=2, debug=debug)
if isinstance(handle, GenesisOutput):
if choice[0]:
f0 = plot_gen_out_e(handle, savefig=savefig, debug=debug)
if choice[1]:
f1 = plot_gen_out_ph(handle, savefig=savefig, debug=debug)
if choice[2]:
f2 = plot_gen_out_z(handle, z=0, savefig=savefig, debug=debug)
if choice[3]:
f3 = plot_gen_out_z(handle, z=inf, savefig=savefig, debug=debug)
if choice[4] != 0:
for z in arange(choice[4], max(handle.z), choice[4]):
plot_gen_out_z(handle, z=z, savefig=savefig, debug=debug)
if choice[11]:
W=wigner_out(handle)
plot_wigner(W, savefig=savefig, debug=debug)
if os.path.isfile(handle.filePath + '.dfl') and any(choice[5:9]):
dfl = read_dfl_file_out(handle, debug=debug)
if dfl.Nz()==0:
print('empty dfl, skipping')
else:
if choice[5]:
f5 = plot_dfl(dfl, savefig=savefig, debug=debug)
if choice[6]:
f6 = plot_dfl(dfl, far_field=1, freq_domain=0, auto_zoom=0, savefig=savefig, debug=debug)
if choice[7]:
f7 = plot_dfl(dfl, far_field=0, freq_domain=1, auto_zoom=0, savefig=savefig, debug=debug)
if choice[8]:
f8 = plot_dfl(dfl, far_field=1, freq_domain=1, auto_zoom=0, savefig=savefig, debug=debug)
if os.path.isfile(handle.filePath + '.dpa') and (choice[9] or choice[10]) and handle('itdp') == True:
dpa = read_dpa_file_out(handle, debug=debug)
if size(dpa.ph)==0:
print('empty dpa, skipping')
else:
if choice[9]:
edist = dpa2edist(handle, dpa, num_part=5e4, smear=1, debug=debug)
f9 = plot_edist(edist, figsize=3, fig_name=None, savefig=savefig, showfig=showfig, bins=100, debug=debug)
if choice[10]:
edist = dpa2edist(handle, dpa, num_part=5e4, smear=0, debug=debug)
f10 = plot_edist(edist, figsize=3, fig_name=None, savefig=savefig, showfig=showfig, bins=(100, 100, 300, 200), debug=debug)
if savefig != False:
if debug > 0:
print(' plots recorded to *.' + str(savefig) + ' files')
if showfig:
if debug > 0:
print(' showing plots, close all to proceed')
plt.show()
# else:
# plt.close('all')
print (' total plotting time %.2f seconds' % (time.time() - plotting_time))
def plot_gen_out_z(g, figsize=(10, 14), legend=True, fig_name=None, z=inf, savefig=False, showfig=False, debug=1):
number_ticks = 6
if showfig == False and savefig == False:
return
if g('itdp') == False:
print(' plotting bunch profile at ' + str(z) + ' [m]')
print('! not applicable for steady-state')
return
import matplotlib.ticker as ticker
if z == inf:
# print ('Showing profile parameters at the end of undulator')
z = np.amax(g.z)
elif z > np.amax(g.z):
# print ('Z parameter too large, setting to the undulator end')
z = np.amax(g.z)
elif z < np.amin(g.z):
# print ('Z parameter too small, setting to the undulator entrance')
z = np.amin(g.z)
zi = np.where(g.z >= z)[0][0]
z = g.z[zi]
if debug > 0:
print(' plotting bunch profile at ' + str(z) + ' [m]')
font_size = 1
if fig_name is None:
if g.fileName() == '':
fig = plt.figure('Bunch profile at ' + str(z) + 'm')
else:
fig = plt.figure('Bunch profile at ' + str(z) + 'm ' + g.fileName())
else:
fig = plt.figure(fig_name)
fig.set_size_inches(figsize, forward=True)
# plt.rc('axes', grid=True)
# left, width = 0.1, 0.85
plt.clf()
ax_curr = fig.add_subplot(4, 1, 1)
ax_curr.clear()
ax_energy = fig.add_subplot(4, 1, 2, sharex=ax_curr)
ax_energy.clear()
ax_phase = fig.add_subplot(4, 1, 3, sharex=ax_curr)
ax_phase.clear()
ax_spectrum = fig.add_subplot(4, 1, 4)
ax_spectrum.clear()
for ax in ax_curr, ax_phase, ax_spectrum, ax_energy:
if ax != ax_spectrum and ax != ax_phase:
for label in ax.get_xticklabels():
label.set_visible(False)
fig.subplots_adjust(hspace=0)
s = g.t * speed_of_light * 1.0e-15 * 1e6
ax_curr.plot(s, g.I / 1e3, 'k--')
ax_curr.set_ylabel(r'I [kA]')
ax_curr.set_ylim(ymin=0)
ax_curr.text(0.02, 0.98, "Q= %.2f pC" % (g.beam_charge * 1e12), fontsize=12, horizontalalignment='left', verticalalignment='top', transform=ax_curr.transAxes) # horizontalalignment='center', verticalalignment='center',
ax_curr.text(0.98, 0.98, "E= %.2e J" % (g.energy[zi]), fontsize=12, horizontalalignment='right', verticalalignment='top', transform=ax_curr.transAxes, color='green') # horizontalalignment='center', verticalalignment='center',
ax_curr.grid(True)
ax_power = ax_curr.twinx()
ax_power.grid(False)
ax_power.plot(s, g.p_int[:, zi], 'g-', linewidth=1.5)
ax_power.set_ylabel(r'Power [W]')
ax_power.set_ylim(ymin=0)
# if np.amax(g.p_int[:,zi])!=np.amin(g.p_int[:,zi]):
# ax_power.set_ylim([0, np.amax(g.p_int[:,zi])])
ax_power.get_yaxis().get_major_formatter().set_useOffset(False)
ax_power.get_yaxis().get_major_formatter().set_scientific(True)
ax_power.get_yaxis().get_major_formatter().set_powerlimits((-3, 4)) # [:,75,75]
# ax_power.get_xaxis().get_offset_text().set_x(1.1)
ax_energy.plot(s, g.el_energy[:, zi] * 0.511e-3, 'b-', s, (g.el_energy[:, zi] + g.el_e_spread[:, zi]) * 0.511e-3, 'r--', s, (g.el_energy[:, zi] - g.el_e_spread[:, zi]) * 0.511e-3, 'r--')
ax_energy.set_ylabel(r'$E\pm\sigma_E$ [GeV]')
# ax_energy.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3), useOffset=False)
ax_energy.ticklabel_format(useOffset=False, style='plain')
ax_energy.grid(True)
ax_bunching = ax_energy.twinx()
ax_bunching.plot(s, g.bunching[:, zi], 'grey', linewidth=0.5)
ax_bunching.set_ylabel('Bunching')
ax_bunching.set_ylim(ymin=0)
ax_bunching.grid(False)
n_pad = 1
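# zero-pad the power and phase records, reconstruct the on-axis complex field
# as sqrt(P)*exp(i*phi), and FFT it along s to obtain the on-axis spectrum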
power = np.pad(g.p_mid, [(int(g.nSlices / 2) * n_pad, (g.nSlices - (int(g.nSlices / 2)))) * n_pad, (0, 0)], mode='constant')
phase = np.pad(g.phi_mid, [(int(g.nSlices / 2) * n_pad, (g.nSlices - (int(g.nSlices / 2)))) * n_pad, (0, 0)], mode='constant') # not supported by the numpy 1.6.2
spectrum = abs(np.fft.fft(np.sqrt(np.array(power)) * np.exp(1.j * np.array(phase)), axis=0))**2 / sqrt(g.nSlices) / (2 * g.leng / g('ncar'))**2 / 1e10
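# central photon energy from the resonant wavelength: E[eV] = 1239.8 / lambda[nm]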
e_0 = 1239.8 / g('xlamds') / 1e9
g.freq_ev1 = h_eV_s * np.fft.fftfreq(len(spectrum), d=g('zsep') * g('xlamds') * g('ishsty') / speed_of_light) + e_0
lamdscale = 1239.8 / g.freq_ev1
lamdscale_array = np.swapaxes(np.tile(lamdscale, (g.nZ, 1)), 0, 1)
# for std calculation
# spectrum_lamdpos=np.sum(spectrum*lamdscale_array/np.sum(spectrum,axis=0),axis=0)
# spectrum_lamdwidth=sqrt(np.sum(spectrum*(lamdscale_array-spectrum_lamdpos)**2/np.sum(spectrum,axis=0),axis=0))
# ax_spectrum.plot(np.fft.fftshift(lamdscale), np.fft.fftshift(spectrum[:,zi]), 'r-')
ax_spectrum.plot(g.freq_lamd, g.spec[:, zi], 'r-')
ax_spectrum.text(0.98, 0.98, r'(on axis)', fontsize=10, horizontalalignment='right', verticalalignment='top', transform=ax_spectrum.transAxes) # horizontalalignment='center', verticalalignment='center',
ax_spectrum.set_ylabel(r'P($\lambda$) [a.u.]')
ax_spectrum.set_xlabel(r'$\lambda$ [nm]')
ax_spectrum.set_ylim(ymin=0)
ax_spectrum.get_yaxis().get_major_formatter().set_useOffset(False)
ax_spectrum.get_yaxis().get_major_formatter().set_scientific(True)
ax_spectrum.get_yaxis().get_major_formatter().set_powerlimits((-3, 4)) # [:,75,75]
ax_spectrum.grid(True)
if np.amin(lamdscale) != np.amax(lamdscale):
ax_spectrum.set_xlim([np.amin(lamdscale), np.amax(lamdscale)])
ax_phase.set_xlabel(r's [$\mu$m]')
maxspectrum_index = np.argmax(spectrum[:, zi])
maxspower_index = np.argmax(g.p_mid[:, zi])
maxspectrum_wavelength = lamdscale[maxspectrum_index] * 1e-9
ax_spectrum.text(0.02, 0.98, r"$\lambda_{max}$= %.3f nm" % (maxspectrum_wavelength * 1e9), fontsize=12, horizontalalignment='left', verticalalignment='top', transform=ax_spectrum.transAxes, color='red') # horizontalalignment='center', verticalalignment='center',
phase = unwrap(g.phi_mid[:, zi])
phase_cor = np.arange(g.nSlices) * (maxspectrum_wavelength - g('xlamds')) / g('xlamds') * g('zsep') * 2 * pi
phase_fixed = phase + phase_cor
phase_fixed -= phase_fixed[maxspower_index] # zero the phase at the position of peak power
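# wrap the corrected phase into the interval [-pi, pi)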
n = 1
phase_fixed = (phase_fixed + n * pi) % (2 * n * pi) - n * pi
ax_phase.plot(s, phase_fixed, 'k-', linewidth=0.5)
ax_phase.text(0.98, 0.98, r'(on axis)', fontsize=10, horizontalalignment='right', verticalalignment='top', transform=ax_phase.transAxes) # horizontalalignment='center', verticalalignment='center',
ax_phase.set_ylabel(r'$\phi$ [rad]')
ax_phase.set_ylim([-pi, pi])
ax_phase.grid(True)
# ax_spectrum.yaxis.major.locator.set_params(nbins=number_ticks)
ax_phase.xaxis.major.locator.set_params(nbins=number_ticks)
ax_power.yaxis.major.locator.set_params(nbins=number_ticks)
ax_energy.yaxis.major.locator.set_params(nbins=number_ticks)
ax_spectrum.yaxis.major.locator.set_params(nbins=number_ticks)
ax_bunching.yaxis.major.locator.set_params(nbins=number_ticks)
ax_curr.yaxis.major.locator.set_params(nbins=number_ticks)
# ax_energy.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1e'))
plt.xlim(s[0], s[-1])
fig.subplots_adjust(top=0.95, bottom=0.2, right=0.85, left=0.15)
# fig.set_size_inches((8,8),forward=True)
pos1 = ax_spectrum.get_position() # get the original position
pos2 = [pos1.x0 + 0, pos1.y0 - 0.1, pos1.width / 1.0, pos1.height / 0.9]
ax_spectrum.set_position(pos2)
ax_spectrum.tick_params(axis='y', which='both', colors='r')
ax_spectrum.yaxis.label.set_color('r')
ax_energy.tick_params(axis='y', which='both', colors='b')
ax_energy.yaxis.label.set_color('b')
ax_bunching.tick_params(axis='y', which='both', colors='grey')
ax_bunching.yaxis.label.set_color('grey')
ax_power.tick_params(axis='y', which='both', colors='g')
ax_power.yaxis.label.set_color('g')
ax_power.yaxis.get_offset_text().set_color(ax_power.yaxis.label.get_color())
ax_spectrum.yaxis.get_offset_text().set_color(ax_spectrum.yaxis.label.get_color())
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
fig.savefig(g.filePath + '_z_' + str(z) + 'm.' + str(savefig), format=savefig)
if showfig:
plt.show()
else:
plt.close('all')
def plot_gen_out_e(g, legend=False, figsize=4, fig_name='Electrons', savefig=False, showfig=False, debug=1):
fig = plot_gen_out_evo(g, params=['und_quad', 'el_size', 'el_energy', 'el_bunching'], figsize=figsize, legend=legend, fig_name=fig_name, savefig=savefig, showfig=showfig, debug=debug)
def plot_gen_out_ph(g, legend=False, figsize=4, fig_name='Radiation', savefig=False, showfig=False, debug=1):
if g('itdp'):
fig = plot_gen_out_evo(g, params=['rad_pow_en', 'rad_spec', 'rad_size'], figsize=figsize, legend=legend, fig_name=fig_name, savefig=savefig, showfig=showfig, debug=debug)
else:
fig = plot_gen_out_evo(g, params=['rad_pow', 'rad_size'], figsize=figsize, legend=legend, fig_name=fig_name, savefig=savefig, showfig=showfig, debug=debug)
def plot_gen_out_evo(g, params=['und_quad', 'el_size', 'el_energy', 'el_bunching', 'rad_pow_en', 'rad_spec', 'rad_size', 'rad_spec_evo_n', 'rad_pow_evo_n'], figsize=4, legend=False, fig_name=None, savefig=False, showfig=False, debug=1):
'''
plots the evolution of the given parameters from the genesis output along the undulator length
'''
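# a minimal usage sketch (the output path is hypothetical):
# plot_gen_out_evo('results/run.0.s1.gout', params=['el_energy', 'rad_pow'], savefig='png', showfig=False)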
import matplotlib.ticker as ticker
if showfig == False and savefig == False:
return
params_str = str(params).replace("'", '').replace('[', '').replace(']', '').replace(' ', '').replace(',', '--')
if os.path.isfile(str(g)):
g = read_out_file(g, read_level=2)
# add check for output object
if fig_name is None:
if g.fileName() == '':
fig = plt.figure(params_str)
if debug > 0:
print(' plotting ' + params_str)
else:
fig = plt.figure(g.fileName() + '_' + params_str)
if debug > 0:
print(' plotting ' + g.fileName() + '_' + params_str)
else:
fig = plt.figure(fig_name)
if debug > 0:
print(' plotting ' + fig_name)
if size(figsize) == 1:
figsize = (3 * figsize, (len(params) + 0.5) * figsize)
fig.set_size_inches(figsize, forward=True)
# plt.rc('axes', grid=True)
# plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
# left, width = 0.1, 0.85
plt.clf()
fig.subplots_adjust(hspace=0)
ax = []
is_tdp = g('itdp')
for index, param in enumerate(params):
if len(ax) == 0:
ax.append(fig.add_subplot(len(params), 1, index + 1))
else:
ax.append(fig.add_subplot(len(params), 1, index + 1, sharex=ax[0]))
# ax[-1]
if param == 'und_quad':
subfig_und_quad(ax[-1], g, legend)
elif param == 'und':
subfig_und(ax[-1], g, legend)
elif param == 'el_size':
subfig_el_size(ax[-1], g, legend)
elif param == 'el_energy':
subfig_el_energy(ax[-1], g, legend)
elif param == 'el_bunching':
subfig_el_bunching(ax[-1], g, legend)
elif param == 'rad_pow_en':
subfig_rad_pow_en(ax[-1], g, legend)
elif param == 'rad_pow':
subfig_rad_pow(ax[-1], g, legend)
elif param == 'rad_size':
subfig_rad_size(ax[-1], g, legend)
elif param == 'rad_spec':
if is_tdp:
subfig_rad_spec(ax[-1], g, legend)
elif param == 'rad_spec_evo_n':
if is_tdp:
subfig_rad_spec_evo(ax[-1], g, legend, norm=1)
elif param == 'rad_pow_evo_n':
if is_tdp:
subfig_rad_pow_evo(ax[-1], g, legend, norm=1)
elif param == 'rad_spec_evo':
if is_tdp:
subfig_rad_spec_evo(ax[-1], g, legend, norm=0)
elif param == 'rad_pow_evo':
if is_tdp:
subfig_rad_pow_evo(ax[-1], g, legend, norm=0)
else:
print('! wrong parameter ' + param)
ax[0].set_xlim(g.z[0], g.z[-1])
ax[-1].set_xlabel('z [m]')
fig.subplots_adjust(top=0.95, bottom=0.1, right=0.8, left=0.15)
for axi in ax[0:-1]:
for label in axi.get_xticklabels():
label.set_visible(False)
if savefig != False:
if savefig == True:
savefig = 'png'
if fig_name == 'Electrons':
fig.savefig(g.filePath + '_elec.' + str(savefig), format=savefig)
elif fig_name == 'Radiation':
fig.savefig(g.filePath + '_rad.' + str(savefig), format=savefig)
else:
fig.savefig(g.filePath + '_' + params_str + '.' + str(savefig), format=savefig)
plt.draw()
if showfig == True:
dir_lst = g.filePath.split(os.path.sep)
dir = os.path.sep.join(dir_lst[0:-1]) + os.path.sep
rcParams["savefig.directory"] = dir
plt.show()
else:
plt.close('all')
def subfig_und_quad(ax_und, g, legend):
number_ticks = 6
ax_und.plot(g.z, g.aw, 'b-', linewidth=1.5)
ax_und.set_ylabel('K (rms)')
ax_und.grid(True)
ax_quad = ax_und.twinx()
ax_quad.plot(g.z, g.qfld, 'r-', linewidth=1.5)
ax_quad.set_ylabel('Quad')
ax_quad.grid(False)
ax_und.yaxis.major.locator.set_params(nbins=number_ticks)
ax_quad.yaxis.major.locator.set_params(nbins=number_ticks)
if np.amax(g.aw) != 0:
aw_tmp = np.array(g.aw)[np.array(g.aw) != 0]
if np.amax(aw_tmp) != np.amin(aw_tmp):
diff = np.amax(aw_tmp) - np.amin(aw_tmp)
ax_und.set_ylim([np.amin(aw_tmp) - diff / 10, np.amax(aw_tmp) + diff / 10])
else:
ax_und.set_ylim([0, 1])
ax_und.tick_params(axis='y', which='both', colors='b')
ax_und.yaxis.label.set_color('b')
ax_quad.tick_params(axis='y', which='both', colors='r')
ax_quad.yaxis.label.set_color('r')
def subfig_und(ax_und, g, legend):
number_ticks = 6
ax_und.plot(g.z, g.aw, 'b-', linewidth=1.5)
ax_und.set_ylabel('K (rms)')
ax_und.grid(True)
ax_und.yaxis.major.locator.set_params(nbins=number_ticks)
if np.amax(g.aw) != 0:
aw_tmp = np.array(g.aw)[np.array(g.aw) != 0]
if np.amax(aw_tmp) != np.amin(aw_tmp):
diff = np.amax(aw_tmp) - np.amin(aw_tmp)
ax_und.set_ylim([np.amin(aw_tmp) - diff / 10, np.amax(aw_tmp) + diff / 10])
else:
ax_und.set_ylim([0, 1])
ax_und.tick_params(axis='y', which='both', colors='b')
ax_und.yaxis.label.set_color('b')
def subfig_el_size(ax_size_tpos, g, legend, which='both'):
number_ticks = 6
if which == 'both' or which == 'averaged':
ax_size_tpos.plot(g.z, np.average(g.xrms, axis=0, weights=g.I) * 1e6, 'g-', g.z, np.average(g.yrms, axis=0, weights=g.I) * 1e6, 'b-')
if which == 'both' or which == 'peak_curr':
idx_pk = np.where(g.I == np.amax(g.I))[0][0]
ax_size_tpos.plot(g.z, g.xrms[idx_pk, :] * 1e6, 'g--', g.z, g.yrms[idx_pk, :] * 1e6, 'b--')
ax_size_tpos.set_ylabel(r'$\sigma_{x,y}$ [$\mu$m]')
ax_size_tpos.set_ylim(ymin=0)
ax_size_tpos.yaxis.major.locator.set_params(nbins=number_ticks)
ax_size_tpos.grid(True)
def subfig_el_energy(ax_energy, g, legend):
number_ticks = 6
el_energy = g.el_energy * m_e_MeV
el_energy_av = int(mean(el_energy))
ax_energy.plot(g.z, np.average(el_energy - el_energy_av, axis=0), 'b-', linewidth=1.5)
ax_energy.set_ylabel('E + ' + str(el_energy_av) + '[MeV]')
ax_energy.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3), useOffset=False)
ax_energy.grid(True)
ax_spread = ax_energy.twinx()
ax_spread.plot(g.z, np.average(g.el_e_spread * 0.511e-3 * 1000, weights=g.I, axis=0), 'm--', g.z, np.amax(g.el_e_spread * 0.511e-3 * 1000, axis=0), 'r--', linewidth=1.5)
ax_spread.set_ylabel(r'$\sigma_E$ [MeV]')
ax_spread.grid(False)
ax_spread.set_ylim(ymin=0)
ax_energy.yaxis.major.locator.set_params(nbins=number_ticks)
ax_spread.yaxis.major.locator.set_params(nbins=number_ticks)
ax_energy.tick_params(axis='y', which='both', colors='b')
ax_energy.yaxis.label.set_color('b')
ax_spread.tick_params(axis='y', which='both', colors='r')
ax_spread.yaxis.label.set_color('r')
def subfig_el_bunching(ax_bunching, g, legend):
number_ticks = 6
ax_bunching.plot(g.z, np.average(g.bunching, weights=g.I, axis=0), 'k-', g.z, np.amax(g.bunching, axis=0), 'grey', linewidth=1.5)
# ax_bunching.plot(g.z, np.amax(g.bunching, axis=0), 'grey',linewidth=1.5) #only max
ax_bunching.set_ylabel(r'Bunching')
ax_bunching.set_ylim(ymin=0)
# ax_bunching.set_ylim([0,0.8])
ax_bunching.yaxis.major.locator.set_params(nbins=number_ticks)
ax_bunching.grid(True)
def subfig_rad_pow_en(ax_rad_pow, g, legend, log=1):
ax_rad_pow.plot(g.z, np.amax(g.p_int, axis=0), 'g-', linewidth=1.5)
ax_rad_pow.set_ylabel(r'P [W]')
ax_rad_pow.get_yaxis().get_major_formatter().set_useOffset(False)
ax_rad_pow.get_yaxis().get_major_formatter().set_scientific(True)
if np.amax(g.p_int) > 0 and log:
ax_rad_pow.set_yscale('log')
ax_rad_en = ax_rad_pow.twinx()
ax_rad_en.plot(g.z, g.energy, 'k--', linewidth=1.5)
ax_rad_en.set_ylabel(r'E [J]')
ax_rad_en.get_yaxis().get_major_formatter().set_useOffset(False)
ax_rad_en.get_yaxis().get_major_formatter().set_scientific(True)
if np.amax(g.p_int) > 0 and log:
ax_rad_en.set_yscale('log')
ax_rad_pow.grid(True) # , which='minor')
# ax_rad_pow.grid(False, which="minor")
ax_rad_pow.tick_params(axis='y', which='both', colors='g')
ax_rad_pow.yaxis.label.set_color('g')
ax_rad_en.tick_params(axis='y', which='both', colors='k')
ax_rad_en.yaxis.label.set_color('k')
ax_rad_en.grid(False)
# ax_rad_en.grid(False, which='minor')
ax_rad_pow.yaxis.get_offset_text().set_color(ax_rad_pow.yaxis.label.get_color())
ax_rad_en.yaxis.get_offset_text().set_color(ax_rad_en.yaxis.label.get_color())
ax_rad_pow.text(0.98, 0.02, r'$P_{end}$= %.2e W ' '\n' r'$E_{end}$= %.2e J' % (np.amax(g.p_int[:, -1]), np.mean(g.p_int[:, -1], axis=0) * g('xlamds') * g('zsep') * g.nSlices / speed_of_light), fontsize=12, horizontalalignment='right', verticalalignment='bottom', transform=ax_rad_pow.transAxes)
def subfig_rad_pow(ax_rad_pow, g, legend, log=1):
ax_rad_pow.plot(g.z, np.amax(g.p_int, axis=0), 'g-', linewidth=1.5)
ax_rad_pow.set_ylabel('P [W]')
ax_rad_pow.get_yaxis().get_major_formatter().set_useOffset(False)
ax_rad_pow.get_yaxis().get_major_formatter().set_scientific(True)
if np.amax(g.p_int) > 0 and log:
ax_rad_pow.set_yscale('log')
ax_rad_pow.grid(False) # , which='minor')
ax_rad_pow.tick_params(axis='y', which='both', colors='g')
ax_rad_pow.yaxis.label.set_color('g')
ax_rad_pow.yaxis.get_offset_text().set_color(ax_rad_pow.yaxis.label.get_color())
# ax_rad_pow.set_ylim([1e5,1e11])
ax_rad_pow.text(0.98, 0.02, r'$P_{end}$= %.2e W' % (np.amax(g.p_int[:, -1])), fontsize=12, horizontalalignment='right', verticalalignment='bottom', transform=ax_rad_pow.transAxes)
def subfig_rad_spec(ax_spectrum, g, legend, log=1):
ax_spectrum.plot(g.z, np.amax(g.spec, axis=0), 'r-', linewidth=1.5)
ax_spectrum.text(0.5, 0.98, r"(on axis)", fontsize=10, horizontalalignment='center', verticalalignment='top', transform=ax_spectrum.transAxes) # horizontalalignment='center', verticalalignment='center',
ax_spectrum.set_ylabel(r'P$(\lambda)_{max}$ [a.u.]')
# if np.amin(np.amax(spectrum,axis=0))>0:
if np.amax(np.amax(g.spec, axis=0)) > 0 and log:
ax_spectrum.set_yscale('log')
ax_spectrum.grid(True)
spectrum_lamdwidth_fwhm = np.zeros_like(g.z)
spectrum_lamdwidth_std = np.zeros_like(g.z)
for zz in range(g.nZ):
# if np.sum(g.spec[:,zz])!=0:
#tmp
try:
peak = fwhm3(g.spec[:, zz])
spectrum_lamdwidth_fwhm[zz] = abs(g.freq_lamd[0] - g.freq_lamd[1]) * peak[1] / g.freq_lamd[peak[0]] # relative FWHM of the spectral line (error when the peak position is at the edge of lamdscale)
except:
spectrum_lamdwidth_fwhm[zz] = 0
try:
spectrum_lamdwidth_std[zz] = std_moment(g.freq_lamd, g.spec[:, zz]) / n_moment(g.freq_lamd, g.spec[:, zz], 0, 1)
except:
spectrum_lamdwidth_std[zz] = 0
ax_spec_bandw = ax_spectrum.twinx()
ax_spec_bandw.plot(g.z, spectrum_lamdwidth_fwhm * 100, 'm:', label="fwhm")
ax_spec_bandw.plot(g.z, spectrum_lamdwidth_std * 100, 'm--', label="std")
ax_spec_bandw.grid(False)
if legend:
ax_spec_bandw.legend()
# ax_spec_bandw.set_ylabel('$2\sigma\lambda$ [nm]')
ax_spec_bandw.set_ylabel(r'$\Delta\lambda/\lambda, \%$')
def subfig_rad_size(ax_size_t, g, legend):
if g.nSlices == 1:
ax_size_t.plot(g.z, g.r_size.T * 2 * 1e6, 'b-', linewidth=1.5)
ax_size_t.plot([np.amin(g.z), np.amax(g.z)], [g.leng * 1e6, g.leng * 1e6], 'b-', linewidth=1.0)
ax_size_t.set_ylabel('transverse $[\mu m]$')
else:
if hasattr(g, r'rad_t_size_weighted'):
ax_size_t.plot(g.z, g.rad_t_size_weighted, 'b-', linewidth=1.5)
else:
if np.amax(g.p_int) > 0:
weight = g.p_int + np.amin(g.p_int[g.p_int != 0]) / 1e6
else:
weight = np.ones_like(g.p_int)
ax_size_t.plot(g.z, np.average(g.r_size * 2 * 1e6, weights=weight, axis=0), 'b-', linewidth=1.5)
ax_size_t.set_ylim(ymin=0)
ax_size_t.set_ylabel(r'size$_{transv}$ [$\mu$m]')
ax_size_t.grid(True)
if g.nSlices > 1:
ax_size_s = ax_size_t.twinx()
size_long_fwhm = np.zeros_like(g.z)
size_long_std = np.zeros_like(g.z)
s = g.t * speed_of_light * 1.0e-15 * 1e6
delta_s = (s[1] - s[0])
for zz in range(g.nZ):
# if np.sum(g.spec[:,zz])!=0:
try:
peak = fwhm3(g.p_int[:, zz])
size_long_fwhm[zz] = abs(delta_s) * peak[1] # the FWHM of the power profile (error when the peak position is at the edge of the scale)
except:
size_long_fwhm[zz] = 0
try:
size_long_std[zz] = std_moment(s, g.p_int[:, zz])
except:
size_long_std[zz] = 0
ax_size_s.plot(g.z, size_long_fwhm, color='navy', linestyle=':', linewidth=1.0, label="fwhm")
ax_size_s.plot(g.z, size_long_std, color='navy', linestyle='--', linewidth=1.0, label="std")
ax_size_s.set_ylim(ymin=0)
ax_size_s.set_ylabel(r'size$_{long}$ [$\mu$m]')
ax_size_s.grid(False)
if legend:
ax_size_s.legend()
# plt.legend('fwhm','std')
def subfig_rad_pow_evo(ax_power_evo, g, legend, norm=1):
if g.nSlices > 1:
z = g.z
s = g.s
power = g.p_int
if norm == 1:
max_power = np.max(power, 0)[np.newaxis, :]
max_power[max_power == 0] = 1 # avoid division by zero
power = power / max_power
# power[isnan(power)]=0
ax_power_evo.pcolormesh(z, s * 1e6, power)
ax_power_evo.set_xlabel('z [m]')
ax_power_evo.set_ylabel('s [$\mu$m]')
ax_power_evo.axis('tight')
ax_power_evo.grid(True)
else:
pass
def subfig_rad_spec_evo(ax_spectrum_evo, g, legend, norm=1):
if g.nSlices > 1:
z = g.z
l = g.freq_lamd
spectrum = g.spec
if norm == 1:
max_spectrum = np.max(spectrum, 0)[np.newaxis, :]
max_spectrum[max_spectrum == 0] = 1 # avoid division by zero
spectrum = spectrum / max_spectrum
# spectrum[isnan(spectrum)]=0
ax_spectrum_evo.pcolormesh(z, l, spectrum)
ax_spectrum_evo.set_xlabel('z [m]')
ax_spectrum_evo.set_ylabel('$\lambda$ [nm]')
ax_spectrum_evo.axis('tight')
ax_spectrum_evo.grid(True)
else:
pass
def plot_gen_out_scanned_z(g, figsize=(10, 14), legend=True, fig_name=None, z=inf, savefig=False):
if g('itdp') == True:
print(' plotting scan at ' + str(z) + ' [m]')
print('! Not implemented yet for time dependent, skipping')
return
if g('iscan') == 0 and g('scan') == 0:
print(' plotting scan at ' + str(z) + ' [m]')
print('! Not a scan, skipping')
return
import matplotlib.ticker as ticker
if z == inf:
# print 'Showing profile parameters at the end of undulator'
z = np.amax(g.z)
elif z > np.amax(g.z):
# print 'Z parameter too large, setting to the undulator end'
z = np.amax(g.z)
elif z < np.amin(g.z):
# print 'Z parameter too small, setting to the undulator entrance'
z = np.amin(g.z)
zi = np.where(g.z >= z)[0][0]
z = g.z[zi]
print(' plotting scan at ' + str(z) + ' [m]')
font_size = 1
if fig_name is None:
if g.fileName() == '':
fig = plt.figure('Genesis scan at ' + str(z) + 'm')
else:
fig = plt.figure('Genesis scan at ' + str(z) + 'm ' + g.fileName())
else:
fig = plt.figure(fig_name)
fig.set_size_inches(figsize, forward=True)
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
# left, width = 0.1, 0.85
plt.clf()
ax_curr = fig.add_subplot(2, 1, 1)
ax_curr.clear()
ax_energy = fig.add_subplot(2, 1, 2, sharex=ax_curr)
ax_energy.clear()
# ax_phase=fig.add_subplot(4, 1, 3,sharex=ax_curr)
# ax_phase.clear()
# ax_spectrum=fig.add_subplot(4, 1, 4)
# ax_spectrum.clear()
for ax in [ax_curr]: # , ax_energy: #ax_phase, ax_spectrum,
for label in ax.get_xticklabels():
label.set_visible(False)
fig.subplots_adjust(hspace=0)
s = g.scv # scan value is written to the current column
ax_curr.plot(s, np.linspace(g('curpeak'), g('curpeak'), len(s)), 'k--')
ax_curr.set_ylabel(r'I[kA]')
ax_power = ax_curr.twinx()
ax_power.grid(False)
ax_power.plot(s, g.p_int[:, zi], 'g-', linewidth=1.5)
ax_power.set_ylabel(r'Power [W]')
ax_power.set_ylim([0, np.amax(g.p_int[:, zi])])
ax_power.get_yaxis().get_major_formatter().set_useOffset(False)
ax_power.get_yaxis().get_major_formatter().set_scientific(True)
ax_power.get_yaxis().get_major_formatter().set_powerlimits((-3, 4)) # [:,75,75]
# ax_power.get_xaxis().get_offset_text().set_x(1.1)
ax_energy.plot(s, g.el_energy[:, zi] * 0.511e-3, 'b-', s, (g.el_energy[:, zi] + g.el_e_spread[:, zi]) * 0.511e-3, 'r--', s, (g.el_energy[:, zi] - g.el_e_spread[:, zi]) * 0.511e-3, 'r--')
ax_energy.set_ylabel(r'$E\pm\sigma_E$ [GeV]')
# ax_energy.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3), useOffset=False)
ax_energy.ticklabel_format(useOffset=False, style='plain')
ax_energy.get_xaxis().get_major_formatter().set_useOffset(False)
ax_energy.get_xaxis().get_major_formatter().set_scientific(True)
ax_bunching = ax_energy.twinx()
ax_bunching.plot(s, g.bunching[:, zi], 'grey', linewidth=0.5)
ax_bunching.set_ylabel('Bunching')
ax_bunching.grid(False)
# ax_power.yaxis.major.locator.set_params(nbins=number_ticks)
# ax_energy.yaxis.major.locator.set_params(nbins=number_ticks)
# ax_energy.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1e'))
plt.xlim(s[0], s[-1])
fig.subplots_adjust(top=0.95, bottom=0.2, right=0.85, left=0.15)
# fig.set_size_inches((8,8),forward=True)
ax_energy.tick_params(axis='y', which='both', colors='b')
ax_energy.yaxis.label.set_color('b')
ax_bunching.tick_params(axis='y', which='both', colors='grey')
ax_bunching.yaxis.label.set_color('grey')
ax_power.tick_params(axis='y', which='both', colors='g')
ax_power.yaxis.label.set_color('g')
ax_power.yaxis.get_offset_text().set_color(ax_power.yaxis.label.get_color())
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
fig.savefig(g.filePath + '_z_' + str(z) + 'm_scan.' + str(savefig), format=savefig)
return fig
def plot_dfl(F, z_lim=[], xy_lim=[], figsize=4, legend=True, phase=False, far_field=False, freq_domain=False, fig_name=None, auto_zoom=False, column_3d=True, savefig=False, showfig=False, return_proj=False, log_scale=0, debug=1, vartype_dfl=complex64):
'''
Plots a dfl radiation object in 3d.
F is a RadiationField() object
z_lim sets the boundaries to CUT the dfl object in z to a range of e.g. [2,5] um or nm, depending on freq_domain=False or True
xy_lim sets the boundaries to SCALE the dfl object in x and y to a range of e.g. [2,5] um or urad, depending on far_field=False or True
figsize rescales the size of the figure
legend is not used yet
phase replaces the Z projection or spectrum with the phase front distribution
far_field and freq_domain carry out an FFT along the xy and z dimensions, respectively
fig_name is the desired name of the output figure
auto_zoom automatically scales the images in x, y and z to roughly 1% of the intensity limits
column_3d plots top and side views of the radiation distribution
savefig and showfig allow saving the figure to an image (savefig='png' (default) or savefig='eps', etc.) or displaying it (slower)
return_proj returns the [xy_proj, yz_proj, xz_proj, x, y, z] array
vartype_dfl is the data type used to store the dfl in memory [either complex128 (two 64-bit floats) or complex64 (two 32-bit floats)]; complex64 may save memory
'''
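# a minimal usage sketch (assuming dfl was obtained earlier, e.g. via read_dfl_file_out):
# plot_dfl(dfl, far_field=1, freq_domain=0, savefig='png', showfig=False)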
import matplotlib.colors as colors
if showfig == False and savefig == False:
return
from ocelot.utils.xfel_utils import dfl_fft_xy, dfl_fft_z
filePath = F.filePath
text_present = 1
if debug > 0:
print(' plotting radiation field')
start_time = time.time()
suffix = ''
if F.Nz() != 1:
# Make sure it is time-dependent
ncar_z = F.Nz()
leng_z = F.Lz()
z = np.linspace(0, leng_z, ncar_z)
else:
column_3d = False
phase = True
freq_domain = False
z_lim = []
xlamds = F.xlamds
# number of mesh points
ncar_x = F.Nx()
leng_x = F.Lx() # transverse size of mesh [m]
ncar_y = F.Ny()
leng_y = F.Ly()
E_pulse = F.E()
if F.Nz() != 1:
if freq_domain:
if F.domain_z == 't':
F = dfl_fft_z(F, debug=debug)
z = F.scale_z() * 1e9
F.fld = F.fld[::-1, :, :]
z = z[::-1]
unit_z = r'nm'
z_label = r'$\lambda$ [' + unit_z + ']'
z_labelv = r'[arb. units]'
z_title = 'Spectrum'
z_color = 'red'
suffix += '_fd'
else:
if F.domain_z == 'f':
F = dfl_fft_z(F, debug=debug)
z = F.scale_z() * 1e6
unit_z = r'$\mu$m'
z_label = '$s$ [' + unit_z + ']'
z_labelv = r'Power [W]'
z_title = 'Z projection'
z_color = 'blue'
else:
z = 0
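# optionally cut the field along z (or lambda in the frequency domain) to the requested z_lim window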
if z_lim != []:
if len(z_lim) == 1:
z_lim = [z_lim, z_lim]
if z_lim[0] > z_lim[1]:
z_lim[0] = -inf
z_lim[1] = inf
if z_lim[1] < np.amin(z) or z_lim[1] > np.amax(z):
z_lim[1] = np.amax(z)
# print(' set top lim to max')
if z_lim[0] > np.amax(z) or z_lim[0] < np.amin(z):
z_lim[0] = np.amin(z)
# print(' set low lim to min')
if debug > 1:
print(' setting z-axis limits to ' + str(np.amin(z)) + ':' + str(z_lim[0]) + '-' + str(z_lim[1]) + ':' + str(np.amax(z))) # tmp
z_lim_1 = np.where(z <= z_lim[0])[0][-1]
z_lim_2 = np.where(z >= z_lim[1])[0][0]
if z_lim_1 == z_lim_2 and z_lim_1 == 0:
z_lim_2 = z_lim_1 + 1
elif z_lim_1 == z_lim_2 and z_lim_1 != 0:
z_lim_1 = z_lim_2 - 1
F.fld = F.fld[z_lim_1:z_lim_2, :, :]
z = z[z_lim_1:z_lim_2]
ncar_z = F.fld.shape[0]
suffix += '_zoom_%.2f-%.2f' % (np.amin(z), np.amax(z))
if far_field:
if F.domain_xy == 's':
F = dfl_fft_xy(F, debug=debug)
x = F.scale_x() * 1e6
y = F.scale_y() * 1e6
unit_xy = r'$\mu$rad'
x_label = r'$\theta_x$ [' + unit_xy + ']'
y_label = r'$\theta_y$ [' + unit_xy + ']'
suffix += '_ff'
x_title = 'X divergence'
y_title = 'Y divergence'
xy_title = 'Far field intensity'
x_y_color = 'green'
# if debug>1: print(' done in %.2f seconds' %(time.time()-calc_time))
else:
if F.domain_xy == 'k':
F = dfl_fft_xy(F, debug=debug)
x = F.scale_x() * 1e6
y = F.scale_y() * 1e6
unit_xy = r'$\mu$m'
x_label = 'x [' + unit_xy + ']'
y_label = 'y [' + unit_xy + ']'
x_title = 'X projection'
y_title = 'Y projection'
xy_title = 'Intensity'
x_y_color = 'blue'
if log_scale:
suffix += '_log'
F.fld = F.fld.astype(np.complex64)
xy_proj = F.int_xy()
xy_proj_ph = np.zeros_like(xy_proj) # tmp
yz_proj = F.int_zy()
xz_proj = F.int_zx()
z_proj = F.int_z()
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if fig_name is None:
if F.fileName() == '':
fig = plt.figure('Radiation distribution' + suffix)
else:
fig = plt.figure('Radiation distribution' + suffix + ' ' + F.fileName())
else:
fig = plt.figure(fig_name + suffix)
del F
fig.clf()
fig.set_size_inches(((3 + 2 * column_3d) * figsize, 3 * figsize), forward=True)
cmap_int = plt.get_cmap('jet') # jet, inferno, viridis - change to whichever is convenient
cmap_ph = plt.get_cmap('hsv')
x_line = xy_proj[:, int((ncar_y - 1) / 2)]
y_line = xy_proj[int((ncar_x - 1) / 2), :]
if max(x_line) != 0 and max(y_line) != 0:
x_line, y_line = x_line / max(x_line), y_line / max(y_line)
ax_int = fig.add_subplot(2, 2 + column_3d, 1)
if log_scale:
intplt = ax_int.pcolormesh(x, y, swapaxes(xy_proj, 1, 0), norm=colors.LogNorm(vmin=xy_proj.min(), vmax=xy_proj.max()), cmap=cmap_int)
else:
intplt = ax_int.pcolormesh(x, y, swapaxes(xy_proj, 1, 0), cmap=cmap_int)
ax_int.set_title(xy_title, fontsize=15)
ax_int.set_xlabel(r'' + x_label)
ax_int.set_ylabel(y_label)
if len(z) > 1 and text_present:
ax_int.text(0.01, 0.01, r'$E_{p}$=%.2e J' % (E_pulse), horizontalalignment='left', verticalalignment='bottom', fontsize=12, color='white', transform=ax_int.transAxes)
if phase == True:
ax_ph = fig.add_subplot(2, 2 + column_3d, 4 + column_3d, sharex=ax_int, sharey=ax_int)
ax_ph.pcolormesh(x, y, swapaxes(xy_proj_ph, 1, 0), cmap=cmap_ph)
ax_ph.axis([min(x), max(x), min(y), max(y)])
ax_ph.set_title('Phase', fontsize=15)
else:
ax_z = fig.add_subplot(2, 2 + column_3d, 4 + column_3d)
if log_scale:
ax_z.semilogy(z, z_proj, linewidth=1.5, color=z_color)
else:
ax_z.plot(z, z_proj, linewidth=1.5, color=z_color)
ax_z.set_title(z_title, fontsize=15)
ax_z.set_xlabel(z_label)
ax_z.set_ylabel(z_labelv)
ax_z.set_ylim(ymin=0)
ax_proj_x = fig.add_subplot(2, 2 + column_3d, 3 + column_3d, sharex=ax_int)
ax_proj_x.set_title(x_title, fontsize=15)
if sum(x_line) != 0:
x_line_f, rms_x = gauss_fit(x, x_line) # fit with Gaussian, and return fitted function and rms
fwhm_x = fwhm3(x_line)[1] * dx # measure FWHM
else:
x_line_f = np.zeros_like(x_line)
rms_x = 0
fwhm_x = 0
if log_scale:
ax_proj_x.semilogy(x, x_line, linewidth=2, color=x_y_color)
ax_proj_x.semilogy(x, x_line_f, color='grey')
else:
ax_proj_x.plot(x, x_line, linewidth=2, color=x_y_color)
ax_proj_x.plot(x, x_line_f, color='grey')
if text_present:
try:
ax_proj_x.text(0.95, 0.95, 'fwhm= \n' + str(round_sig(fwhm_x, 3)) + r' [' + unit_xy + ']\nrms= \n' + str(round_sig(rms_x, 3)) + r' [' + unit_xy + ']', horizontalalignment='right', verticalalignment='top', transform=ax_proj_x.transAxes, fontsize=12)
except:
pass
ax_proj_x.set_ylim(ymin=0, ymax=1)
ax_proj_x.set_xlabel(x_label)
ax_proj_y = fig.add_subplot(2, 2 + column_3d, 2, sharey=ax_int)
ax_proj_y.set_title(y_title, fontsize=15)
if sum(y_line) != 0:
y_line_f, rms_y = gauss_fit(y, y_line) # fit with Gaussian, and return fitted function and rms
fwhm_y = fwhm3(y_line)[1] * dy # measure FWHM
else:
y_line_f = np.zeros_like(y_line)
rms_y = 0
fwhm_y = 0
if log_scale:
ax_proj_y.semilogx(y_line, y, linewidth=2, color=x_y_color)
ax_proj_y.semilogx(y_line_f, y, color='grey')
else:
ax_proj_y.plot(y_line, y, linewidth=2, color=x_y_color)
ax_proj_y.plot(y_line_f, y, color='grey')
if text_present:
try:
ax_proj_y.text(0.95, 0.95, 'fwhm= ' + str(round_sig(fwhm_y, 3)) + r' [' + unit_xy + ']\nrms= ' + str(round_sig(rms_y, 3)) + r' [' + unit_xy + ']', horizontalalignment='right', verticalalignment='top', transform=ax_proj_y.transAxes, fontsize=12)
except Exception:
pass
ax_proj_y.set_xlim(xmin=0, xmax=1)
ax_proj_y.set_ylabel(y_label)
# if log_scale:
# ax_proj_x.set_yscale('log')
# ax_proj_y.set_xscale('log')
# if not phase:
# ax_z.set_yscale('log')
if column_3d:
min_xz_proj = xz_proj[xz_proj != 0].min()
min_yz_proj = yz_proj[yz_proj != 0].min()
if phase == True:
ax_proj_xz = fig.add_subplot(2, 2 + column_3d, 6)
else:
ax_proj_xz = fig.add_subplot(2, 2 + column_3d, 6, sharex=ax_z)
if log_scale:
ax_proj_xz.pcolormesh(z, x, swapaxes(xz_proj, 1, 0), norm=colors.LogNorm(vmin=min_xz_proj, vmax=xz_proj.max()), cmap=cmap_int)
else:
ax_proj_xz.pcolormesh(z, x, swapaxes(xz_proj, 1, 0), cmap=cmap_int)
ax_proj_xz.set_title('Top view', fontsize=15)
ax_proj_xz.set_xlabel(z_label)
ax_proj_xz.set_ylabel(x_label)
ax_proj_yz = fig.add_subplot(2, 2 + column_3d, 3, sharey=ax_int, sharex=ax_proj_xz)
if log_scale:
ax_proj_yz.pcolormesh(z, y, swapaxes(yz_proj, 1, 0), norm=colors.LogNorm(vmin=min_yz_proj, vmax=yz_proj.max()), cmap=cmap_int)
else:
ax_proj_yz.pcolormesh(z, y, swapaxes(yz_proj, 1, 0), cmap=cmap_int)
ax_proj_yz.set_title('Side view', fontsize=15)
ax_proj_yz.set_xlabel(z_label)
ax_proj_yz.set_ylabel(y_label)
cbar = 0
if cbar:
fig.subplots_adjust(top=0.95, bottom=0.05, right=0.85, left=0.1)
cbar_int = fig.add_axes([0.89, 0.15, 0.015, 0.7])
cbar = plt.colorbar(intplt, cax=cbar_int) # pad = -0.05 ,fraction=0.01)
# cbar.set_label(r'[$ph/cm^2$]',size=10)
cbar.set_label(r'a.u.', size=10)
if auto_zoom != False:
size_x = max(abs(x[nonzero(x_line > 0.005)][[0, -1]]))
size_y = max(abs(y[nonzero(y_line > 0.005)][[0, -1]]))
size_xy = max(size_x, size_y)
if phase == True and column_3d == True and z_lim == []:
ax_proj_xz.set_xlim(z[nonzero(z_proj > max(z_proj) * 0.01)][[0, -1]])
elif phase == False and z_lim == []:
ax_z.set_xlim(z[nonzero(z_proj > max(z_proj) * 0.01)][[0, -1]])
print (' scaling xy to', size_xy)
ax_proj_xz.set_ylim([-size_xy, size_xy])
elif column_3d == True:
ax_proj_xz.set_ylim([-size_xy, size_xy])
ax_int.axis('equal')
ax_int.axis([-size_xy, size_xy, -size_xy, size_xy])
suffix += '_zmd'
else:
if column_3d == True:
ax_proj_xz.axis('tight')
ax_proj_yz.axis('tight')
elif column_3d == False and phase == False:
ax_z.axis('tight')
ax_int.set_aspect('equal')
ax_int.autoscale(tight=True)
if len(xy_lim) == 2:
ax_int.axis([-xy_lim[0], xy_lim[0], -xy_lim[1], xy_lim[1]])
ax_proj_xz.set_ylim([-xy_lim[0], xy_lim[0]])
elif len(xy_lim) == 1:
ax_int.axis([-xy_lim[0], xy_lim[0], -xy_lim[0], xy_lim[0]])
ax_proj_xz.set_ylim([-xy_lim[0], xy_lim[0]])
fig.subplots_adjust(wspace=0.4, hspace=0.4)
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 0:
print(' saving *' + suffix + '.' + savefig)
fig.savefig(filePath + suffix + '.' + str(savefig), format=savefig)
if debug > 0:
print(' done in %.2f seconds' % (time.time() - start_time))
plt.draw()
if showfig == True:
if debug > 0:
print(' showing dfl')
plt.show()
else:
plt.close(fig)
if return_proj:
return [xy_proj, yz_proj, xz_proj, x, y, z]
else:
return
def plot_gen_stat(proj_dir, run_inp=[], stage_inp=[], param_inp=[], s_param_inp=['p_int', 'energy', 'r_size_weighted', 'spec', 'error'], z_param_inp=['p_int', 'phi_mid_disp', 'spec', 'bunching', 'wigner'], dfl_param_inp=['dfl_spec'], run_param_inp=['p_int', 'spec', 'energy'], s_inp=['max'], z_inp=[0,'end'], run_s_inp=['max'], run_z_inp=['end'], savefig=1, saveval=1, showfig=0, debug=1):
'''
The routine for plotting the statistical info of many GENESIS runs
proj_dir - directory path in which the /run_xxx folders are located
run_inp=[1,2,3] - numbers of runs to be processed; default - all possible up to run 1000
stage_inp=[1,2,3] - stages to be processed; default - all possible up to stage 15
s_param_inp=['p_int','energy'] - parameters to be displayed at a certain position along the beam as a function of undulator length
z_param_inp=['p_int','phi_mid_disp','spec','bunching'] - parameters to be displayed at a certain position along the undulator as a function of position along the beam
s_inp=[1e-6,'max','mean'] - s-positions to be plotted as a function of z: a given s value, the maximum over s, or the mean over s
z_inp=[12,'end'] - z-positions at which radiation and spectrum parameters are plotted
savefig=1 - saves figures in the given file format to the proj_dir/results folder; 1 corresponds to 'png', other values such as 'eps' are accepted
saveval=1 - saves the plotted values to text files with the same names as the figures: first column - argument value (s [um], z [m] or lambda [nm]), second column - parameter averaged over shots, remaining columns - single-shot values
showfig=1 - invokes plt.show() to display figures interactively; may be time- and processor-consuming
dfl_param_inp - dfl-based parameters: dfl_power, dfl_spec, dfl_size, dfl_divergence
'''
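# A minimal usage sketch (hypothetical project path; assumes the run_xxx/run.xxx.sY.gout
# layout described above):
# plot_gen_stat('/path/to/proj/', run_inp=[0, 1, 2], stage_inp=[1],
#               s_param_inp=['p_int', 'energy'], z_param_inp=['spec'],
#               s_inp=['max'], z_inp=['end'], savefig='png', saveval=1, showfig=0)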
import copy
rc('text', usetex=False)
dict_name = {'p_int': 'radiation power', 'energy': 'radiation pulse energy', 'el_e_spread': 'el.beam energy spread', 'el_energy': 'el.beam energy average', 'bunching': 'el.beam bunching', 'spec': 'radiation on-axis spectral density', 'dfl_spec': 'total radiation spectral density', 'r_size': 'radiation transv size', 'r_size_weighted': 'radiation transv size (weighted)', 'xrms': 'el.beam x size', 'yrms': 'el.beam y size', 'error': 'genesis simulation error', 'p_mid': 'radiation power on-axis', 'phi_mid': 'radiation phase on-axis', 'increment': 'radiation power increment'}
dict_unit = {'p_int': '[W]', 'energy': '[J]', 'el_e_spread': '(gamma)', 'el_energy': '(gamma)', 'bunching': '', 'spec': '[arb.units]', 'dfl_spec': '[arb.units]', 'r_size': '[m]', 'xrms': '[m]', 'yrms': '[m]', 'error': ''}
figsize = (14, 7)
if debug > 0:
print ('statistical postprocessing started')
start_time = time.time()
if proj_dir[-1] != '/':
proj_dir += '/'
if stage_inp == []:
stage_range = range(15)  # guess possible stages (0 to 14)
else:
stage_range = stage_inp
for stage in stage_range: # scan through stages
outlist = [GenesisOutput() for i in range(1000)]
if run_inp == []:
run_range = range(1000)
else:
run_range = run_inp
run_range_good = []
for irun in run_range:
out_file = proj_dir + 'run_' + str(irun) + '/run.' + str(irun) + '.s' + str(stage) + '.gout'
if os.path.isfile(out_file):
# try:
outlist[irun] = read_out_file(out_file, read_level=2, debug=1)
run_range_good.append(irun)
# except:
# print(' could not read '+out_file)
run_range = run_range_good
# if len(run_range)!=0 and debug>0:
# print('stage = ', stage)
# check if all gout have the same number of slices nSlice and history records nZ
for irun in run_range[1:]:
if outlist[irun].nSlices != outlist[run_range[0]].nSlices or outlist[irun].nZ != outlist[run_range[0]].nZ:
raise ValueError('Non-uniform out objects')
if run_range == [] or len(run_range) == 1:
continue
if debug > 0:
print(' processing runs ' + str(run_range) + ' of stage ' + str(stage))
# for irun in run_range:
# out_file=proj_dir+'run_'+str(irun)+'/run.'+str(irun)+'.s'+str(stage)+'.gout'
# outlist[irun] = read_out_file(out_file,read_level=1)
# print(outlist[irun].sliceKeys)
# if param_inp==[]:
# if debug>1: print(outlist[run_range[0]].sliceKeys_used)
# param_range=outlist[run_range[0]].sliceKeys_used
# else:
param_range = param_inp
if savefig != False or saveval != False:
if savefig == True:
savefig = 'png'
saving_path = proj_dir + 'results/'
if not os.path.isdir(saving_path):
os.makedirs(saving_path)
if debug > 1:
print(' saving to ' + saving_path)
# if s_param_inp==[]:
# s_param_range=param_range
# else:
s_param_range = s_param_inp
if debug > 0:
print(' processing S parameters ' + str(s_param_range))
if debug > 1:
print(' s_inp ' + str(s_inp))
for param in s_param_range:
for s_ind in s_inp:
s_value = []
s_fig_name = 'stage_' + str(stage) + '__Z__' + dict_name.get(param, param).replace(' ', '_').replace('.', '_') + '__' + str(s_ind)
for irun in run_range:
if not hasattr(outlist[irun], param):
continue
else:
if debug > 0:
print ('parameter = ', param)
param_matrix = copy.deepcopy(getattr(outlist[irun], param))
if debug > 1:
print('param', param, 'irun', irun, 's_ind', s_ind)
if debug > 1:
print('shape param_matrix', shape(param_matrix))
if debug > 1:
print('length', len(param_matrix), len(outlist[irun].z))
if len(param_matrix) == len(outlist[irun].z):
s_value.append(param_matrix)
else:
if s_ind == 'max':
s_value.append(np.amax(param_matrix, axis=0))
elif s_ind == 'max_cur':
s_value.append(param_matrix[outlist[irun].sn_Imax, :])
elif s_ind == 'mean':
s_value.append(np.mean(param_matrix, axis=0))
else:
si = np.where(outlist[irun].s <= s_ind)[-1][-1]
s_value.append(param_matrix[si, :])
if s_value != []:
fig = plt.figure(s_fig_name)
fig.clf()
fig.set_size_inches(figsize, forward=True)
if debug > 1:
print('plotting array shapes', shape(outlist[irun].z), shape(swapaxes(s_value, 0, 1)))
fig = plt.plot(outlist[irun].z, swapaxes(s_value, 0, 1), '0.8', linewidth=1)
fig = plt.plot(outlist[irun].z, s_value[0], '0.5', linewidth=1)
fig = plt.plot(outlist[irun].z, mean(s_value, 0), 'k', linewidth=2)
plt.xlim([min(outlist[irun].z), max(outlist[irun].z)])
# fig[0].axes.get_yaxis().get_major_formatter().set_scientific(True)
# plt.ticklabel_format(style='sci')
plt.xlabel('z [m]')
plt.ylabel(dict_name.get(param, param) + ' ' + dict_unit.get(param, ''))
if savefig != False:
if debug > 1:
print(' saving ' + s_fig_name + '.' + savefig)
plt.draw()
plt.savefig(saving_path + s_fig_name + '.' + savefig, format=savefig)
if saveval != False:
if debug > 1:
print(' saving ' + s_fig_name + '.txt')
np.savetxt(saving_path + s_fig_name + '.txt', vstack([outlist[irun].z, mean(s_value, 0), s_value]).T, fmt="%E", newline='\n', comments='')
if not showfig:
plt.close('all')
# if z_param_inp==[]:
# z_param_range=param_range
# else:
z_param_range = z_param_inp
if debug > 0:
print(' processing Z parameters ' + str(z_param_range))
if debug > 1:
print(' z_inp ' + str(z_inp))
if 'wigner' in z_param_range:
if debug > 0:
print(' processing Wigner')
for z_ind in z_inp:
w = np.zeros((outlist[run_range[0]].nSlices, outlist[run_range[0]].nSlices))  # do not rely on a leftover loop variable
for irun in run_range:
out=outlist[irun]
W=wigner_out(out,z=z_ind,debug=0)
w += W.wig
W.wig = w / len(run_range)  # average over the runs actually read, not the preallocated list
W.filePath = proj_dir + 'results' + os.path.sep + 'stage_' + str(stage) + '__WIG__' + str(z_ind) + '__m'
wig_fig_name = 'stage_' + str(stage) + '__WIG__' + str(z_ind) + '__m'
plot_wigner(W, z=z_ind, p_units='um', s_units='eV', fig_name=wig_fig_name, savefig=savefig, debug=0)
for param in z_param_range:
for z_ind in z_inp:
z_value = []
z_fig_name = 'stage_' + str(stage) + '__S__' + dict_name.get(param, param).replace(' ', '_').replace('.', '_') + '__' + str(z_ind) + '__m'
for irun in run_range:
if not hasattr(outlist[irun], param):
break
else:
if debug > 0:
print ('parameter = ', param)
param_matrix = copy.deepcopy(getattr(outlist[irun], param))
if debug > 1:
print('param', param, 'irun', irun, 'z_ind', z_ind)
if debug > 1:
print('shape param_matrix', shape(param_matrix))
if debug > 1:
print('length', len(param_matrix), len(outlist[irun].z))
if len(param_matrix) == len(outlist[irun].z): # case if the array is 1D (no s/z matrix presented)
break
else:
if z_ind == 'end' or z_ind == inf:
z_value.append(param_matrix[:, -1]) # after undulator
elif z_ind == 'start':
z_value.append(param_matrix[:, 0]) # before undulator
else:
zi = np.where(outlist[irun].z <= z_ind)[-1][-1]
z_value.append(param_matrix[:, zi])
if z_value != []:
fig = plt.figure(z_fig_name)
fig.clf()
fig.set_size_inches(figsize, forward=True)
if param == 'spec':
freq_scale = outlist[irun].freq_lamd # *1e9
if debug > 1:
print('plotting array shapes freq', shape(freq_scale), shape(swapaxes(z_value, 0, 1)))
fig = plt.plot(freq_scale, swapaxes(z_value, 0, 1), '0.8')
fig = plt.plot(freq_scale, z_value[0], '0.5', linewidth=1)
fig = plt.plot(freq_scale, mean(z_value, 0), 'k', linewidth=2)
plt.xlim([min(freq_scale), max(freq_scale)])
plt.xlabel('$\lambda$ [nm]')
else:
s_scale = outlist[irun].s * 1e6
if debug > 1:
print('plotting array shapes', shape(s_scale), shape(swapaxes(z_value, 0, 1)))
fig = plt.plot(s_scale, swapaxes(z_value, 0, 1), '0.8')
fig = plt.plot(s_scale, z_value[0], '0.5', linewidth=1)
fig = plt.plot(s_scale, mean(z_value, 0), 'k', linewidth=2)
plt.xlim([min(s_scale), max(s_scale)])
plt.xlabel('s [um]')
plt.ylabel(dict_name.get(param, param) + ' ' + dict_unit.get(param, ''))
if savefig != False:
if debug > 1:
print(' saving ' + z_fig_name + '.' + savefig)
plt.draw()
plt.savefig(saving_path + z_fig_name + '.' + savefig, format=savefig)
if saveval != False:
if debug > 1:
print(' saving ' + z_fig_name + '.txt')
if param == 'spec':
np.savetxt(saving_path + z_fig_name + '.txt', vstack([outlist[irun].freq_lamd * 1e9, mean(z_value, 0), z_value]).T, fmt="%E", newline='\n', comments='')
else:
np.savetxt(saving_path + z_fig_name + '.txt', vstack([outlist[irun].s * 1e6, mean(z_value, 0), z_value]).T, fmt="%E", newline='\n', comments='')
if not showfig:
plt.close('all')
# if run_param_inp==[]:
# run_param_range=[]
# else:
run_param_range = run_param_inp
if debug > 0:
print(' processing run parameters ' + str(run_param_range))
if debug > 1:
print(' run_s_inp ' + str(run_s_inp))
if debug > 1:
print(' run_z_inp ' + str(run_z_inp))
for param in run_param_range:
for z_ind in run_z_inp:
for s_ind in run_s_inp: # not optimal
run_value = []
run_value_arr = []
run_fig_name = 'stage_' + str(stage) + '__RUN__' + dict_name.get(param, param).replace(' ', '_').replace('.', '_') + '__' + str(s_ind) + '__um__' + str(z_ind) + '__m'
for irun in run_range:
if not hasattr(outlist[irun], param):
break
else:
if debug > 0:
print ('parameter = ', param)
param_matrix = copy.deepcopy(getattr(outlist[irun], param))
if debug > 1:
print('param', param, 'irun', irun, 'z_ind', z_ind, 's_ind', s_ind)
if debug > 1:
print('shape param_matrix', shape(param_matrix))
if debug > 1:
print('length', len(param_matrix), len(outlist[irun].z))
if len(param_matrix) != len(outlist[irun].z): # case if the array is 1D (no s/z matrix presented)
if z_ind == 'end' or z_ind == inf:
run_value = param_matrix[:, -1] # after undulator
elif z_ind == 'start':
run_value = param_matrix[:, 0] # before undulator
else:
zi = np.where(outlist[irun].z <= z_ind)[-1][-1]
run_value = param_matrix[:, zi]
else:
run_value = param_matrix
if s_ind == 'max':
run_value = np.amax(run_value)
elif s_ind == 'max_cur':
run_value = run_value[outlist[irun].sn_Imax]
elif s_ind == 'mean':
run_value = np.mean(run_value)
else:
si = np.where(outlist[irun].s <= s_ind)[-1][-1]
run_value = run_value[si]
if debug > 1:
print('run_value ', run_value)
run_value_arr.append(run_value)
if run_value_arr != []:
fig = plt.figure(run_fig_name)
fig.clf()
fig.set_size_inches(figsize, forward=True)
fig = plt.plot(run_range, run_value_arr, 'k')
plt.xlim([min(run_range), max(run_range)])
plt.xlabel('run')
plt.ylabel(dict_name.get(param, param) + ' ' + dict_unit.get(param, '') + ' (' + str(s_ind) + ' um, ' + str(z_ind) + ' m)')
if savefig != False:
if debug > 1:
print(' saving ' + run_fig_name + '.' + savefig)
plt.draw()
plt.savefig(saving_path + run_fig_name + '.' + savefig, format=savefig)
if saveval != False:
if debug > 1:
print(' saving ' + run_fig_name + '.txt')
np.savetxt(saving_path + run_fig_name + '.txt', vstack([run_range, run_value_arr]).T, fmt="%E", newline='\n', comments='')
if not showfig:
plt.close('all')
if dfl_param_inp != []:
if debug > 0:
print(' processing DFL parameters ' + str(dfl_param_inp))
for param in dfl_param_inp:
dfl_value = []
dfl_fig_name = 'stage_' + str(stage) + '__DFL__' + param.replace(' ', '_').replace('.', '_') + '__end'
for irun in run_range:
dfl_filePath = proj_dir + 'run_' + str(irun) + '/run.' + str(irun) + '.s' + str(stage) + '.gout.dfl'
dfl = read_dfl_file_out(outlist[irun], debug=debug)
# dfl=read_dfl_file(dfl_filePath, Nxy=outlist[irun]('ncar'),debug=debug)
# read_dfl_file(filePath, Nxy=None, Lxy=None, Lz=None, zsep=None, xlamds=None, vartype=complex,debug=1):
dfl = dfl.fld
if dfl.shape[0] != 1:
ncar_z = dfl.shape[0]
leng_z = outlist[irun]('xlamds') * outlist[irun]('zsep') * ncar_z
if param == 'dfl_spec':
spec = np.fft.ifftshift(np.fft.fft(dfl, axis=0), 0) / sqrt(ncar_z)
spec = abs(spec)**2
spec = sum(spec, (1, 2))
dfl_value.append(spec)
dk = 2 * pi / leng_z
k = 2 * pi / outlist[irun]('xlamds')
freq_scale = 2 * pi / np.linspace(k - dk / 2 * ncar_z, k + dk / 2 * ncar_z, ncar_z) * 1e9
if debug > 1:
print(' spectrum calculated')
if dfl_value != []:
fig = plt.figure(dfl_fig_name)
fig.clf()
fig.set_size_inches(figsize, forward=True)
if param == 'dfl_spec':
fig = plt.plot(freq_scale, swapaxes(dfl_value, 0, 1), '0.8')
fig = plt.plot(freq_scale, dfl_value[0], '0.5', linewidth=1)
fig = plt.plot(freq_scale, mean(dfl_value, 0), 'k', linewidth=2)
plt.xlabel('$\lambda$ [nm]')
plt.ylabel(dict_name.get(param, param) + ' ' + dict_unit.get(param, ''))
if savefig != False:
if debug > 1:
print(' saving ' + dfl_fig_name + '.' + savefig)
plt.draw()
plt.savefig(saving_path + dfl_fig_name + '.' + savefig, format=savefig)
if saveval != False:
if debug > 1:
print(' saving ' + dfl_fig_name + '.txt')
if param == 'dfl_spec':
np.savetxt(saving_path + dfl_fig_name + '.txt', vstack([freq_scale * 1e9, mean(dfl_value, 0), dfl_value]).T, fmt="%E", newline='\n', comments='')
if not showfig:
plt.close('all')
plt.draw()
if showfig:
plt.show()
else:
plt.close('all')
if debug > 0:
print(' done in %.2f seconds' % (time.time() - start_time))
def plot_gen_corr(proj_dir, run_inp=[], p1=(), p2=(), savefig=False, showfig=False, saveval=False):
# param (parameter[str], stage[int], z_position[double], s_position [double or 'max'/'mean' stings])
# e.g. ('p_int',1,inf,'max') , ('spec',1,inf,'max')
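# A usage sketch (hypothetical project path), correlating the maximum power of two stages
# at the undulator exit:
# plot_gen_corr('/path/to/proj/', run_inp=range(10),
#               p1=('p_int', 1, inf, 'max'), p2=('p_int', 2, inf, 'max'),
#               savefig='png', saveval=True)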
figsize = (7, 7)
if proj_dir[-1] != '/':
proj_dir += '/'
param_1, stage_1, z_1, s_1 = p1
param_2, stage_2, z_2, s_2 = p2
outlist_1 = [GenesisOutput() for i in range(1000)]
outlist_2 = [GenesisOutput() for i in range(1000)]
if run_inp == []:
run_range = range(1000)
else:
run_range = run_inp
run_range_good_1 = []
run_range_good_2 = []
if param_1 not in []:
for irun in run_range:
out_file_1 = proj_dir + 'run_' + str(irun) + '/run.' + str(irun) + '.s' + str(stage_1) + '.gout'
if os.path.isfile(out_file_1):
outlist_1[irun] = read_out_file(out_file_1, read_level=2)
run_range_good_1.append(irun)
if param_2 not in []:
for irun in run_range:
out_file_2 = proj_dir + 'run_' + str(irun) + '/run.' + str(irun) + '.s' + str(stage_2) + '.gout'
if os.path.isfile(out_file_2):
outlist_2[irun] = read_out_file(out_file_2, read_level=2)
run_range_good_2.append(irun)
run_range_good = [val for val in run_range_good_1 if val in run_range_good_2]
if param_1 not in []:
irun = run_range_good[0]
if isinstance(s_1, (int, long, float)):
index_s1 = np.where(outlist_1[irun].s <= s_1)[-1][-1]
if isinstance(z_1, (int, long, float)):
index_z1 = np.where(outlist_1[irun].z <= z_1)[-1][-1]
if param_2 not in []:
if isinstance(s_2, (int, long, float)):
index_s2 = np.where(outlist_2[irun].s <= s_2)[-1][-1]
if isinstance(z_2, (int, long, float)):
index_z2 = np.where(outlist_2[irun].z <= z_2)[-1][-1]
matrix_1 = []
matrix_2 = []
for i in run_range_good:
matrix_1.append(getattr(outlist_1[i], param_1))
matrix_2.append(getattr(outlist_2[i], param_2))
matrix_1 = np.array(matrix_1)
matrix_2 = np.array(matrix_2)
if ndim(matrix_1) == 2:
var_1 = matrix_1[:, index_z1]
else:
if s_1 == 'mean':
var_1 = mean(matrix_1[:, :, index_z1], axis=1)
elif s_1 == 'max':
var_1 = np.amax(matrix_1[:, :, index_z1], axis=1)
else:
var_1 = matrix_1[:, index_s1, index_z1]
if ndim(matrix_2) == 2:
var_2 = matrix_2[:, index_z2]
else:
if s_2 == 'mean':
var_2 = mean(matrix_2[:, :, index_z2], axis=1)
elif s_2 == 'max':
var_2 = np.amax(matrix_2[:, :, index_z2], axis=1)
else:
var_2 = matrix_2[:, index_s2, index_z2]
corr_fig_name = 'corr_' + param_1 + '_s' + str(stage_1) + '_at' + str(z_1) + '_' + str(s_1) + '__' + param_2 + '_s' + str(stage_2) + '_at' + str(z_2) + '_' + str(s_2)
fig = plt.figure(corr_fig_name)
fig.clf()
fig.set_size_inches(figsize, forward=True)
fig = plt.scatter(var_1, var_2)
label1 = param_1 + '_s' + str(stage_1) + '_z=' + str(z_1) + '_s=' + str(s_1)
label2 = param_2 + '_s' + str(stage_2) + '_z=' + str(z_2) + '_s=' + str(s_2)
label1 = label1.replace('_', ' ')
label2 = label2.replace('_', ' ')
plt.xlabel(label1)
plt.ylabel(label2)
plt.xlim(np.amin(var_1), np.amax(var_1))
plt.ylim(np.amin(var_2), np.amax(var_2))
plt.xlim(0, np.amax(var_1) * 1.05)
plt.ylim(0, np.amax(var_2) * 1.05)
saving_path = proj_dir + 'results/'
plt.draw()
if savefig != False:
print(' saving ' + corr_fig_name + '.' + savefig)
plt.savefig(saving_path + corr_fig_name + '.' + savefig, format=savefig)
if saveval != False:
print(' saving ' + corr_fig_name + '.txt')
np.savetxt(saving_path + corr_fig_name + '.txt', vstack([var_1, var_2]).T, fmt="%E", newline='\n', comments=param_1 + '_s' + str(stage_1) + '_at' + str(z_1) + '_' + str(s_1) + ' ' + param_2 + '_s' + str(stage_2) + '_at' + str(z_2) + '_' + str(s_2))
if showfig:
plt.show()
else:
plt.close('all')
return fig
# np.where(out.s>1.8e-6)[0][0]
def plot_dpa_bucket_out(out, dpa, slice_pos=None, repeat=1, GeV=1, figsize=4, legend=True, fig_name=None, savefig=False, showfig=False, debug=1):
if out.nSlices > 1:
if slice_pos is None or slice_pos < np.amin(out.s) or slice_pos > np.amax(out.s):
raise ValueError('slice_pos outside out.s range')
else:
slice_num = np.where(out.s > slice_pos)[0][0]
return plot_dpa_bucket(dpa=dpa, slice_num=slice_num, repeat=repeat, GeV=GeV, figsize=figsize, legend=legend, fig_name=fig_name, savefig=savefig, showfig=showfig, debug=debug)
else:
slice_num = 0
return plot_dpa_bucket(dpa=dpa, slice_num=slice_num, repeat=repeat, GeV=GeV, figsize=figsize, legend=legend, fig_name=fig_name, savefig=savefig, showfig=showfig, debug=debug)
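# Usage sketch (hypothetical file names; read_dpa_file_out is assumed to be the matching
# particle-file reader available alongside read_out_file):
# out = read_out_file('run.0.s1.gout', read_level=2)
# dpa = read_dpa_file_out(out)
# plot_dpa_bucket_out(out, dpa, slice_pos=2e-6, repeat=2, showfig=True)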
def plot_dpa_bucket(dpa, slice_num=None, repeat=1, GeV=1, figsize=4, legend=True, fig_name=None, savefig=False, showfig=False, debug=1):
part_colors = ['darkred', 'orange', 'g', 'b', 'm']
if showfig == False and savefig == False:
return
if debug > 0:
print(' plotting bucket')
start_time = time.time()
if shape(dpa.ph)[0] == 1:
slice_num = 0
else:
assert (slice_num < shape(dpa.ph)[0]), 'slice_num larger than the dpa shape'
if fig_name == None:
fig_name = 'Electron phase space ' + dpa.fileName()
fig = plt.figure(fig_name)
fig.clf()
fig.set_size_inches((5 * figsize, 3 * figsize), forward=True)
ax_z_hist = plt.subplot2grid((4, 1), (0, 0), rowspan=1)
ax_main = plt.subplot2grid((4, 1), (1, 0), rowspan=3, sharex=ax_z_hist)
nbins = shape(dpa.ph)[1]
phase = dpa.ph[slice_num, :, :]
energy = dpa.e[slice_num, :, :]
if GeV:
energy *= m_e_MeV
energy_mean = round(np.mean(energy), 1)
print(energy_mean)
energy -= energy_mean
phase_hist = np.array([])
for irep in range(repeat):
phase_hist = np.concatenate((phase_hist, np.ravel(phase) + 2 * np.pi * (irep - 1)))
hist, edges = np.histogram(phase_hist, bins=30 * repeat) # calculate current histogram
edges = edges[0:-1] # remove the last bin edge to save equal number of points
ax_z_hist.bar(edges, hist, width=edges[1] - edges[0])
ax_z_hist.set_ylabel('counts')
for label in ax_z_hist.get_xticklabels():
label.set_visible(False)
ax_z_hist.set_xlim([edges[0], edges[-1]])
for irep in range(repeat):
for ibin in range(nbins):
ax_main.scatter(phase[ibin, :] + 2 * np.pi * (irep - 1), energy[ibin, :], color=part_colors[ibin], marker='.')
ax_main.set_xlabel('$\phi$ [rad]')
if GeV:
ax_main.set_ylabel('E [MeV] + ' + str(energy_mean / 1000) + ' [GeV]')
else:
ax_main.set_ylabel('$\gamma$ + ' + str(energy_mean))
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 1:
print(' saving ' + dpa.fileName() + '.' + savefig)
plt.savefig(dpa.filePath + '.' + savefig, format=savefig)
if showfig:
plt.show()
else:
plt.close('all')
def plot_edist(edist, figsize=4, fig_name=None, savefig=False, showfig=False, scatter=False, plot_x_y=True, plot_xy_s=True, bins=(50, 50, 50, 50), flip_t=True, beam_E_plot='eV', cmin=0, debug=1):
if showfig == False and savefig == False:
return
if debug > 0:
print(' plotting edist file')
start_time = time.time()
# suffix=''
if size(bins) == 1:
bins = (bins, bins, bins, bins) # x,y,t,e
if fig_name == None:
fig_name = 'Electron distribution ' + edist.fileName()
fig = plt.figure(fig_name)
fig.clf()
fig.set_size_inches(((3 + plot_x_y + plot_xy_s) * figsize, 3 * figsize), forward=True)
if flip_t:
s = -edist.t * speed_of_light * 1e6
else:
s = edist.t * speed_of_light * 1e6
hist, edges = np.histogram(s, bins=bins[2]) # calculate current histogram
edges = edges[0:-1] # remove the last bin edge to save equal number of points
hist_int = np.trapz(hist, edges) / speed_of_light / 1e6 # normalize
hist = np.rint(hist.astype(float) / (hist_int / float(edist.charge())))
ax_curr = fig.add_subplot(2, 1 + plot_x_y + plot_xy_s, 1)
#ax_curr.hist(s, bins,color='b')
ax_curr.plot(edges, hist, color='b')
ax_curr.set_xlabel('s [$\mu$m]')
ax_curr.set_ylabel('I [A]')
ax_se = fig.add_subplot(2, 1 + plot_x_y + plot_xy_s, 2 + plot_x_y + plot_xy_s, sharex=ax_curr)
if beam_E_plot == 'eV':
energy = edist.g * m_e_MeV
energy_av = int(mean(energy))
if scatter:
ax_se.scatter(s, energy - energy_av, marker='.')
else:
ax_se.hist2d(s, energy - energy_av, [bins[2], bins[3]], cmin=cmin)
ax_se.set_xlabel('s [$\mu$m]')
ax_se.set_ylabel('E + ' + str(energy_av) + ' [MeV]')
else: # elif beam_E_plot=='gamma':
if scatter:
ax_se.scatter(s, edist.g, marker='.')
else:
ax_se.hist2d(s, edist.g, [bins[2], bins[3]], cmin=cmin)
ax_se.set_xlabel('s [$\mu$m]')
ax_se.set_ylabel('$\gamma$')
if plot_xy_s:
ax_xs = fig.add_subplot(2, 1 + plot_x_y + plot_xy_s, 4 + plot_x_y, sharex=ax_curr)
if scatter:
ax_xs.scatter(s, 1e6 * edist.x, marker='.')
else:
ax_xs.hist2d(s, 1e6 * edist.x, [bins[2], bins[0]], cmin=cmin)
ax_xs.set_xlabel('s [$\mu$m]')
ax_xs.set_ylabel('x [$\mu$m]')
ax_ys = fig.add_subplot(2, 1 + plot_x_y + plot_xy_s, 2, sharex=ax_curr)
if scatter:
ax_ys.scatter(s, 1e6 * edist.y, marker='.')
else:
ax_ys.hist2d(s, 1e6 * edist.y, [bins[2], bins[1]], cmin=cmin)
ax_ys.set_xlabel('s [$\mu$m]')
ax_ys.set_ylabel('y [$\mu$m]')
if plot_x_y:
ax_xy = fig.add_subplot(2, 1 + plot_x_y + plot_xy_s, 2 + plot_xy_s)
if scatter:
ax_xy.scatter(edist.x * 1e6, edist.y * 1e6, marker='.')
else:
ax_xy.hist2d(edist.x * 1e6, edist.y * 1e6, [bins[0], bins[1]], cmin=cmin)
ax_xy.set_xlabel('x [$\mu$m]')
ax_xy.set_ylabel('y [$\mu$m]')
ax_pxpy = fig.add_subplot(2, 1 + plot_x_y + plot_xy_s, 4 + 2 * plot_xy_s)
if scatter:
ax_pxpy.scatter(edist.xp * 1e6, edist.yp * 1e6, marker='.')
else:
ax_pxpy.hist2d(edist.xp * 1e6, edist.yp * 1e6, [bins[0], bins[1]], cmin=cmin)
ax_pxpy.set_xlabel('px [$\mu$rad]')
ax_pxpy.set_ylabel('py [$\mu$rad]')
if scatter:
ax_curr.set_xlim([np.amin(s), np.amax(s)])
ax_curr.set_ylim(ymin=0)
fig.subplots_adjust(wspace=0.4, hspace=0.4)
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 1:
print(' saving ' + edist.fileName() + '.' + savefig)
plt.savefig(edist.filePath + '.' + savefig, format=savefig)
if showfig:
plt.show()
else:
plt.close('all')
if debug > 0:
print((' done in %.2f seconds' % (time.time() - start_time)))
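# Usage sketch (hypothetical file name; read_edist_file is assumed to be the distribution
# reader used elsewhere in the package):
# edist = read_edist_file('beam.edist')
# plot_edist(edist, bins=60, scatter=False, beam_E_plot='eV', savefig='png', showfig=False)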
def plot_beam(beam, figsize=3, showfig=False, savefig=False, fig=None, plot_xy=None, debug=0):
if showfig == False and savefig == False:
return
fontsize = 15
if plot_xy == None:
if mean(beam.x) == 0 and mean(beam.y) == 0 and mean(beam.px) == 0 and mean(beam.py) == 0:
plot_xy = 0
else:
plot_xy = 1
if fig == None:
fig = plt.figure()
fig.clf()
fig.set_size_inches((4 * figsize, (3 + plot_xy) * figsize), forward=True)
ax = fig.add_subplot(2 + plot_xy, 2, 1)
plt.grid(True)
ax.set_xlabel(r'$\mu m$')
p1, = plt.plot(1.e6 * np.array(beam.z), beam.I, 'r', lw=3)
plt.plot(1.e6 * beam.z[beam.idx_max], beam.I[beam.idx_max], 'bs')
ax = ax.twinx()
p2, = plt.plot(1.e6 * np.array(beam.z), 1.e-3 * np.array(beam.eloss), 'g', lw=3)
ax.legend([p1, p2], [r'$I [A]$', r'Wake $[kV/m]$'], fontsize=fontsize, loc='best')
# ax.set_xlim([np.amin(beam.z),np.amax(beam.x)])
ax = fig.add_subplot(2 + plot_xy, 2, 2, sharex=ax)
plt.grid(True)
ax.set_xlabel(r'$\mu m$')
#p1,= plt.plot(1.e6 * np.array(beam.z),1.e-3 * np.array(beam.eloss),'r',lw=3)
p1, = plt.plot(1.e6 * np.array(beam.z), beam.g0, 'r', lw=3)
plt.plot(1.e6 * beam.z[beam.idx_max], beam.g0[beam.idx_max], 'bs')
ax = ax.twinx()
p2, = plt.plot(1.e6 * np.array(beam.z), beam.dg, 'g', lw=3)
plt.plot(1.e6 * beam.z[beam.idx_max], beam.dg[beam.idx_max], 'bs')
ax.legend([p1, p2], [r'$\gamma$', r'$\delta \gamma$'], loc='best')
ax = fig.add_subplot(2 + plot_xy, 2, 3, sharex=ax)
plt.grid(True)
ax.set_xlabel(r'$\mu m$')
p1, = plt.plot(1.e6 * np.array(beam.z), beam.ex * 1e6, 'r', lw=3)
p2, = plt.plot(1.e6 * np.array(beam.z), beam.ey * 1e6, 'g', lw=3)
plt.plot(1.e6 * beam.z[beam.idx_max], beam.ex[beam.idx_max] * 1e6, 'bs')
ax.legend([p1, p2], [r'$\varepsilon_x [\mu m]$', r'$\varepsilon_y [\mu m]$'], fontsize=fontsize, loc='best')
# ax3.legend([p3,p4],[r'$\varepsilon_x$',r'$\varepsilon_y$'])
ax = fig.add_subplot(2 + plot_xy, 2, 4, sharex=ax)
plt.grid(True)
ax.set_xlabel(r'$\mu m$')
p1, = plt.plot(1.e6 * np.array(beam.z), beam.betax, 'r', lw=3)
p2, = plt.plot(1.e6 * np.array(beam.z), beam.betay, 'g', lw=3)
plt.plot(1.e6 * beam.z[beam.idx_max], beam.betax[beam.idx_max], 'bs')
ax.legend([p1, p2], [r'$\beta_x [m]$', r'$\beta_y [m]$'], fontsize=fontsize, loc='best')
if plot_xy:
ax = fig.add_subplot(3, 2, 5, sharex=ax)
plt.grid(True)
ax.set_xlabel(r'$\mu m$')
p1, = plt.plot(1.e6 * np.array(beam.z), 1.e6 * np.array(beam.x), 'r', lw=3)
p2, = plt.plot(1.e6 * np.array(beam.z), 1.e6 * np.array(beam.y), 'g', lw=3)
ax.legend([p1, p2], [r'$x [\mu m]$', r'$y [\mu m]$'], fontsize=fontsize, loc='best')
beam_beta = sqrt(1 - (1 / beam.g0**2))
beam_p = beam.g0 * beam_beta
# p=beam.g0*m_e_eV/speed_of_light
pz = sqrt(beam_p**2 - beam.px**2 - beam.py**2)
xp = beam.px / pz
yp = beam.py / pz
ax = fig.add_subplot(3, 2, 6, sharex=ax)
plt.grid(True)
ax.set_xlabel(r'$\mu m$')
p1, = plt.plot(1.e6 * np.array(beam.z), 1.e6 * np.array(xp), 'r', lw=3)
p2, = plt.plot(1.e6 * np.array(beam.z), 1.e6 * np.array(yp), 'g', lw=3)
ax.legend([p1, p2], [r'$x_p [\mu rad]$', r'$y_p [\mu rad]$'], fontsize=fontsize, loc='best')
ax.set_xlim([1.e6 * np.amin(beam.z), 1e6 * np.amax(beam.z)])
fig.subplots_adjust(hspace=0.2, wspace=0.3)
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 1:
print(' saving ' + beam.fileName() + '.' + savefig)
plt.savefig(beam.filePath + '.' + savefig, format=savefig)
if showfig:
plt.show()
else:
plt.close('all')
def plot_wigner(wig_or_out, z=np.inf, p_units='um', s_units='nm', x_lim=(None,None), y_lim=(None,None), downsample=1, cmap='seismic', abs_value=0, fig_name=None, savefig=False, showfig=False, debug=1):
'''
plots wigner distribution (WD) with marginals
wig_or_out - may be WignerDistribution() or GenesisOutput() object
z - (if isinstance(wig_or_out, GenesisOutput)) location at which WD will be calculated
p_units - (um or fs) - units to display power scale
s_units - (nm or eV) - units to display spectrum scale
x_lim, y_lim - scaling limits in given units, (min,max) or [min,max], e.g: (None,6)
abs_value - if True, absolute value of WD is displayed (usually, it has both positive and negative values)
cmap - colormar if abs_value==False (http://matplotlib.org/users/colormaps.html)
'''
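# Usage sketch (hypothetical file name): compute and plot the WD at the undulator exit
# out = read_out_file('run.0.s1.gout', read_level=2)
# plot_wigner(out, z=np.inf, p_units='um', s_units='eV', savefig='png', showfig=False)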
if showfig == False and savefig == False:
return
if debug > 0:
print(' plotting Wigner distribution')
if isinstance(wig_or_out, GenesisOutput):
W = wigner_out(wig_or_out, z)
elif isinstance(wig_or_out, WignerDistribution):
W = wig_or_out
else:
raise ValueError('Unknown object for Wigner plot')
if fig_name is None:
if W.fileName() == '':
fig_text = 'Wigner distribution'
else:
fig_text = 'Wigner distribution ' + W.fileName()
else:
fig_text = fig_name
if W.z is not None:
fig_text += ' ' + str(W.z) + 'm'
fig = plt.figure(fig_text)
plt.clf()
fig.set_size_inches((18, 13), forward=True)
power = W.power()
spec = W.spectrum()
wigner = W.wig
wigner_lim = np.amax(abs(W.wig))
if p_units == 'fs':
power_scale = W.s / speed_of_light * 1e15
p_label_txt = 'time [fs]'
else:
power_scale = W.s * 1e6
p_label_txt = 's [$\mu$m]'
if s_units == 'eV':
spec_scale = speed_of_light * h_eV_s * 1e9 / W.freq_lamd
f_label_txt = 'ph.energy [eV]'
else:
spec_scale = W.freq_lamd
f_label_txt = 'wavelength [nm]'
# definitions for the axes
left, width = 0.18, 0.57
bottom, height = 0.14, 0.55
left_h = left + width + 0.02 - 0.02
bottom_h = bottom + height + 0.02 - 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.15, height]
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx, sharex=axScatter)
axHisty = plt.axes(rect_histy, sharey=axScatter)
if abs_value:
axScatter.pcolormesh(power_scale, spec_scale, abs(wigner)) #change
axScatter.text(0.02, 0.98, r'$W_{max}$= %.2e' % (np.amax(wigner)), horizontalalignment='left', verticalalignment='top', transform=axScatter.transAxes, color='w')
else:
# cmap='RdBu_r'
# axScatter.imshow(wigner, cmap=cmap, vmax=wigner_lim, vmin=-wigner_lim)
axScatter.pcolormesh(power_scale[::downsample], spec_scale[::downsample], wigner[::downsample,::downsample], cmap=cmap, vmax=wigner_lim, vmin=-wigner_lim)
axScatter.text(0.02, 0.98, r'$W_{max}$= %.2e' % (np.amax(wigner)), horizontalalignment='left', verticalalignment='top', transform=axScatter.transAxes)#fontsize=12,
axHistx.plot(power_scale,power)
axHistx.text(0.02, 0.95, r'E= %.2e J' % (W.energy()), horizontalalignment='left', verticalalignment='top', transform=axHistx.transAxes)#fontsize=12,
axHistx.set_ylabel('power [W]')
axHisty.plot(spec,spec_scale)
axHisty.set_xlabel('spectrum [a.u.]')
axScatter.axis('tight')
axScatter.set_xlabel(p_label_txt)
axScatter.set_ylabel(f_label_txt)
axHistx.set_ylim(ymin=0)
axHisty.set_xlim(xmin=0)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHistx.yaxis.major.locator.set_params(nbins=4)
axHisty.xaxis.major.locator.set_params(nbins=2)
axScatter.set_xlim(x_lim[0], x_lim[1])
axScatter.set_ylim(y_lim[0], y_lim[1])
if savefig != False:
if savefig == True:
savefig = 'png'
if W.z is None:
fig.savefig(W.filePath + '_wig.' + str(savefig), format=savefig)
else:
fig.savefig(W.filePath + '_wig_' + str(W.z) + 'm.' + str(savefig), format=savefig)
plt.draw()
if showfig == True:
dir_lst = W.filePath.split(os.path.sep)
dir = os.path.sep.join(dir_lst[0:-1]) + os.path.sep
rcParams["savefig.directory"] = dir
plt.show()
else:
plt.close('all')
'''
tmp for HXRSS
'''
def read_plot_dump_proj(exp_dir, stage, run_ids, plot_phase=1, showfig=0, savefig=0, debug=1):
if showfig == 0 and savefig == 0:
return None
t_l_int_arr = []
t_l_pha_arr = []
f_l_int_arr = []
for run_id in run_ids:
array = np.loadtxt(exp_dir + 'run_' + str(run_id) + '/run.' + str(run_id) + '.s' + str(stage) + '.dfl.t.txt', skiprows=1)
array = np.rollaxis(array, 1)
t_l_scale, t_l_int_a, t_l_pha_a = array[0], array[1], array[2]
array = np.loadtxt(exp_dir + 'run_' + str(run_id) + '/run.' + str(run_id) + '.s' + str(stage) + '.dfl.f.txt', skiprows=1)
array = np.rollaxis(array, 1)
f_l_scale, f_l_int_a, f_l_ftlt_abs, f_l_ftlt_ang = array[0], array[1], array[2], array[3]
t_l_int_arr.append(t_l_int_a)
t_l_pha_arr.append(t_l_pha_a)
f_l_int_arr.append(f_l_int_a)
t_l_scale *= 1e6
t_l_int_arr = np.array(t_l_int_arr)
t_l_pha_arr = np.array(t_l_pha_arr)
f_l_int_arr = np.array(f_l_int_arr)
t_l_int_arr = np.rollaxis(t_l_int_arr, 1)
t_l_pha_arr = np.rollaxis(t_l_pha_arr, 1)
f_l_int_arr = np.rollaxis(f_l_int_arr, 1)
t_l_pha_arr = np.unwrap(t_l_pha_arr, axis=0)
if len(run_ids) > 1:
t_l_int_mean = np.mean(t_l_int_arr, axis=1)
t_l_pha_mean = np.mean(t_l_pha_arr, axis=1)
f_l_int_mean = np.mean(f_l_int_arr, axis=1)
else:
t_l_int_mean = t_l_int_arr[:, 0]
t_l_pha_mean = t_l_pha_arr[:, 0]
f_l_int_mean = f_l_int_arr[:, 0]
# t_domain,t_norm=plt.figure('t_domain_filtered')
fig_name = 'stage_' + str(stage) + '__FILT__power'
t_domain = plt.figure(fig_name,figsize=(15,7))
ax1 = t_domain.add_subplot(2 + plot_phase, 1, 1)
pulse_average_pos = np.sum(t_l_scale * t_l_int_mean) / np.sum(t_l_int_mean)
ax1.plot(t_l_scale - pulse_average_pos, t_l_int_arr, '0.5')
ax1.plot(t_l_scale - pulse_average_pos, t_l_int_mean, 'k', linewidth=1.5)
ax1.plot([0, 0], [0, np.max(t_l_int_arr)], 'r')
ax1.grid(True)
plt.ylabel(r'$P$ [W]')
ax2 = t_domain.add_subplot(2 + plot_phase, 1, 2, sharex=ax1)
ax2.semilogy(t_l_scale - pulse_average_pos, t_l_int_arr, '0.5')
ax2.semilogy(t_l_scale - pulse_average_pos, t_l_int_mean, 'k', linewidth=1.5)
ax2.plot([0, 0], [np.min(t_l_int_arr), np.max(t_l_int_arr)], 'r')
ax2.grid(True)
plt.ylabel(r'$P$ [W]')
if plot_phase:
ax3 = t_domain.add_subplot(2 + plot_phase, 1, 3, sharex=ax1)
ax3.plot(t_l_scale - pulse_average_pos, t_l_pha_arr, '0.5')
ax3.plot(t_l_scale - pulse_average_pos, t_l_pha_mean, 'k', linewidth=1.5)
plt.ylabel(r'$\phi [rad]$')
plt.xlabel(r'$S [\mu m]$')
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 1:
print(' saving ' + fig_name + '.' + savefig)
plt.savefig(exp_dir + 'results/' + fig_name + '.' + savefig, format=savefig)
if showfig:
plt.show()
else:
plt.close('all')
fig_name = 'stage_' + str(stage) + '__FILT__spectrum'
f_domain = plt.figure(fig_name,figsize=(15,7))
ax1 = f_domain.add_subplot(2, 1, 1)
ax1.plot(h_eV_s * speed_of_light / f_l_scale, f_l_int_arr, '0.5')
ax1.plot(h_eV_s * speed_of_light / f_l_scale, f_l_int_mean, 'k', linewidth=1.5)
ax1.grid(True)
plt.ylabel(r'$P(\lambda)$ [a.u.]')
ax2 = f_domain.add_subplot(2, 1, 2, sharex=ax1)
# plt.xlabel(r'$S [\mu m]$')
ax2.plot(h_eV_s * speed_of_light / f_l_scale, f_l_ftlt_abs, 'r')
ax2_phase = ax2.twinx()
ax2_phase.plot(h_eV_s * speed_of_light / f_l_scale, f_l_ftlt_ang, 'r--')
ax2.grid(True)
plt.xlabel(r'$E$ [eV]')
# plt.ylabel(r'$Transm$')
# ax[1].xlabel(r'$E$ [eV]')
# ax[0].xlabel(r'$P(\lambda)$ [a.u.]')
# ax[1].xlabel(r'$abs(TrF)$')
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 1:
print(' saving ' + fig_name + '.' + savefig)
plt.savefig(exp_dir + 'results/' + fig_name + '.' + savefig, format=savefig)
if showfig:
plt.show()
else:
plt.close('all')
def plot_dfl_waistscan(sc_res, fig_name=None, showfig=0, savefig=0, debug=1):
if showfig == False and savefig == False:
return
if fig_name is None:
if sc_res.fileName() == '':
fig = plt.figure('Waist scan')
else:
fig = plt.figure(sc_res.fileName() + ' waist scan')
else:
fig = plt.figure(fig_name)
plt.clf()
ax_int = fig.add_subplot(1, 1, 1)
ax_int.plot(sc_res.z_pos, sc_res.phdens_max, 'k', label='max', linewidth=2)
ax_int.plot(sc_res.z_pos, sc_res.phdens_onaxis, 'grey', label='on-axis')
ax_int.set_xlabel('z [m]')
ax_int.set_ylabel('photon density [arb.units]')
ax_int.legend(loc='lower left')
ax_size = ax_int.twinx()
ax_size.plot(sc_res.z_pos, sc_res.fwhm_x * 1e6, 'g--', label='fwhm_x')
ax_size.plot(sc_res.z_pos, sc_res.fwhm_y * 1e6, 'b--', label='fwhm_y')
ax_size.plot(sc_res.z_pos, sc_res.std_x * 1e6, 'g:', label='std_x')
ax_size.plot(sc_res.z_pos, sc_res.std_y * 1e6, 'b:', label='std_y')
ax_size.set_ylabel('size [um]')
ax_size.legend(loc='lower right')
plt.draw()
if savefig != False:
if savefig == True:
savefig = 'png'
if debug > 0:
print(' saving *.' + savefig)
if debug > 1:
print(' to ' + sc_res.filePath + '_%.2fm-%.2fm-waistscan.' % (sc_res.z_pos[0], sc_res.z_pos[-1]) + str(savefig))
fig.savefig(sc_res.filePath + '_%.2fm-%.2fm-waistscan.' % (sc_res.z_pos[0], sc_res.z_pos[-1]) + str(savefig), format=savefig)
# if debug>0: print(' done in %.2f seconds' % (time.time() - start_time))
if showfig:
if debug > 0:
print(' showing fig')
plt.show()
else:
plt.close(fig)
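# Usage sketch (dfl_waistscan is assumed to be the scan routine producing sc_res with
# z_pos, phdens_max, phdens_onaxis, fwhm_x/y and std_x/y attributes):
# sc_res = dfl_waistscan(dfl, z_pos=np.linspace(-5, 5, 50))
# plot_dfl_waistscan(sc_res, fig_name='waist', showfig=1)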
'''
scheduled for removal
'''
def show_output(g, show_field=False, show_slice=0):
print ('plotting slice', show_slice)
h = 4.135667516e-15
c = 299792458.0
xrms = np.array(g.sliceValues[list(g.sliceValues.keys())[show_slice]]['xrms'])
yrms = np.array(g.sliceValues[list(g.sliceValues.keys())[show_slice]]['yrms'])
f = plt.figure()
f.add_subplot(131), plt.plot(g.z, xrms, lw=3), plt.plot(g.z, yrms, lw=3), plt.grid(True)
f.add_subplot(132), plt.plot(g.z, g.power_z, lw=3), plt.grid(True)
t = 1.0e+15 * float(g('zsep')) * float(g('xlamds')) * np.arange(0, len(g.I)) / c
f.add_subplot(133)
plt.plot(g.t, g.power_int, lw=3)
plt.plot(t, g.I * np.max(g.power_int) / np.max(g.I), lw=3)
plt.grid(True)
npoints = g('ncar')
zstop = g('zstop')
delz = g('delz')
xlamd = g('xlamd')
xlamds = g('xlamds')
nslice = g('nslice')
zsep = g('zsep')
dgrid = g('dgrid')
smax = nslice * zsep * xlamds
print ('wavelength ', xlamds)
if show_field:
#from mpi4py import MPI
#comm = MPI.COMM_WORLD
#slices = readRadiationFile_mpi(comm=comm, fileName=file+'.dfl', npoints=npoints)
slices = readRadiationFile(fileName=g.path + '.dfl', npoints=npoints)
print ('slices:', slices.shape)
E = np.zeros_like(slices[0, :, :])
for i in range(slices.shape[0]):
E += np.multiply(slices[i, :, :], slices[i, :, :].conjugate())
fig = plt.figure()
fig.add_subplot(131)
m = plt.imshow(abs(E), cmap='YlOrRd')
z = abs(slices[100, :, :])
fig.add_subplot(132)
P = np.zeros_like(slices[:, 0, 0])
for i in range(len(P)):
s = sum(np.abs(np.multiply(slices[i, :, :], slices[i, :, :])))
P[i] = abs(s * s.conjugate()) * (dgrid**2 / npoints)**2
t = 1.0e+15 * float(g('zsep')) * float(g('xlamds')) * np.arange(0, len(P)) / c
plt.plot(t, P)
plt.title('Pulse/axis')
fig.add_subplot(133)
spec = np.abs(np.fft.fft(slices[:, int(npoints / 2), int(npoints / 2)]))**2
freq_ev = h * fftfreq(len(spec), d=zsep * xlamds / c)
plt.plot(freq_ev, spec)
plt.title('Spectrum/axis')
def show_plots(displays, fig):
'''
putting arbitrarily many plots on single figure
'''
n1 = (len(displays) - 1) // 2 + 1
n2 = (len(displays) - 1) // n1 + 1
# print n1, n2
fmt = str(n1) + str(n2)
print (fmt)
for i in range(len(displays)):
ax = fig.add_subplot(fmt + str(i + 1))
ax.grid(True)
for f in displays[i].data:
x, y = f(x=np.linspace(-10, 10, 100))
ax.plot(x, y, '.')
show()
class Display:
def __init__(self, data=lambda x: (x, 0 * x), xlabel='', ylabel=''):
self.data = (data,)
self.xlabel = xlabel
self.ylabel = ylabel
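# Usage sketch: each Display wraps a callable mapping x -> (x, y), so a two-panel figure
# can be assembled as follows (assumed standalone call):
# fig = plt.figure()
# show_plots([Display(data=lambda x: (x, np.sin(x)), ylabel='sin'),
#             Display(data=lambda x: (x, np.cos(x)), ylabel='cos')], fig)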
def round_sig(x, sig=2):
from math import log10, floor
return round(x, sig - int(floor(log10(x))) - 1)
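# Example: round_sig keeps significant digits rather than decimal places,
# e.g. round_sig(0.0012345, 2) -> 0.0012 and round_sig(12345, 2) -> 12000
# (x must be positive, since log10(x) is taken).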
def gauss_fit(X, Y):
import numpy as np
import scipy.optimize as opt
def gauss(x, p): # p[0]==mean, p[1]==stdev p[2]==peak
return p[2] / (p[1] * np.sqrt(2 * np.pi)) * np.exp(-(x - p[0])**2 / (2 * p[1]**2))
p0 = [0, max(X) / 2, max(Y)]
errfunc = lambda p, x, y: gauss(x, p) - y
p1, success = opt.leastsq(errfunc, p0[:], args=(X, Y))
fit_mu, fit_stdev, ampl = p1
Y1 = gauss(X, p1)
RMS = fit_stdev
return (Y1, RMS)
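# Example (noiseless Gaussian, assumed values): the returned RMS approximates the true sigma
# X = np.linspace(-5, 5, 101)
# Y = np.exp(-X**2 / (2 * 1.2**2))
# Y_fit, rms = gauss_fit(X, Y)  # rms ~ 1.2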
def fwhm3(valuelist, height=0.5, peakpos=-1):
"""calculates the full width at half maximum (fwhm) of some curve.
the function will return the fwhm with sub-pixel interpolation. It will start at the maximum position and 'walk' left and right until it approaches the half values.
INPUT:
- valuelist: e.g. the list containing the temporal shape of a pulse
OPTIONAL INPUT:
-peakpos: position of the peak to examine (list index)
the global maximum will be used if omitted.
OUTPUT:
-fwhm (value)
"""
if peakpos == -1: # no peakpos given -> take maximum
peak = np.max(valuelist)
peakpos = np.min(np.nonzero(valuelist == peak))
peakvalue = valuelist[peakpos]
phalf = peakvalue * height
# go left and right, starting from peakpos
ind1 = peakpos
ind2 = peakpos
while ind1 > 2 and valuelist[ind1] > phalf:
ind1 = ind1 - 1
while ind2 < len(valuelist) - 1 and valuelist[ind2] > phalf:
ind2 = ind2 + 1
# ind1 and 2 are now just below phalf
grad1 = valuelist[ind1 + 1] - valuelist[ind1]
grad2 = valuelist[ind2] - valuelist[ind2 - 1]
if grad1 == 0 or grad2 == 0:
width = None
else:
# calculate the linear interpolations
# print(ind1,ind2)
p1interp = ind1 + (phalf - valuelist[ind1]) / grad1
p2interp = ind2 + (phalf - valuelist[ind2]) / grad2
# calculate the width
width = p2interp - p1interp
return (peakpos, width, np.array([ind1, ind2]))
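# Example (assumed values): for a symmetric triangular pulse the interpolated width is exact
# pk, width, idx = fwhm3(np.array([0., 1., 2., 3., 2., 1., 0.]))  # pk = 3, width = 3.0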
# ax_size_l = ax_size_t.twinx() #longitudinal size
# ax_size_l.plot(g.z, rad_longit_size*2, color='indigo', linestyle='dashed',linewidth=1.5)
# ax_size_l.set_ylabel('longitudinal [$\mu$m]')
| sserkez/ocelot | gui/genesis_plot.py | Python | gpl-3.0 | 99,812 | [
"Gaussian"
] | fbc78a2d6e38e2d408caddd8eeddc80dfbdbc8da02858e3b03d17a563916ac9f |
""" This contains unit tests to make sure that the migration between PyGSI and M2Crypto is as smooth as possible
The test covers only the method exposed by the PyGSI version for the time being.
We are not testing:
* generateProxyRequest with bitStrengsh -> I should...
* getIssuerDN: does not make any sense here, is never used
* generateChainFromResponse: not used
We are skipping:
* init with arguments, because never used
* The delegation mechanism (checkChain method)involves also X509Chain so it is in the X509Chain
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# redefined-outer-name is needed because we keep passing get_X509Chain_class as param
# pylint: disable=redefined-outer-name
from datetime import datetime, timedelta
from string import ascii_letters
from pytest import mark, fixture, skip, raises, approx
parametrize = mark.parametrize
from DIRAC.Core.Security.test.x509TestUtilities import (
CERTS, CERTKEYS, CERTCONTENTS, deimportDIRAC, ENCRYPTEDKEYPASS, ENCRYPTEDKEY,
getCertOption, HOSTCERT, KEYCONTENTS_PKCS8, USERCERT, X509REQUESTTYPES, get_X509Request,
)
def test_dumpRequest_notInitialized(get_X509Request):
""" Calls dumpRequest a non initlaized Request"""
x509Req = get_X509Request()
res = x509Req.dumpRequest()
assert res['OK'] is False
from DIRAC.Core.Utilities.DErrno import ENOCERT
assert res['Errno'] == ENOCERT
def test_dumpRequest(get_X509Request):
"""" Generate an X509Request and dumps it"""
x509Req = get_X509Request()
x509Req.generateProxyRequest()
res = x509Req.dumpRequest()
assert res['OK']
assert b'CERTIFICATE REQUEST' in res['Value']
def test_loadAllFromString_fromDumpRequest(get_X509Request):
""" Generate a proxy Request and try loading it from incomplete dump"""
x509Req = get_X509Request()
x509Req.generateProxyRequest()
proxyRequest = x509Req.dumpRequest()['Value']
# This should fail because the proxyRequest does not contain the private key
x509ReqLoad = get_X509Request()
res = x509ReqLoad.loadAllFromString(proxyRequest)
assert res['OK'] is False
from DIRAC.Core.Utilities.DErrno import ENOPKEY
assert res['Errno'] == ENOPKEY
@parametrize('isLimited', (False, True))
def test_getSubjectDN(get_X509Request, isLimited):
""" Try getting the subjectDN in case of limited and non limited request
:param isLimited: request a limited proxy
"""
x509Req = get_X509Request()
x509Req.generateProxyRequest(limited=isLimited)
res = x509Req.getSubjectDN()
assert res['OK']
if isLimited:
assert res['Value'] == '/CN=limited proxy'
else:
assert res['Value'] == '/CN=proxy'
@parametrize('isLimited', (False, True))
def test_loadAllFromString(get_X509Request, isLimited):
""" Generate a proxy Request, load it, and check that the subject DN are the same
:param isLimited: request a limited proxy
"""
x509Req = get_X509Request()
x509Req.generateProxyRequest(limited=isLimited)
proxyRequest = x509Req.dumpAll()['Value']
x509ReqLoad = get_X509Request()
res = x509ReqLoad.loadAllFromString(proxyRequest)
assert res['OK']
assert x509Req.getSubjectDN() == x509ReqLoad.getSubjectDN()
| yujikato/DIRAC | src/DIRAC/Core/Security/test/Test_X509Request.py | Python | gpl-3.0 | 3,251 | [
"DIRAC"
] | 4de2d203b9b082bdc46e15b6d83207878818812fbc9a623e569f463fd7c4ca53 |
# -*- coding: utf-8 -*-
""" Sahana Eden Climate Model
@copyright: 2011-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ClimateModel",
"climate_first_run",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3ClimateModel(S3Model):
"""
Climate data is stored in dynamically created tables.
These tables can be added from the command line script add_table.py
in modules.ClimateDataPortal.
The table definitions are stored in climate_sample_table_spec.
A data is an observed value over a time quantum at a given place.
e.g. observed temperature in Kathmandu between Feb 2006 - April 2007
Places are currently points, i.e. lat/lon coordinates.
Places may be stations.
Places may have elevation or other optional information.
@ToDo: i18n
@ToDo: Deprecate raw SQL (Tested only on PostgreSQL)
"""
names = ("climate_place",
"climate_place_elevation",
"climate_place_station_name",
"climate_place_station_id",
"climate_sample_table_spec",
"climate_monthly_aggregation",
"climate_station_parameter",
"climate_prices",
"climate_purchase",
"climate_save_query",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
NONE = current.messages["NONE"]
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Climate Place
#
# This resource is spread over 4 tables, which we assume are linked by
# common IDs
#
# @ToDo: Migrate to gis_location?
# Although this table has many fields unused so a performance hit?
# elevation is not included as it would just mean a performance hit
# when we are generating 2D maps without elevation info.
define_table("climate_place",
Field("longitude", "double",
notnull=True,
required=True,
),
Field("latitude", "double",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# elevation may not be useful for future projects
# e.g. where not available, or sea-based stations
# also, elevation may be supplied for gridded data
define_table("climate_place_elevation",
Field("elevation_metres", "double",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# not all places are stations with elevations
# as in the case of "gridded" data
# a station can only be in one place
define_table("climate_place_station_name",
Field("name", "double",
notnull=True,
required=True,
),
)
station_id = S3ReusableField("station_id", "reference %s" % tablename,
sortby="name",
requires = IS_ONE_OF(db,
"climate_place_station_name.id",
climate_station_represent,
orderby="climate_place_station_name.name",
sort=True
),
represent = climate_station_represent,
label = "Station",
ondelete = "RESTRICT"
)
# ---------------------------------------------------------------------
# station id may not be useful or even meaningful
# e.g. gridded data has no stations.
# this is passive data so ok to store separately
define_table("climate_place_station_id",
Field("station_id", "integer",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# coefficient of variance is meaningless for degrees C but Ok for Kelvin
# internally all scales must be ratio scales if coefficient
# of variations is to be allowed, (which it is)
# rainfall (mm), temp (K) are ok
# output units
define_table("climate_sample_table_spec",
Field("name",
notnull=True,
required=True,
),
Field("sample_type_code",
length = 1,
notnull = True,
# web2py requires a default value for not null fields
default = "",
required = True
),
Field("field_type",
notnull=True,
required=True,
),
Field("units",
notnull=True,
required=True,
),
Field("date_mapping",
default="",
notnull=True,
required=True
),
Field("grid_size", "double",
default = 0,
notnull = True,
required = True
)
)
parameter_id = S3ReusableField("parameter_id", "reference %s" % tablename,
sortby="name",
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
)
# ---------------------------------------------------------------------
define_table("climate_monthly_aggregation",
Field("sample_table_id",
db.climate_sample_table_spec,
notnull = True,
required = True
),
# this maps to the name of a python class
# that deals with the monthly aggregated data.
Field("aggregation",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# Station Parameters
#
tablename = "climate_station_parameter"
define_table(tablename,
station_id(),
parameter_id(requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
),
Field.Method("range_from",
climate_station_parameter_range_from),
Field.Method("range_to",
climate_station_parameter_range_to),
)
ADD = T("Add new Station Parameter")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Station Parameter Details"),
title_list = T("Station Parameters"),
title_update = T("Edit Station Parameter"),
label_list_button = T("List Station Parameters"),
label_delete_button = T("Remove Station Parameter"),
msg_record_created = T("Station Parameter added"),
msg_record_modified = T("Station Parameter updated"),
msg_record_deleted = T("Station Parameter removed"),
msg_list_empty = T("No Station Parameters"))
configure(tablename,
insertable = False,
list_fields = [
"station_id",
"parameter_id",
(T("Range From"), "range_from"),
(T("Range To"), "range_to"),
]
)
# =====================================================================
# Purchase Data
#
nationality_opts = {
1:"Nepali Student",
2:"Others"
}
tablename = "climate_prices"
define_table(tablename,
Field("category", "integer",
label = T("Category"),
requires = IS_IN_SET(nationality_opts),
represent = lambda id: nationality_opts.get(id, NONE),
notnull = True,
required = True
),
parameter_id(
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
filterby = "sample_type_code",
filter_opts = ("O",),
sort=True
),
notnull = True,
required = True,
represent = sample_table_spec_represent
),
Field("nrs_per_datum", "double",
label = T("NRs per datum"),
notnull = True,
required = True
)
)
configure(tablename,
create_onvalidation = self.climate_price_create_onvalidation,
list_fields=[
"category",
"parameter_id",
"nrs_per_datum"
]
)
ADD = T("Add new Dataset Price")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Dataset Price Details"),
title_list = T("Dataset Prices"),
title_update = T("Edit Dataset Price"),
label_list_button = T("List Dataset Prices"),
label_delete_button = T("Remove Dataset Price"),
msg_record_created = T("Dataset Price added"),
msg_record_modified = T("Dataset Price updated"),
msg_record_deleted = T("Dataset Price removed"),
msg_list_empty = T("No Dataset Prices"))
tablename = "climate_purchase"
define_table(tablename,
#user_id(),
#Field("sample_type_code",
# "string",
# requires = IS_IN_SET(sample_type_code_opts),
# represent = lambda code: ClimateDataPortal.sample_table_types_by_code[code]
#),
Field("parameter_id", "integer",
requires = IS_ONE_OF(db,
"climate_prices.parameter_id",
sample_table_spec_represent,
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
),
station_id(),
s3_date("date_from",
default = "now",
empty=False
),
s3_date("date_to",
default = "now",
empty=False
),
Field("nationality", "integer",
label = T("Category"),
requires = IS_IN_SET(nationality_opts),
represent = lambda id: nationality_opts.get(id, NONE),
required = True
),
Field("notes", "text",
label = T("Receipt number / Student ID / other notes")
),
Field("price"),
Field("paid", "boolean",
represent = lambda opt: \
opt and "Yes" or "No",
),
Field("i_agree_to_the_terms_and_conditions", "boolean",
required = True,
represent = lambda agrees: agrees and "Yes" or "No",
comment = DIV(_class="stickytip",
_title="%s|%s" % (
T("Important"),
T("Check this box when you have read, "
"understand and agree to the "
"<a href='terms' target='_blank'>"
"terms and conditions"
"</a>."
)
)
)
),
*s3_meta_fields()
)
# @todo: make lazy_table
table = db[tablename]
table.owned_by_user.label = T("User")
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
if not auth.s3_has_role(ADMIN):
table.paid.writable = False
ADD = T("Purchase New Data")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Purchased Data Details"),
title_list = T("All Purchased Data"),
title_update = T("Edit Purchased Data"),
label_list_button = T("List Dataset Prices"),
label_delete_button = T("Remove Purchased Data"),
msg_record_created = T("Data Purchase In Process"),
msg_record_modified = T("Purchased Data updated"),
msg_record_deleted = T("Purchased Data removed"),
msg_list_empty = T("No Data Purchased"))
configure(tablename,
onaccept = self.climate_purchase_onaccept,
create_next = URL(args = ["[id]", "read"]),
list_fields=[
"owned_by_user",
"parameter_id",
"station_id",
"date_from",
"date_to",
"nationality",
#"purpose",
"price",
"paid",
"i_agree_to_terms_and_conditions"
]
)
# =====================================================================
# Saved Queries
#
tablename = "climate_save_query"
define_table(tablename,
#user_id(),
Field("description"),
Field("query_definition", "text"),
)
ADD = T("Save Query")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Saved Query Details"),
title_list = T("Saved Queries"),
title_update = T("Edit Saved Query"),
label_list_button = T("List Saved Queries"),
label_delete_button = T("Remove Saved Query"),
msg_record_created = T("Query Saved"),
msg_record_modified = T("Saved Query updated"),
msg_record_deleted = T("Saved Query removed"),
msg_list_empty = T("No Saved Queries"))
configure(tablename,
listadd = False)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def climate_price_create_onvalidation(form):
"""
"""
vars = form.request_vars
db = current.db
table = db.climate_prices
query = (table.category == vars["category"]) & \
(table.parameter_id == vars["parameter_id"])
price = db(query).select(table.id,
limitby=(0, 1)).first()
if price is not None:
form.errors["nrs_per_datum"] = [
"There is a conflicting price for the above category and parameter."
]
return False
else:
return True
# -------------------------------------------------------------------------
@staticmethod
def climate_purchase_onaccept(form):
"""
Calculate Price
"""
import ClimateDataPortal
vars = form.vars
id = vars.id
db = current.db
ptable = db.climate_purchase
purchase = db(ptable.id == id).select(ptable.paid,
limitby=(0, 1)).first()
if purchase and purchase.paid:
pass
else:
parameter_id = vars.parameter_id
table = db.climate_sample_table_spec
query = (table.id == parameter_id)
parameter_table = db(query).select(table.id,
table.date_mapping,
limitby=(0, 1)).first()
parameter_table_id = parameter_table.id
date_mapping_name = parameter_table.date_mapping
period = date_mapping_name
date_from = vars.date_from
date_to = vars.date_to
nationality = int(vars.nationality)
table = db.climate_prices
query = (table.category == nationality) & \
(table.parameter_id == parameter_id)
price_row = db(query).select(table.nrs_per_datum,
limitby=(0, 1)).first()
if price_row is None:
form.errors["price"] = ["There is no price set for this data"]
else:
price = price_row.nrs_per_datum
currency = {
1: "%.2f NRs",
2: "US$ %.2f"
}[nationality]
date_mapping = getattr(ClimateDataPortal, date_mapping_name)
start_date_number = date_mapping.date_to_time_period(date_from)
end_date_number = date_mapping.date_to_time_period(date_to)
place_id = int(vars.station_id)
datum_count = db.executesql(
"SELECT COUNT(*) "
"FROM climate_sample_table_%(parameter_table_id)i "
"WHERE place_id = %(place_id)i "
"AND time_period >= %(start_date_number)i "
"AND time_period <= %(end_date_number)i;" % locals()
)[0][0]
ptable[id] = {"price": currency % (datum_count * price)}
# =============================================================================
def climate_station_represent(id, row=None):
"""
"""
if row:
id = row.id
db = current.db
s3db = current.s3db
table = s3db.climate_place_station_id
row_id = db(table.id == id).select(table.station_id,
limitby=(0,1)).first()
table = s3db.climate_place_station_name
row_name = db(table.id == id).select(table.name,
limitby=(0,1)).first()
if row_id and row_id.station_id:
represent = " (%s)" % row_id.station_id
else:
represent = ""
if row_name and row_name.name:
represent = "%s%s" % (row_name.name, represent)
return represent or current.messages["NONE"]
# =============================================================================
def sample_table_spec_represent(id, row=None):
"""
"""
if row:
id = row.id
import ClimateDataPortal
table = current.s3db.climate_sample_table_spec
row = current.db(table.id == id).select(table.name,
table.sample_type_code,
limitby=(0, 1)).first()
if row:
return "%s %s" % (
ClimateDataPortal.sample_table_types_by_code[row.sample_type_code].__name__,
row.name
)
else:
return current.messages["NONE"]
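# Example (assuming "O" is registered as a class named Observed in
# ClimateDataPortal.sample_table_types_by_code): a spec row with
# name "rainfall_mm" would be represented as "Observed rainfall_mm".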
# =============================================================================
def climate_station_parameter_range_from(row):
default = current.messages["NONE"]
if hasattr(row, "climate_station_parameter"):
row = row.climate_station_parameter
try:
parameter_id = row.parameter_id
station_id = row.station_id
except AttributeError:
return default
table = current.s3db.table("climate_sample_table_%s" % parameter_id)
if not table:
return default
date = table.time_period.min()
row = current.db(table.place_id == station_id).select(date).first()
if row:
date = row[date]
import ClimateDataPortal
year, month = ClimateDataPortal.month_number_to_year_month(date)
return "%s-%s" % (month, year)
else:
return default
# -------------------------------------------------------------------------
def climate_station_parameter_range_to(row):
default = current.messages["NONE"]
if hasattr(row, "climate_station_parameter"):
row = row.climate_station_parameter
try:
parameter_id = row.parameter_id
station_id = row.station_id
except AttributeError:
return default
table = current.s3db.table("climate_sample_table_%s" % parameter_id)
if not table:
return default
date = table.time_period.max()
row = current.db(table.place_id == station_id).select(date).first()
if row:
date = row[date]
import ClimateDataPortal
year, month = ClimateDataPortal.month_number_to_year_month(date)
return "%s-%s" % (month, year)
else:
return default
# =============================================================================
def climate_first_run():
"""
Called from zzz_1st_run.py
Manual SQL Statements to run after tables are created
"""
errors = []
settings = current.deployment_settings
if settings.get_database_type() != "postgres":
errors.append("Climate unresolved dependency: PostgreSQL required")
try:
import rpy2
except ImportError:
errors.append("""
R is required by the climate data portal to generate charts
To install R: refer to:
http://cran.r-project.org/doc/manuals/R-admin.html
rpy2 is required to interact with python.
To install rpy2, refer to:
http://rpy.sourceforge.net/rpy2/doc-dev/html/overview.html
""")
try:
from Scientific.IO import NetCDF
except ImportError:
errors.append("Climate unresolved dependency: NetCDF required if you want to import readings")
try:
from scipy import stats
except ImportError:
errors.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")
if errors:
# Report errors and stop.
prefix = "\n%s: " % current.T("ACTION REQUIRED")
msg = prefix + prefix.join(errors)
current.log.critical(msg)
raise HTTP(500, body=msg)
db = current.db
# Load all stations and parameters
s3db = current.s3db
ptable = s3db.climate_station_parameter
if not db(ptable.id > 0).select(ptable.id,
limitby=(0, 1)):
table = s3db.climate_place_station_name
station_rows = db(table.id > 0).select(table.id)
table = db.climate_sample_table_spec
query = (table.sample_type_code == "O")
for station_row in station_rows:
parameter_rows = db(query).select(table.id)
for parameter_row in parameter_rows:
ptable.insert(
station_id = station_row.id,
parameter_id = parameter_row.id
)
db.executesql(
"ALTER TABLE climate_sample_table_spec "
"ADD CONSTRAINT climate_sample_table_name_sample_type_unique "
"UNIQUE (name, sample_type_code);"
"ALTER TABLE climate_prices "
"ADD CONSTRAINT climate_price_unique "
"UNIQUE (category, parameter_id);"
)
db.commit()
# END =========================================================================
| julianprabhakar/eden_car | modules/s3db/climate.py | Python | mit | 27,377 | [
"NetCDF"
] | a00cb021d4d4e613de5f02532f63485bbe0ebb342a874381def0494bcef9e7bc |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Interface to ASE Atoms, and Calculators.
"""
import logging
import qmpy
from qmpy.materials.structure import Structure
from qmpy.materials.atom import Atom
logger = logging.getLogger(__name__)
if qmpy.FOUND_ASE:
import ase
import ase.io
def structure_to_atoms(structure):
"""
Convert a qmpy.Structure to an ase.Atoms
Example::
>>> import qmpy.io as io
>>> structure = io.read('POSCAR')
>>> atoms = io.ase_mapper.structure_to_atoms(structure)
"""
if not qmpy.FOUND_ASE:
print("ASE must be installed to convert a Structure to an Atoms object")
return
atoms = ase.Atoms(
structure.name,
cell=structure.cell,
scaled_positions=structure.coords,
magmoms=structure.magmoms,
)
return atoms
def atoms_to_structure(atoms):
"""
Convert an ase.Atoms to a qmpy.Structure
Example::
>>> import qmpy.io.ase_mapper
>>> atoms = ase.io.read('POSCAR')
>>> structure = qmpy.io.ase_mapper.atoms_to_structure(atoms)
"""
"""
if not qmpy.FOUND_ASE:
print("ASE must be installed to convert Atoms object to a Structure")
return
struct = Structure()
struct.cell = atoms.get_cell()
for a in atoms:
atom = Atom()
atom.coord = a.position
atom.symbol = a.symbol
atom.magmom = a.magmom
atom.direct = False
struct.add_atom(atom)
return struct
def read(filename, **kwargs):
"""
Uses the ase.io.read method to read in a file, and convert it to a
qmpy.Structure object. Passes any optional keyword arguments to the
ase.io.read call.
"""
if not qmpy.FOUND_ASE:
print("ASE must be installed to convert Atoms object to a Structure")
return
atoms = ase.io.read(filename, **kwargs)
return atoms_to_structure(atoms)
def write(structure, filename, **kwargs):
    """
    Write a qmpy.Structure to `filename` via ase.io.write.
    """
    atoms = structure_to_atoms(structure)
    return ase.io.write(filename, atoms, **kwargs)
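# Minimal usage sketch (not part of the original module; assumes ASE is
# installed and a VASP POSCAR file exists at `path`): round-trip a file
# through qmpy.Structure and ase.Atoms with the helpers above.
def _example_round_trip(path="POSCAR"):
    structure = read(path, format="vasp")   # file -> qmpy.Structure
    atoms = structure_to_atoms(structure)   # qmpy.Structure -> ase.Atoms
    return atoms_to_structure(atoms)        # ase.Atoms -> qmpy.Structure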
| wolverton-research-group/qmpy | qmpy/io/ase_mapper.py | Python | mit | 2,038 | [
"ASE"
] | 7c10b758666611462830e0b7009f7aa4e92d35d68952f3d160b1101d9b72375c |
#! /usr/bin/env python
__author__ = 'Chengwei Luo (luo.chengwei@gatech.edu)'
__version__ = '0.0.1'
__date__ = 'November 2013'
"""
metaHGT (meta-community Horizontal Gene Transfer tracker):
in-situ and real time HGT tracker for series metagenomes
Copyright(c) 2013 Chengwei Luo (luo.chengwei@gatech.edu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
https://github.com/luo-chengwei/metaHGT
for help, type:
python metaHGT.py --help
"""
USAGE = \
"""Usage: %prog <required_parameters> [options]
metaHGT: in-situ and real time HGT tracker for series metagenomes
metaHGT is a platform for in-situ and real-time HGT tracking in series metagenomes.
It utilizes the power of PE mapping and infers HGTs from mapping contrasts across
different time points.
It is written in Python, so it should run on Mac OS and Unix/Linux.
Add --help to see a full list of required and optional
arguments to run metaHGT.
Additional information can also be found at:
https://github.com/luo-chengwei/metaHGT/wiki
If you use metaHGT in your work, please cite it as:
<metaHGT citation here>
Copyright: Chengwei Luo, Konstantinidis Lab, Georgia Institute of Technology, 2013
"""
import sys
import os
import re
import glob
from optparse import OptionParser, OptionGroup
from subprocess import PIPE, Popen
import cPickle
from time import ctime, time
from datetime import timedelta
import calHGT
import bams
####################### PROJECT INFORMATION CLASS #############################
class ProjectInfo:
def __init__(self):
self.samples = []
self.timepairs = []
self.num_proc = 1
self.quiet = False
self.bam_dir = None
self.assembly_dir = None
self.reads_dir = None
self.interleaved = []
self.outdir = None
self.bwa = None
self.samtools = None
self.contig_length = 0
self.mapq = 0
self.align_perc = 0.
self.min_links = 0
def getBAMFiles(self, sample1, sample2):
BAMs = []
meshedSamplePairs = [(sample1, sample1), (sample2, sample2),
(sample1, sample2), (sample2, sample1)]
for sampleA, sampleB in meshedSamplePairs:
bamfiles = glob.glob(self.bam_dir + '/' + sampleA + '.vs.' + sampleB + '*bam')
if len(bamfiles) == 0:
sys.stderr.write('FATAL: Error in fetching the BAM file for samples: %s and %s\n' % (sampleA, sampleB))
exit(0)
if len(bamfiles) > 1:
sys.stderr.write('FATAL: Ambiguous naming for BAM files for samples: %s and %s\n' % (sampleA, sampleB))
sys.stderr.write(' The following files are found:\n')
for file in bamfiles:
sys.stderr.write(' %s\n' % file)
exit(1)
else:
bamfile = bamfiles[0]
baifile = bamfile + '.bai'
if not os.path.exists(baifile):
sys.stderr.write('FATAL: cannot locate the index file for BAM file: %s\n' % bamfile)
exit(1)
BAMs.append(os.path.realpath(bamfiles[0]))
return BAMs
def getReadsFile(self, sample):
files1 = glob.glob(self.reads_dir + '/' + sample + '*1.fa')
if len(files1) == 0:
files1 = glob.glob(self.reads_dir + '/' + sample + '*1.fastq')
if len(files1) == 0:
files1 = glob.glob(self.reads_dir + '/' + sample + '*1.fasta')
files2 = glob.glob(self.reads_dir + '/' + sample + '*2.fa')
if len(files2) == 0:
files2 = glob.glob(self.reads_dir + '/' + sample + '*2.fastq')
if len(files2) == 0:
files2 = glob.glob(self.reads_dir + '/' + sample + '*2.fasta')
if len(files1) == 0 and len(files2) == 0:
files = glob.glob(self.reads_dir + '/' + sample + '*fa')
if len(files) == 0:
files = glob.glob(self.reads_dir + '/' + sample + '*fastq')
if len(files) == 0:
files = glob.glob(self.reads_dir + '/' + sample + '*fasta')
if len(files) == 0:
sys.stderr.write('FATAL: Error in fetching the reads file for sample: %s\n' % sample)
exit(0)
if len(files) > 1:
sys.stderr.write('FATAL: Ambiguous naming for reads file for sample: %s\n' % sample)
sys.stderr.write(' The following files are found:\n')
for file in files:
sys.stderr.write(' %s\n' % file)
exit(0)
else:
return files
else:
if len(files1) == 0:
sys.stderr.write('FATAL: Error in fetching the 5\' reads file for sample: %s\n' % sample)
exit(0)
if len(files1) > 1:
sys.stderr.write('FATAL: Ambiguous naming for reads file for sample: %s\n' % sample)
sys.stderr.write(' The following files are found:\n')
for file in files1:
sys.stderr.write(' %s\n' % file)
exit(0)
if len(files2) == 0:
sys.stderr.write('FATAL: Error in fetching the 3\' reads file for sample: %s\n' % sample)
exit(0)
if len(files2) > 1:
sys.stderr.write('FATAL: Ambiguous naming for reads file for sample: %s\n' % sample)
sys.stderr.write(' The following files are found:\n')
for file in files2:
sys.stderr.write(' %s\n' % file)
exit(0)
return files1 + files2
def getAssemblyFile(self, sample):
files = glob.glob(self.assembly_dir + '/' + sample + '*fa')
if len(files) == 0:
sys.stderr.write('FATAL: Error in fetching the assembly file for sample: %s\n' % sample)
exit(0)
if len(files) > 1:
sys.stderr.write('FATAL: Ambiguous naming for assembly file for sample: %s\n' % sample)
sys.stderr.write(' The following files are found:\n')
for file in files:
sys.stderr.write(' %s\n' % file)
exit(1)
else:
return os.path.realpath(files[0])
def initProject(self, options):
if os.path.exists(options.sample_list):
for timepoint in open(options.sample_list, 'r'):
if timepoint[:-1] == '':
continue
self.samples.append(tuple(timepoint[:-1].split(':')))
elif options.sample_list.count(',') > 0:
for timepoint in options.sample_list.split(','):
self.samples.append(tuple(timepoint.split(':')))
else:
sys.stderr.write('FATAL: Error in extracting samples, please check your input.\n')
if options.quiet:
self.quiet = True
# init outfile
self.outfile = options.outfile
# qvalue cutoff
self.qvalue = options.qvalue
# generate timepairs
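# Example: a sample list of "s1:s2,s3" (replicates s1 and s2 at the first
# time point, s3 at the second) yields the pairs (s1, s3) and (s2, s3).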
for timepoint1, timepoint2 in zip(self.samples[:-1], self.samples[1:]):
for sample1 in timepoint1:
for sample2 in timepoint2:
self.timepairs.append((sample1, sample2))
self.num_proc = options.num_proc
self.quiet = options.quiet
self.contig_length = options.contig_length
self.mapq = options.mapq
self.align_perc = options.align_perc
self.min_links = options.min_links
# you supply either BAM dir or assembly dir + reads dir
# if you supply BAMs, then it skips the BWA+Samtoools step to generate the BAM files,
# it would rely on you for generate the correct BAMs (sorted and indexed).
# otherwise, this will try to generate all the BAMs files needs.
if options.bam_dir and os.path.exists(options.bam_dir):
self.bam_dir = options.bam_dir
for sample1, sample2 in self.timepairs:
bamfiles = self.getBAMFiles(sample1, sample2)
else:
if options.assembly_dir == None or options.reads_dir == None:
sys.stderr.write('FATAL: You need to either supply the BAM file directory or the assembly and reads directory.\n')
exit(0)
if not os.path.exists(options.assembly_dir):
sys.stderr.write('FATAL: cannot locate the assembly fasta files directory, you supplied: %s\n' % options.assembly_dir)
exit(0)
else:
self.assembly_dir = options.assembly_dir
if not os.path.exists(options.reads_dir):
sys.stderr.write('FATAL: cannot locate the reads directory, you supplied: %s\n' % options.reads_dir)
exit(0)
else:
self.reads_dir = options.reads_dir
# test files
for timepoint in self.samples:
for sample in timepoint:
readsfile = self.getReadsFile(sample)
assemblyfile = self.getAssemblyFile(sample)
# test samtools and bwa
bwaTest = Popen(options.bwa, shell=True, stdout=PIPE, stderr=PIPE).stderr.read()
if not bwaTest or bwaTest.count('not found') == 1:
sys.stderr.write("FATAL: BWA not found in path!\n")
exit(0)
else:
self.bwa = options.bwa
samtoolsTest = Popen(options.samtools, shell=True, stdout=PIPE, stderr=PIPE).stderr.read()
if samtoolsTest.count('not found') == 1 or not samtoolsTest:
sys.stderr.write("FATAL: samtools not found in path!\n")
exit(0)
else:
self.samtools = options.samtools
# End of initProject
################################### MAIN ######################################
def main(argv = sys.argv[1:]):
parser = OptionParser(usage = USAGE, version="Version: " + __version__)
# Required arguments
requiredOptions = OptionGroup(parser, "Required options",
"These options are required to run BinGeR, and may be supplied in any order.")
requiredOptions.add_option("-l", "--sample_list", type = "string", metavar = "FILE/STRING",
help = "Text file containing all sample names, one per line, in longitudinal order; \
replicates of the same time point should be separated by colon.\n\
Alternatively, you can directly supply the sample names in longitudinal order, \
timepoints separated by comma and replicates separated by colon.")
parser.add_option_group(requiredOptions)
# Optional arguments that need to be supplied if not the same as default
optOptions = OptionGroup(parser, "Optional parameters",
"There options are optional, and may be supplied in any order.")
optOptions.add_option("-o", "--outfile", type = "string", default = 'HGTs_info.txt', metavar = "FILE",
help = "Output file where HGTs will be reported to.")
optOptions.add_option("-b", "--bam_dir", type = "string", default = "BAMs", metavar = "DIR",
help = "Directory where sorted bam files (reads versus assembly, same sample) are, \
the naming should follow \"sample1.vs.sample2.*.bam\" convention. \
NOTE: if you specify this option, metaHGT will assume that you have already performed the BWA mapping and the BAM sorting/indexing yourself.")
optOptions.add_option("-a", "--assembly_dir", type = "string", default = "Assemblies", metavar = "DIR",
help = "Directory where assembly files ni fasta format are, \
the naming should follow \"sample.*.fa\" convention.\n\
The tags of contigs should follow: binID.contigXX.* fashion, where binID is the \
identifier of bins, and contigXX is the identifier of the contigs belong to the bin. \
Unclassified (unbinned) contigs should be renamed as sampleID.contigXX.*, where ID \
should be the sample ID.")
optOptions.add_option("-r", "--reads_dir", type = "string", default = "Reads", metavar = "DIR",
help = "Directory where reads are. They should be in fastq format, \
and can be in both interleaved, or separate two files. \
The naming should follow \"sample.*.fastq\" (interleaved) or \"sample.*.1.fastq\" \
and \"sample.*.2.fastq\" (separate) convention.")
optOptions.add_option("--bwa", type = "string", default = "bwa", metavar = "STRING",
help = "Location of BWA (Li et al, Bioinformatics, 2009). Only needed if you haven't \
generated the BAM files yourself; otherwise, please specify the BAM dir (-b/--bam_dir).\
default: [$PATH:/bwa], version: 0.7+")
optOptions.add_option("--samtools", type = "string", default = "samtools", metavar = "STRING",
help = "Location of the Samtools binary (Li et al, Bioinformatics, 2009).Only needed \
if you haven't generated the BAM files yourself; otherwise, please specify the BAM dir (-b/--bam_dir).")
optOptions.add_option("--contig_length", type = "int", default = 1000, metavar = "INT",
help = "minimun contig length for contigs to be considered in HGT inference [default: 1000]")
optOptions.add_option("--mapq", type = "int", default = 30, metavar = "INT",
help = "minimun mapping quality for a mapped read to be considered in HGT inference [default: 30]")
optOptions.add_option("--align_perc", type = "float", default = 0.9, metavar = "FLOAT",
help = "minimun aligned length percetange for reads to be considered in HGT inference [default: 0.9]")
optOptions.add_option("--min_links", type = "int", default = 3, metavar = "INT",
help = "minimun cross-aligned read numbers required to initiate an HGT hotspot scan [default: 3]")
optOptions.add_option("--qvalue", type = "float", default = 0.2, metavar = "FLOAT",
help = "max Q-value (FDR corrected p-value) cutoff, HGTs higher then this won't be reported. [default: 0.2]")
optOptions.add_option("-t", "--num_proc", type = "int", default = 1, metavar = 'INT',
help = "Number of processor for BinGeR to use [default: 1].")
parser.add_option_group(optOptions)
# runtime settings that could affect the file saving and message printing
runtimeSettings = OptionGroup(parser, "Runtime settings",
"There options are optional, and may be supplied in any order.")
runtimeSettings.add_option("-q", "--quiet", default = False, action = "store_true",
help = "Suppress printing detailed runtime information, only important messages will show [default: False].")
parser.add_option_group(runtimeSettings)
(options, args) = parser.parse_args(argv)
if options.sample_list is None:
parser.error("A list of samples is required!")
exit(0)
if options.qvalue < 0 or options.qvalue > 1:
parser.error("Q value should be float ranging between 0 and 1, you supplied: %.2f\n" % options.qvalue)
exit(0)
# kickstart
sys.stdout.write("metaHGT started at %s\n"%(ctime()))
sys.stdout.flush()
# check sanity of the files in required directories
projInfo = ProjectInfo()
projInfo.initProject(options)
# if necessary, run bwa + samtools to generate sorted and indexed BAM files
perform_mapping = False
if projInfo.bam_dir == None:
perform_mapping = True
if perform_mapping:
bams.genBAMs(projInfo)
# run calHGT
HGTs = calHGT.calHGT(projInfo)
# generate output
ofh = open(projInfo.outfile, 'w')
ofh.write('# output of metaHGT v%s\n' % __version__)
ofh.write('# author: %s\n' % __author__)
ofh.write('# copyright: Chengwei Luo, Konstantinidis Lab, Georgia Institute of Technology, 2013.\n')
ofh.write('# run command: %s\n' % ' '.join(argv))
ofh.write('#timepoint1\ttimepoint2\tbin1\tcontigA\tbreakpointA\torientationA\t')
ofh.write('bin2\tcontigB\tbreakpointB\torientationB\tperc_1\tperc_2\traw_pval\tadj_pval\n')
for (sample1, sample2) in HGTs:
hs = HGTs[(sample1, sample2)]
for h in hs:
if h.adj_pvalue > projInfo.qvalue:
continue
ofh.write('%s\t%s\t%s\n' % (sample1, sample2, h.strHGT()))
ofh.close()
# end
sys.stdout.write("metaHGT finished at %s\n"%(ctime()))
sys.stdout.flush()
if __name__ == '__main__':
main()
| fw1121/metaHGT | src/metaHGT.py | Python | gpl-3.0 | 15,284 | [
"BWA"
] | feca812ff95cb5851451a5c6f46b71be7158ba21bf8fb76f195d4108fdb27511 |
import re
def find_n_elect(file_name):
log_file = open('%s' % file_name)
for line in log_file:
if 'alpha electrons' in line:
alpha = int(line.split()[0])
beta = int(line.split()[3])
n_ele = alpha + beta
break
return n_ele, alpha, beta
def find_homo(file_name):
log_file = open('%s' % file_name)
occ_orbitals = []
for line in log_file:
if 'Alpha occ. eigenvalues' in line:
i = 4
while i < len(line.split()):
occ_orbitals.append(float(line.split()[i]))
i += 1
elif 'Beta occ. eigenvalues' in line:
i = 4
while i < len(line.split()):
occ_orbitals.append(float(line.split()[i]))
i += 1
return max(occ_orbitals)
def find_lumo(file_name):
log_file = open('%s' % file_name)
occ_orbitals = []
for line in log_file:
if 'Alpha virt. eigenvalues' in line:
i = 4
while i < len(line.split()):
occ_orbitals.append(float(line.split()[i]))
i += 1
elif 'Beta virt. eigenvalues' in line:
i = 4
while i < len(line.split()):
occ_orbitals.append(float(line.split()[i]))
i += 1
return min(occ_orbitals)
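# Convenience sketch (not in the original file): the HOMO-LUMO gap follows
# directly from the two parsers above, in the units Gaussian prints the
# eigenvalues in (Hartree).
def find_gap(file_name):
    return find_lumo(file_name) - find_homo(file_name)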
def find_scf_energy(file_name):
energy = 0.
log_file = open('%s' % file_name)
for line in log_file:
if 'SCF Done:' in line:
energy = float(line.split()[4])
return energy
def find_ccsdt_energy(file_name):
log_file = open('%s' % file_name)
collect = ''
for line in log_file:
if 'Version=' in line:
break
if 'State=' in line:
break
for line in log_file:
collect += line[1:-1]
collect = re.sub('\\n ', '', collect)
energy = 0.
for i in collect.split('\\'):
if 'CCSD(T)=' in i:
energy = i.split('=')[1]
break
return float(energy)
def find_ccsd_energy(file_name):
log_file = open('%s' % file_name)
collect = ''
for line in log_file:
if 'Version=' in line:
break
if 'State=' in line:
break
for line in log_file:
collect += line[1:-1]
collect = re.sub('\\n ', '', collect)
energy = 0.
for i in collect.split('\\'):
if 'CCSD=' in i:
energy = i.split('=')[1]
break
return float(energy)
def find_xdm_energy(file_name):
log_file = open('%s' % file_name)
for line in log_file:
if 'total energy (SCF+XDM)' in line:
tot_ene = float(line.split()[3])
if 'dispersion energy' in line:
disp_ene = float(line.split()[2])
return tot_ene, disp_ene
def grab_coords(log_file):
log_file = open('%s' % log_file)
coords = []
for line in log_file:
if 'Optimization completed' in line:
break
for line in log_file:
if 'Standard orientation:' in line:
break
line = log_file.next()
line = log_file.next()
line = log_file.next()
line = log_file.next()
for line in log_file:
if '-----------' in line:
break
else:
a, b, c, d, e, f = line.split()
coords.append([b, float(d), float(e), float(f)])
return coords
# Returns polarisabilities read from a gaussian file as a list in the form
# [a_xx, a_xy, a_yy, a_xz, a_yz, a_zz]
def find_polar(log_file):
log_file = open('%s' % log_file)
collect = ''
for line in log_file:
if 'Version=' in line:
break
if 'State=' in line:
break
for line in log_file:
collect += line
collect = re.sub('\\n ', '', collect)
polar = [0., 0., 0., 0., 0., 0.]
for i in collect.split('\\'):
if 'Polar' in i:
polar = i.split('=')[1]
polar = polar.split(',')
break
return polar
# Returns hyperpolarisabilities read from a gaussian file as a list in the form
# [b_xxx, b_xxy, b_xyy, b_yyy, b_xxz, b_xyz, b_yyz, b_xzz, b_yzz, b_zzz]
def find_hyperpolar(log_file):
log_file = open('%s' % log_file)
collect = ''
for line in log_file:
if 'Version=' in line:
break
if 'State=' in line:
break
for line in log_file:
collect += line
collect = re.sub('\\n ', '', collect)
hpolar = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
for i in collect.split('\\'):
if 'HyperPolar' in i:
hpolar = i.split('=')[1]
hpolar = hpolar.split(',')
break
return hpolar
# Returns B05 and B13 energies read from the output file of Axel's postG code (Apr 2017)
def find_B05(file_name):
b05_out = open('%s' % file_name, 'r')
B05 = 0.0
B13 = 0.0
for line in b05_out:
if 'B05 energy' in line:
B05 = float(line.split()[3])
if 'B13-0 energy' in line:
B13 = float(line.split()[3])
return [B05, B13]
def find_hirsh(file_name):
log_file = open('%s' % file_name, 'r')
hirsh = []
for line in log_file:
if 'Hirshfeld charges, spin densities, dipoles,' in line:
break
k = 0
for line in log_file:
if k == 0:
k += 1
continue
elif 'Tot' in line:
break
else:
hirsh.append((line.split()[1], float(line.split()[2])))
return hirsh
def find_mull(file_name):
log_file = open('%s' % file_name, 'r')
mull = []
for line in log_file:
if 'Mulliken charges:' in line:
break
k = 0
for line in log_file:
if k == 0:
k += 1
continue
elif 'Sum of Mulliken charges' in line:
break
else:
mull.append((line.split()[1], float(line.split()[2])))
return mull
# Returns the absolute dipole moment of the system
def find_dipole(file_name):
gaus_out = open('%s' % file_name, 'r')
dipole = 0
for line in gaus_out:
if 'Dipole moment' in line:
break
for line in gaus_out:
if ' X= ' in line:
dipole = float(line.split()[7])
elif 'Results using SCF density:' in line:
break
return dipole
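# Usage sketch (hypothetical log file name): pull a few common quantities
# from one Gaussian output. Each helper re-opens the file, so the calls
# are independent and can run in any order.
if __name__ == '__main__':
    log = 'water_b3lyp.log'  # hypothetical Gaussian output file
    n_ele, alpha, beta = find_n_elect(log)
    print 'electrons: %i (alpha %i, beta %i)' % (n_ele, alpha, beta)
    print 'SCF energy (Hartree): %.6f' % find_scf_energy(log)
    print 'dipole (Debye): %.4f' % find_dipole(log)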
| Wo0o0o0ble/stags | g09_grab_numbers.py | Python | gpl-3.0 | 6,321 | [
"Gaussian"
] | 7c8e9384cdba57fec0a1a8872c8657ec7f31ef8edb9cfe6814fee8d82f947f15 |