text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
import datetime
from invoke import task
from monty.os import cd
from pymatgen import __version__ as CURRENT_VER
NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d")
"""
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""
__author__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Sep 1, 2014"
@task
def make_doc(ctx):
    """Build the HTML documentation.

    Extracts the most recent release notes from CHANGES.rst into
    docs/latest_changes.rst, converts the example notebooks to HTML,
    regenerates the sphinx-apidoc rst files (stripping test packages),
    and runs sphinx to produce docs/_build/html.

    Fix: the regex patterns are now raw strings -- "\\-" in a plain
    string is an invalid escape sequence in Python 3.
    """
    with open("CHANGES.rst") as f:
        contents = f.read()
    # Split on the "---..." underline that separates release sections.
    toks = re.split(r"\-{3,}", contents)
    n = len(toks[0].split()[-1])
    changes = [toks[0]]
    # Keep only the latest release block, dropping its trailing heading line.
    changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
    changes = ("-" * n).join(changes)
    with open("docs/latest_changes.rst", "w") as f:
        f.write(changes)
    with cd("examples"):
        ctx.run("jupyter nbconvert --to html *.ipynb")
        ctx.run("mv *.html ../docs/_static")
    with cd("docs"):
        ctx.run("cp ../CHANGES.rst change_log.rst")
        ctx.run("sphinx-apidoc --separate -d 6 -o . -f ../pymatgen")
        ctx.run("rm pymatgen*.tests.*rst")
        for f in glob.glob("*.rst"):
            if f.startswith('pymatgen') and f.endswith('rst'):
                newoutput = []
                suboutput = []
                subpackage = False
                with open(f, 'r') as fid:
                    for line in fid:
                        clean = line.strip()
                        if clean == "Subpackages":
                            subpackage = True
                        if not subpackage and not clean.endswith("tests"):
                            newoutput.append(line)
                        else:
                            # Buffer subpackage entries; flush the buffer only
                            # once a non-test pymatgen entry is reached.
                            if not clean.endswith("tests"):
                                suboutput.append(line)
                            if clean.startswith("pymatgen") and not clean.endswith("tests"):
                                newoutput.extend(suboutput)
                                subpackage = False
                                suboutput = []
                with open(f, 'w') as fid:
                    fid.write("".join(newoutput))
        ctx.run("make html")
        ctx.run("cp _static/* _build/html/_static")
        # This makes sure pymatgen.org works to redirect to the Github page
        ctx.run("echo \"pymatgen.org\" > _build/html/CNAME")
        # Avoid the use of jekyll so that _ directories work as intended.
        ctx.run("touch _build/html/.nojekyll")
@task
def update_doc(ctx):
    """Rebuild the docs and push them to the gh-pages branch.

    Assumes docs/_build/html is a git checkout tracking gh-pages.
    """
    # Pull first so the subsequent commit does not conflict.
    with cd("docs/_build/html/"):
        ctx.run("git pull")
    make_doc(ctx)
    with cd("docs/_build/html/"):
        ctx.run("git add .")
        ctx.run("git commit -a -m \"Update dev docs\"")
        ctx.run("git push origin gh-pages")
@task
def publish(ctx):
    """Upload the release to PyPI via the project's setup.py release command."""
    ctx.run("python setup.py release")
@task
def set_ver(ctx):
    """Stamp NEW_VER into pymatgen/__init__.py and setup.py.

    Rewrites both files line by line, replacing the __version__
    assignment and the version=... keyword argument respectively.
    """
    lines = []
    with open("pymatgen/__init__.py", "rt") as f:
        for l in f:
            if "__version__" in l:
                lines.append('__version__ = "%s"' % NEW_VER)
            else:
                lines.append(l.rstrip())
    with open("pymatgen/__init__.py", "wt") as f:
        f.write("\n".join(lines))
    lines = []
    with open("setup.py", "rt") as f:
        for l in f:
            # Replace any version=<...>, keyword argument with the new version.
            lines.append(re.sub(r'version=([^,]+),', 'version="%s",' % NEW_VER,
                                l.rstrip()))
    with open("setup.py", "wt") as f:
        f.write("\n".join(lines))
@task
def update_coverage(ctx):
    """Run the test suite with coverage and publish the HTML report.

    Fix: ``update_doc`` requires the invoke context argument; it was
    previously called as ``update_doc()``, which raised a TypeError.
    """
    with cd("docs/_build/html/"):
        ctx.run("git pull")
    ctx.run("nosetests --config=nose.cfg --cover-html --cover-html-dir=docs/_build/html/coverage")
    update_doc(ctx)
@task
def merge_stable(ctx):
    """Commit the release on master, merge master into stable, and push both."""
    ctx.run("git commit -a -m \"v%s release\"" % NEW_VER)
    ctx.run("git push")
    ctx.run("git checkout stable")
    ctx.run("git pull")
    ctx.run("git merge master")
    ctx.run("git push")
    # Return to master so subsequent tasks operate on the dev branch.
    ctx.run("git checkout master")
@task
def release_github(ctx):
    """Create a GitHub release for NEW_VER from the latest CHANGES.rst entry.

    Requires a token in the GITHUB_RELEASES_TOKEN environment variable.

    Fixes: the release ``name`` used the undefined name ``ver`` (NameError);
    it must match the tag, which is built from NEW_VER. The regex is now a
    raw string to avoid the invalid "\\-" escape in Python 3.
    """
    with open("CHANGES.rst") as f:
        contents = f.read()
    # The second token after splitting on the underline is the newest entry.
    toks = re.split(r"\-+", contents)
    desc = toks[1].strip()
    toks = desc.split("\n")
    # Drop the trailing line (the next section's heading).
    desc = "\n".join(toks[:-1]).strip()
    payload = {
        "tag_name": "v" + NEW_VER,
        "target_commitish": "master",
        "name": "v" + NEW_VER,
        "body": desc,
        "draft": False,
        "prerelease": False
    }
    response = requests.post(
        "https://api.github.com/repos/materialsproject/pymatgen/releases",
        data=json.dumps(payload),
        headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
    print(response.text)
@task
def update_changelog(ctx):
    """Prepend commit messages since the last release to CHANGES.rst.

    Fix: the git log range used the undefined name ``ver`` (NameError);
    the log must start at the currently released version, CURRENT_VER
    (imported from pymatgen at the top of this file).
    """
    output = subprocess.check_output(["git", "log", "--pretty=format:%s",
                                      "v%s..HEAD" % CURRENT_VER])
    lines = ["* " + l for l in output.decode("utf-8").strip().split("\n")]
    with open("CHANGES.rst") as f:
        contents = f.read()
    # Insert the new section right after the "==========" title underline.
    l = "=========="
    toks = contents.split(l)
    head = "\n\nv%s\n-----------\n" % NEW_VER
    toks.insert(-1, head + "\n".join(lines))
    with open("CHANGES.rst", "w") as f:
        f.write(toks[0] + l + "".join(toks[1:]))
@task
def log_ver(ctx):
    """Record the release by writing a marker file named after NEW_VER
    into the user's Dropbox public pymatgen folder."""
    filepath = os.path.join(os.environ["HOME"], "Dropbox", "Public",
                            "pymatgen", NEW_VER)
    with open(filepath, "w") as f:
        f.write("Release")
@task
def release(ctx, notest=False):
    """Full release pipeline: stamp version, test, publish, log, docs, merge,
    GitHub release.

    notest: skip running the test suite before publishing.
    """
    set_ver(ctx)
    if not notest:
        ctx.run("nosetests")
    publish(ctx)
    log_ver(ctx)
    update_doc(ctx)
    merge_stable(ctx)
    release_github(ctx)
@task
def open_doc(ctx):
    """Open the locally built documentation in the default web browser."""
    pth = os.path.abspath("docs/_build/html/index.html")
    webbrowser.open("file://" + pth)
|
xhqu1981/pymatgen
|
tasks.py
|
Python
|
mit
| 5,832
|
[
"pymatgen"
] |
94e4cf5e5ea0828a83e283f6b8861634a8296d65648869e9558d91ebe2e83f94
|
# proxy module
from __future__ import absolute_import
from mayavi.core.common import *
|
enthought/etsproxy
|
enthought/mayavi/core/common.py
|
Python
|
bsd-3-clause
| 87
|
[
"Mayavi"
] |
f1c3df1163ba0c962706debb5c3d1428bb9a1a3cac63160a70cffbc157b3e8d0
|
import numpy as np
from xd_elg_utils import *
import XD_selection_module as XD
# Script: print GMM and dN/dm fit parameters as LaTeX table rows.
# Class names; indices align with K_i and dNdm_type below.
# NOTE(review): cnames has 9 entries but only the first num_class=7 are used.
cnames = ["Gold", "Silver", "LowOII", "NoOII", "LowZ", "NoZ", "D2reject", "DR3unmatched","D2unobserved"]
param_directory = "./XD-parameters/"
# Number of Gaussian components per class.
K_i = [2,2,2,3,2,2,7]
# dN/dm model per class: 0 = power law, 1 = broken power law.
dNdm_type = [1, 1, 0, 1, 0, 0, 1]
param_tag2 = "-Field34" # "-Field34" if using only Field 3 and 4 data. Otherwise "".
##############################################################################
print("Parameter directory: ")
if param_tag2 == "-Field34":
    print("Using Field 3 and 4 training fits.")
##############################################################################
print("Load the parameters")
params = XD.generate_XD_model_dictionary(param_directory, K_i=K_i, dNdm_type=dNdm_type, tag2=param_tag2)
print("Completed\n")
##############################################################################
# Class name, CN, component gaussian, A_ij, mu_ij_rz [1], mu_ij_gr [0], rzrz, rzgr, grgr
# Class Name $i$ & Component Gaussian $j$ & $A_{ij}$& $\mu^{g-r}_{ij}$ & $ \mu^{r-z}_{ij}$ & $\Sigma^{g-r,g-r}_{ij} $ & $ \Sigma^{g-r,r-z}_{ij}$ & $\Sigma^{r-z,r-z}_{ij}$\\ \hline
print("Print GMM fit parameters")
num_class = 7
for i in range(num_class):
    # Order components by descending amplitude.
    idx = np.flip(np.argsort(params[(i,"amp")]), -1)
    counter = 0
    for j in idx:
        counter +=1
        # print(i,j)
        row_str = []
        # Class name (printed only on the first row of each class)
        if counter == 1:
            row_str.append(cnames[i])
        else:
            row_str.append("")
        # Class number.
        # Nothing
        # Component gaussian
        row_str.append("%d"%counter)
        # A_ij
        row_str.append("%.6f"%params[(i,"amp")][j])
        # mu_ij_rz [0]
        row_str.append("%.6f"%params[(i,"mean")][j][0])
        # mu_ij_gr [1]
        row_str.append("%.6f"%params[(i,"mean")][j][1])
        # Covar
        # rzrz
        row_str.append("%.6f"%params[(i,"covar")][j][0, 0])
        # rzgr
        row_str.append("%.6f"%params[(i,"covar")][j][0, 1])
        # grgr
        row_str.append("%.6f"%params[(i,"covar")][j][1, 1])
        row_str = " & ".join(row_str)
        row_str += "\\\\ \\hline"
        print(row_str)
print("Completed\n")
##############################################################################
print("dNdM fit parameters")
# Class Name, Type, alpha, beta, f, phi
num_class = 7
for i in range(num_class):
    row_str = []
    # Class name
    row_str.append(cnames[i])
    # By type
    if dNdm_type[i]==0:
        row_str.append("Power")
        # Alpha (sign flipped for the power-law convention used in the table)
        row_str.append("%.5f"%(-params[(i, "dNdm")][0]))
        # Beta (not applicable for a plain power law)
        row_str.append("---")
        # Fs
        row_str.append("%d"%1)
        # phi
        row_str.append("%.5f"%params[(i, "dNdm")][1])
    else:
        row_str.append("Broken")
        # Alpha
        row_str.append("%.5f"%(params[(i, "dNdm")][0]))
        # Beta
        row_str.append("%.5f"%(params[(i, "dNdm")][1]))
        # Fs
        row_str.append("%.5f"%(params[(i, "dNdm")][2]))
        # phi
        row_str.append("%.1f"%params[(i, "dNdm")][3])
    #
    row_str = " & ".join(row_str)
    row_str += "\\\\ \\hline"
    print(row_str)
print("Completed\n")
|
jaekor91/xd-elg-scripts
|
print-params.py
|
Python
|
gpl-3.0
| 3,317
|
[
"Gaussian"
] |
0d982b9ad0761c990f341dd2463550279c8594c751f06fcac95446fe47f6ebdf
|
import pytest
class SeleniumSessionTestCase:
    """Shared Selenium-backed session tests for capybara.py drivers.

    Subclasses must override the ``session`` fixture to supply a concrete
    driver session; each test automatically resets the session afterwards.
    """

    @pytest.fixture(scope="module")
    def session(self):
        # Abstract fixture: concrete test classes provide the session.
        raise NotImplementedError()

    @pytest.fixture(autouse=True)
    def reset_session(self, session):
        # Runs around every test; the finally clause guarantees the reset
        # even when the test fails.
        try:
            yield
        finally:
            session.reset()

    def test_fill_in_with_clear_backspace_fills_in_a_field_replacing_an_existing_value(self, session):
        session.visit("/form")
        session.fill_in("form_first_name", value="Harry", fill_options={"clear": "backspace"})
        assert session.find("fillable_field", "form_first_name").value == "Harry"

    def test_fill_in_with_clear_backspace_fills_in_a_field_replacing_an_existing_value_even_with_a_caret_position(self, session):
        session.visit("/form")
        el = session.find("css", "#form_first_name")
        # Move the caret to the start to verify clearing works regardless of
        # caret position.
        move_caret_to_beginning_js = (
            """
            this.focus();
            this.setSelectionRange(0, 0);
            """)
        el.execute_script(move_caret_to_beginning_js)
        session.fill_in("form_first_name", value="Harry", fill_options={"clear": "backspace"})
        assert session.find("fillable_field", "form_first_name").value == "Harry"

    def test_fill_in_with_clear_backspace_only_triggers_onchange_once(self, session):
        session.visit("/with_js")
        session.fill_in(
            "with_change_event", value="some value", fill_options={"clear": "backspace"})
        # click outside the field to trigger the change event
        session.find("css", "body").click()
        assert session.find(
            "css", ".change_event_triggered", match="one", wait=5).has_text("some value")

    def test_fill_in_with_clear_backspace_triggers_change_when_clearing_field(self, session):
        session.visit("/with_js")
        session.fill_in("with_change_event", value="", fill_options={"clear": "backspace"})
        # click outside the field to trigger the change event
        session.find("css", "body").click()
        assert session.has_selector("css", ".change_event_triggered", match="one", wait=5)

    def test_fill_in_with_clear_backspace_triggers_input_event_field_value_length_times(self, session):
        session.visit("/with_js")
        session.fill_in("with_change_event", value="", fill_options={"clear": "backspace"})
        # click outside the field to trigger the change event
        session.find("css", "body").click()
        # NOTE(review): count=13 presumably matches the fixture field's initial
        # value length -- confirm against the test app.
        assert session.has_xpath("//p[@class='input_event_triggered']", count=13)

    def test_repr_outputs_obsolete_elements(self, session):
        session.visit("/form")
        el = session.find("button", "Click me!")
        el.click()
        assert session.has_no_button("Click me!")
        assert repr(el) == "Obsolete <capybara.node.element.Element>"
|
elliterate/capybara.py
|
tests/selenium_session_test_case.py
|
Python
|
mit
| 2,768
|
[
"VisIt"
] |
7590d28bf1330e9bfdd7521cdd27d1e26d691f19b12588c9890a555c4edc5bd2
|
from sdssgaussfitter import gaussfit
import numpy as np
import os,sys
from util import utils
from util.readDict import readDict
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def aperture(startpx,startpy,radius=7):
    """Return a (46, 44) mask that is 0. inside a circular aperture of the
    given radius centred at (startpx, startpy) and 1. elsewhere.

    NOTE(review): the mask shape (46 rows x 44 cols) is hard-coded to the
    detector dimensions used by this script -- confirm before reuse.
    Python 2 code (uses xrange).
    """
    r = radius
    length = 2*r
    height = length
    # Candidate x/y ranges covering the bounding box of the circle.
    allx = xrange(startpx-int(np.ceil(length/2.0)),startpx+int(np.floor(length/2.0))+1)
    ally = xrange(startpy-int(np.ceil(height/2.0)),startpy+int(np.floor(height/2.0))+1)
    pixx = []
    pixy = []
    mask=np.ones((46,44))
    for x in allx:
        for y in ally:
            # Zero out pixels within radius r that also lie inside the array.
            if (np.abs(x-startpx))**2+(np.abs(y-startpy))**2 <= (r)**2 and 0 <= y and y < 46 and 0 <= x and x < 44:
                mask[y,x]=0.
    return mask
# Script body: fit a 2D Gaussian PSF to every frame of a wavelength stack and
# save the fit images, parameters, errors and chi-squared values.
# Python 2 code (print statements).
#def gaussian(height, center_x, center_y, width_x, width_y,offset):
#    """Returns a gaussian function with the given parameters"""
#    width_x = float(width_x)
#    width_y = float(width_y)
#    return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset
#testy = np.array([[gaussian(2,10,10,3,3,5)(x,y) for y in range(46)] for x in range(44)])
#utils.plotArray(testy,cbar=True)
param = readDict()
if len(sys.argv)<2:
    print "Provide file name to fit. Syntax >>python fitPsf.py objectparams.dict [filenumber]"
    sys.exit(1)
#read in parameter file as command line argument
param.read_from_file(sys.argv[1])
#provide optional file number if the object in the param file has alternate .npz files to be specified individually
fileNum = None
if len(sys.argv)>2:
    fileNum = "_"+str(sys.argv[2])
npzLoadFile = param['npzLoadFile']
npzfitpsf = param['npzfitpsf']
giffitpsf = param['giffitpsf']
# Insert the optional file number before the extension of each file name.
if fileNum != None:
    npzLoadFile = npzLoadFile.split('.')[0]+fileNum+'.'+npzLoadFile.split('.')[1]
    npzfitpsf = npzfitpsf.split('.')[0]+fileNum+'.'+npzfitpsf.split('.')[1]
    giffitpsf = giffitpsf.split('.')[0]+fileNum+'.'+giffitpsf.split('.')[1]
FramesPerFile = param['FramesPerFile']
#NumFiles = param['NumFiles']
#for filenum in range(len(NumFiles)):
#    if NumFiles[filenum] > 0:
#        NumFiles[filenum] = NumFiles[filenum]*FramesPerFile
#NumFrames = NumFiles
NumFrames = 31
print "There should be this many frames: ", NumFrames
# Initial guess for the PSF centre, read from the parameter dictionary.
guessX = param['guessX'][0]
guessY = param['guessY'][0]
stackDict = np.load(npzLoadFile)
stack = stackDict['stack']
wvls = stackDict['wvls']
print "The file actually has this many: ", len(wvls)
paramsList = []
errorsList = []
fitImgList = []
chisqList = []
plt.ion()
for iFrame in range(0,np.shape(stack)[0]):
    frame = stack[iFrame,:,:]
    #print "Frame max= ", np.nanmax(frame,axis=None)
    #frame *= CorrFactors
    #print "Corrected Frame max= ", np.nanmax(frame,axis=None)
    nanMask = np.isnan(frame)
    #for interval in xrange(len(NumFrames)-1):
    #    if NumFrames[interval] != NumFrames[interval+1]:
    #        if NumFrames[interval] < iFrame <= NumFrames[interval+1]:
    #            guessX = guessX[interval]
    #            guessY = guessY[interval]
    #            print guessX, guessY
    # NOTE(review): the triple-quoted block below is dead code kept as an
    # alternative error-weighting/masking scheme.
    '''
    apertureMask = aperture(guessX,guessY,radius=4)
    err = np.sqrt(frame) #divide by 2 to constrain PSF fit even tighter to avoid fitting to wrong peak if PSF is divided by dead pixels
    err[frame > 100]=np.inf
    #err[frame<10] = 100
    frame[nanMask] = 0 #set to finite value that will be ignored
    err[nanMask] = np.inf #ignore these data points
    err[frame==0] = np.inf
    err[apertureMask==1] = 1.0 #np.sqrt(frame[apertureMask==1])/2.0 #weight points closer to the expected psf higher
    nearDeadCutoff = 1 #100/15 cps for 4000-6000 angstroms
    err[frame<nearDeadCutoff] = np.inf
    entireMask = (err==np.inf)
    maFrame = np.ma.masked_array(frame,entireMask)
    '''
    apertureMask = aperture(guessX,guessY,radius=5)
    #if iFrame < 19:
    #    err = np.ones(np.shape(frame))*10.0
    #else:
    #    err = np.zeros(np.shape(frame))
    # Uniform unit errors: every unmasked pixel gets equal weight in the fit.
    err = np.ones(np.shape(frame))*1.0
    #err = np.sqrt(frame)
    #err[apertureMask==1] = np.inf #weight points closer to the expected psf higher
    #err[frame>100] = np.inf
    frame[nanMask]=0#set to finite value that will be ignored
    err[nanMask] = np.inf#ignore these data points
    nearDeadCutoff=0#100/15 cps for 4000-6000 angstroms
    err[frame<nearDeadCutoff] = np.inf
    entireMask = (err==np.inf)
    maFrame = np.ma.masked_array(frame,entireMask)
    # Initial guesses for [background, amplitude, x, y, width].
    guessAmp = 30.
    guessHeight = 5.
    guessWidth = 1.3
    guessParams = [guessHeight,guessAmp,guessX,guessY,guessWidth]
    limitedmin = 5*[True]
    limitedmax = 5*[True]
    minpars = [0,0,0,0,0.1] #default min pars, usually work fine
    #minpars = [0,0,27,27,1] #tighter constraint on PSF width to avoid fitting wrong peak if PSF is divided by dead pixels
    maxpars = [40,200,43,43,10]
    #maxpars = [40,200,33,33,10]
    ''' #forced parameters for Landolt standard
    if iFrame == 27:
        minpars = [8,5,0,0,0.5]
        maxpars = [30,25,43,45,1.1]
    if iFrame == 28:
        minpars = [8,5,0,0,0.5]
        maxpars = [30,25,43,45,1.1]
    if iFrame == 29:
        minpars = [8,5,0,0,0.5]
        maxpars = [30,25,43,45,1.1]
    if iFrame == 30:
        minpars = [8,5,0,0,0.5]
        maxpars = [30,25,43,45,1.10]
    '''
    usemoments=[True,True,True,True,True] #doesn't use our guess values, default
    #usemoments=[False,False,False,False,False]
    print "=========================="
    print wvls[iFrame]
    print "frame ",iFrame
    out = gaussfit(data=maFrame,err=err,params=guessParams,returnfitimage=True,quiet=True,limitedmin=limitedmin,limitedmax=limitedmax,minpars=minpars,maxpars=maxpars,circle=1,usemoments=usemoments,returnmp=True)
    mp = out[0]
    outparams = mp.params
    paramErrors = mp.perror
    chisq = mp.fnorm
    dof = mp.dof
    reducedChisq = chisq/dof
    print "reducedChisq =", reducedChisq
    fitimg = out[1]
    chisqList.append([chisq,dof])
    paramsList.append(outparams)
    errorsList.append(paramErrors)
    print "outparams = ", outparams
    print "paramErrors = ", paramErrors
    # expectedResiduals = np.ma.masked_array(np.sqrt(frame),mask=entireMask)
    # residuals = np.ma.masked_array(np.abs(frame-fitimg),mask=entireMask)
    # utils.plotArray(expectedResiduals,cbar=True)
    # utils.plotArray(residuals,cbar=True)
    # fig = plt.figure()
    # ax = fig.add_subplot(111,projection='3d')
    # x = np.arange(0,44)
    # y = np.arange(0,46)
    # X,Y = np.meshgrid(x,y)
    # linearMask = np.ravel(entireMask==0)
    # ax.plot_wireframe(X,Y,fitimg)
    # ax.scatter(outparams[2],outparams[3],outparams[0]+outparams[1],c='black')
    # ax.scatter(np.ravel(X)[linearMask],np.ravel(Y)[linearMask],np.ravel(frame)[linearMask],c='red')
    #
    # Zero the fit image where the data was NaN so the saved cube is finite.
    fitimg[nanMask]=0
    # print fitimg[np.isnan(fitimg)]
    fitImgList.append(fitimg)
    # utils.plotArray(frame,cbar=True)
    # utils.plotArray(maFrame,cbar=True)
    # utils.plotArray(fitimg,cbar=True)
    # plt.show()
    # utils.confirm('Enter to continue.')
    # plt.close()
    # plt.close()
    # plt.close()
    # Restore the NaNs that were zeroed for the fit.
    frame[nanMask]=np.nan
    # fig = plt.figure()
    # ax1=fig.add_subplot(211)
    # ax2 = fig.add_subplot(212)
    # for iRow in range(len(frame)):
    #     ax1.scatter(range(44),frame[iRow,:],c='red',marker='o',alpha=.5,label='data')
    #     ax1.scatter(range(44),fitimg[iRow,:],c='blue',marker='^',alpha=.5,label='fit')
    # ax1.set_title('Fit seen along Cols')
    # for iCol in range(np.shape(frame)[1]):
    #     ax2.scatter(range(46),frame[:,iCol],c='red',marker='o',alpha=.5,label='data')
    #     ax2.scatter(range(46),fitimg[:,iCol],c='blue',marker='^',alpha=.5,label='fit')
    # ax2.set_title('Fit seen along Rows')
    # plt.show()
    plt.close()
    print 'closed'
# Stack per-frame results and save everything to the output .npz file.
cube = np.array(fitImgList)
chisqs = np.array(chisqList)
params = np.array(paramsList)
errors = np.array(errorsList)
np.savez(npzfitpsf,fitImg=cube,params=params,errors=errors,chisqs=chisqs,wvls=wvls)
print 'saved'
utils.makeMovie(fitImgList,frameTitles=wvls, cbar=True, outName=giffitpsf, normMin=0, normMax=50)
|
bmazin/ARCONS-pipeline
|
examples/Pal2014_throughput/fitPsf.py
|
Python
|
gpl-2.0
| 8,028
|
[
"Gaussian"
] |
7b33c79e4bb28a8c500019a15bc91e76e2b883dd1b9812ed58c176c347f0596b
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns # prettify matplotlib
import sklearn.gaussian_process as gp
import sys
# Import Python 2/3 compatible names; provide fallbacks missing on Python 2.
if sys.version_info[0] == 3: # python 3
    from queue import Empty
    from math import isclose, inf
elif sys.version_info[0] == 2: # python 2
    from Queue import Empty
    inf = float('inf')
    # Backport of math.isclose (PEP 485 semantics) for Python 2.
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
else:
    print('unsupported python version')
def make2D(arr):
    """Reshape a length-l array into a column vector of shape (l, 1).

    (np.atleast_2d behaves similarly but would give shape (1, l) instead.)
    """
    return np.reshape(arr, (-1, 1))
class Noise1D:
    """Deterministic 1D noise, sampled once at construction.

    One noise value is stored per element of ``xs``; ``get`` returns the
    stored value whose x-coordinate is nearest the query point.
    """

    def __init__(self, xs, sigma):
        self.xs = xs
        # sigma == 0 gives a noise-free (all zero) table.
        self.noise = (np.zeros(shape=(len(xs))) if sigma == 0.0
                      else np.random.normal(0, sigma, size=(len(xs))))

    def get(self, x):
        """Noise value for x; arrays are mapped element-wise.

        Invariant: np.all(self.noise == self.get(self.xs)).
        """
        if not isinstance(x, np.ndarray):
            return self.noise[self.get_index(x)]
        # self cannot be passed through np.vectorize, so close over it.
        lookup = np.vectorize(lambda xi: self.noise[self.get_index(xi)])
        return lookup(x)

    def get_index(self, x):
        """Index of the stored x-coordinate closest to the query value."""
        return np.argmin(np.abs(self.xs - x))
class Noise2D:
    '''
    create and store a grid of 2D noise which can be accessed deterministically
    with get().
    '''
    def __init__(self, xs, ys, sigma, fixed=True):
        # fixed=True: pre-generate a (len(xs), len(ys)) grid so repeated
        # queries return the same value. fixed=False: fresh draw per query.
        self.xs = xs
        self.ys = ys
        self.fixed = fixed
        self.sigma = sigma
        if fixed:
            if sigma == 0.0:
                self.noise = np.zeros(shape=(len(xs), len(ys)))
            else:
                self.noise = np.random.normal(0, sigma, size=(len(xs), len(ys)))

    def get(self, x, y):
        '''
        get the noise value for the coordinates x and y.
        if x and y are meshgrids or arrays of points, then return a meshgrid or
        array of values with the noise values filled in.
        note: this should hold:
            np.all(self.noise == self.get(*np.meshgrid(self.xs, self.ys)))
        '''
        if isinstance(x, np.ndarray):
            if self.fixed:
                # cannot vectorize if self is an argument
                @np.vectorize
                def vec_get(x, y):
                    return self.noise[self.get_index(x, y)]
                return vec_get(x, y)
            else:
                assert x.shape == y.shape
                return np.random.normal(0, self.sigma, size=x.shape)
        else:
            if self.fixed:
                return self.noise[self.get_index(x, y)]
            else:
                return np.random.normal(0, self.sigma)

    def get_index(self, x, y):
        '''
        get the index into self.noise which is closest to the given x,y
        coordinate (based on the fact that entries in self.noise correspond to
        locations of xs and ys passed into the constructor)
        eg if xs = [1,2,3] then the index for x = 1.2 should be 0 (the index
        corresponding to x = 1)
        '''
        xi, yi = np.argmin(np.abs(self.xs-x)), np.argmin(np.abs(self.ys-y))
        # noise is indexed as [row, col] so x and y must be swapped
        return yi, xi
def random_covariance_matrix(d, max_variation):
    '''
    based on: https://math.stackexchange.com/a/1879937
    Generate a random covariance matrix which implies that it must be:
    - symmetric
    - positive semi-definite, meaning that all eigenvalues must be >0
      (but for practical purposes all eigenvalues must be >eps (for some small eps) instead of >0)
    Method: Use the eigendecomposition: A = P * D * P^T
    where
    - P is the horizontal stacking of the eigenvectors
    - D is a diagonal matrix with eigenvalues along the diagonal
    choose P randomly such that each eigenvector is orthogonal and has unit length (currently the algorithm isn't great)
    then choose (positive) eigenvalues (lengths of the eigenvectors)
    I found that drawing the eigenvalues from a Gaussian distribution led to more correlated results (which I wanted)
    Could also draw from uniform
    note: it turns out that covariance matrices can be represented as R^TR
    where R is an upper triangular matrix and who's elements correspond to
    the cosine and sine of the angles of the correlation (spherical
    parameterisation)
    (https://www.robots.ox.ac.uk/seminars/Extra/2012_30_08_MichaelOsborne.pdf)
    This would be a better way of creating the covariance matrices with a
    particular shape in mind

    Bug fix: this function referenced the undefined name ``sp``; scipy is
    now imported explicitly.
    '''
    import scipy.linalg

    P = np.random.uniform(0, 1, size=(d, d))
    P = scipy.linalg.orth(P)  # generate orthogonal basis for the given matrix
    # must be full rank (matrix spans d dimensions => needs d basis vectors)
    assert P.shape == (d, d)
    P /= np.linalg.norm(P, ord=2, axis=0)  # normalise to make vectors unit length
    # Positive eigenvalues drawn from |N(max_variation/2, max_variation/2)|.
    evs = np.abs(np.random.normal(max_variation/2, max_variation/2, size=(d,)))
    D = np.diag(evs.T)
    return np.matmul(P, np.matmul(D, P.T))
class GaussianMixture:
    """Mixture of 2D Gaussians with random means and random covariances.

    Bug fix: ``pdf`` referenced the undefined name ``sp``; scipy.stats is
    now imported explicitly.
    """

    def __init__(self, ranges, num_gaussians, weights=None):
        '''
        ranges: dict with keys: xmin, xmax, ymin, ymax, var
        '''
        self.num_gaussians = num_gaussians
        # Means drawn uniformly inside the given x/y box.
        self.mus = np.random.uniform(
            [ranges['xmin'], ranges['ymin']],
            [ranges['xmax'], ranges['ymax']],
            size=(num_gaussians, 2)
        )
        self.sigmas = [random_covariance_matrix(d=2, max_variation=ranges['var']) for i in range(num_gaussians)]
        # the weight of each Gaussian towards the mixture
        if weights is None:
            self.weights = [1.0/num_gaussians]*num_gaussians  # all with equal probability
        else:
            self.weights = weights

    def sample(self, num_samples):
        ''' draw x,y samples from the distribution '''
        samples = np.empty(shape=(num_samples, 2))
        for n in range(num_samples):
            # choose which Gaussian to sample based on their weight
            i = np.random.choice(range(self.num_gaussians), p=self.weights)
            x, y = np.random.multivariate_normal(self.mus[i], self.sigmas[i])
            samples[n, :] = (x, y)
        return samples

    def pdf(self, xy):
        ''' calculate the probability density of the distribution at a given point '''
        import scipy.stats
        return sum([
            self.weights[i] * scipy.stats.multivariate_normal.pdf(xy, mean=self.mus[i], cov=self.sigmas[i])
            for i in range(self.num_gaussians)])
class Data2D:
    # 2D dataset: samples from a random 5-component Gaussian mixture, plus
    # its pdf evaluated on a regular grid, with plotting helpers.
    def __init__(self):
        # ranges for selecting the means and variances
        ranges = {
            'xmin' : -10,
            'xmax' : 10,
            'ymin' : -10,
            'ymax' : 10,
            'var' : 4 # absolute value
        }
        var = ranges['var']
        # Grid/plot extent padded by var so the mixture tails are visible.
        self.extent = [ranges['xmin']-var, ranges['xmax']+var, ranges['ymin']-var, ranges['ymax']+var]
        self.G = GaussianMixture(ranges, num_gaussians=5)
        self.xys = np.array(self.G.sample(2000))
        # pdf evaluated at each sampled point, as a column vector.
        self.zs = make2D(np.apply_along_axis(self.G.pdf, 1, self.xys))
        res = 100 # resolution
        x, y = np.meshgrid(np.linspace(ranges['xmin']-var, ranges['xmax']+var, res),
                           np.linspace(ranges['ymin']-var, ranges['ymax']+var, res))
        self.all_xys = np.empty(x.shape + (2,)) # eg (100, 100) + (2,) == (100, 100, 2)
        self.all_xys[:, :, 0] = x
        self.all_xys[:, :, 1] = y
        self.x, self.y = x, y
        self.all_xys_coords = np.hstack([make2D(x.flatten()), make2D(y.flatten())]) # eg (10000, 2)
        # pdf on the full grid (vectorized over the meshgrid).
        p = lambda x,y: self.G.pdf(np.array((x,y)))
        self.all_zs = np.vectorize(p)(x, y)

    def plot_samples(self):
        # Scatter plot of the raw samples.
        plt.figure(figsize=(16,8))
        plt.axes().set_ylim(-15, 15)
        plt.axes().set_xlim(-30, 30)
        plt.plot(self.xys[:,0], self.xys[:,1], 'b.', markersize=3.0, label='data')
        plt.margins(0.1, 0.1)
        plt.legend(loc='upper left')
        plt.show()

    def plot_pdf(self):
        # Heatmap of the pdf over the full grid.
        plt.figure(figsize=(16,8))
        plt.axes().set_ylim(-15, 15)
        plt.axes().set_xlim(-30, 30)
        plt.axes().grid(False)
        plt.imshow(self.all_zs, cmap='plasma', interpolation='nearest', origin='lower', extent=self.extent)
        plt.colorbar()
        plt.show()

    def plot_histogram(self):
        # 2D histogram of the samples (empirical density).
        plt.figure(figsize=(16,8))
        plot_range = [[-30, 30], [-15, 15]]
        plt.axes().grid(False)
        plt.hist2d(self.xys[:,0], self.xys[:,1], bins=(128, 64), range=plot_range, cmap='plasma')
        plt.colorbar()
        plt.show()
class Data1D:
    """1D regression data: noisy samples of a chosen function with three
    contiguous chunks kept (the rest removed), plus a pre-trained
    Gaussian-process fit over the full domain.

    Bug fix: np.vstack was called with a bare generator expression, which
    is deprecated and raises a TypeError on modern NumPy; it now receives
    a list.
    """

    # some functions to fit
    @staticmethod
    def fun_1(x):
        s = 0.05  # noise std dev
        y = np.cos(2*x - 1/2.0)/2.0 + np.cos(x) + 1
        return s, y

    @staticmethod
    def fun_2(x):
        s = 0.3  # noise std dev
        y = x * np.cos(x)
        return s, y

    def __init__(self, f):
        """f: one of the fun_* functions, mapping x -> (noise sigma, y)."""
        nfac = 1  # factor of n to _actually_ use (this is a hack because the
        # keep_ids are based on the assumption of 2000 samples)
        n = 1666 * nfac  # num samples (including the samples which will be thrown out)
        self.min_x = 0
        self.max_x = 10
        self.full_x = make2D(np.linspace(self.min_x, self.max_x, n))
        self.s, self.full_exact_y = f(self.full_x)
        # remove some chunks: only these index ranges are kept as data
        self.keep_ids = [(200, 400), (750, 1000), (1250, 1450)]
        self.keep_ids = [(int(a*nfac), int(b*nfac)) for a, b in self.keep_ids]

        def remove_chunks(arr):
            # np.vstack requires a sequence of arrays (a list, not a
            # generator expression).
            return np.vstack([arr[a:b] for a, b in self.keep_ids])
        self.x = remove_chunks(self.full_x)
        self.exact_y = remove_chunks(self.full_exact_y)
        self.noise = make2D(np.random.normal(0, self.s, self.x.shape[0]))
        self.y = self.exact_y + self.noise
        # 0.0 where no samples, 1.0 where there are
        self.populated = np.zeros(len(self.full_x))
        for a, b in self.keep_ids:
            self.populated[a:b] = 1.0
        print('training GP')
        kernel = 1.0 * gp.kernels.Matern(nu=1.5) + gp.kernels.WhiteKernel()
        gp_params = dict(
            alpha = 1e-10, # noise level
            # Fixed hyperparameters (optimizer=None), presumably found from a
            # previous optimisation run -- TODO confirm.
            kernel = kernel.clone_with_theta(np.array([5.0566808, 1.94789113, -2.29135835])),
            optimizer = None,
            #n_restarts_optimizer = 10,
            normalize_y = True
        )
        self.gp_model = gp.GaussianProcessRegressor(**gp_params)
        self.gp_model.fit(self.x, self.y)
        self.gp_mus, self.gp_sigmas = self.gp_model.predict(self.full_x, return_std=True)
        self.gp_sigmas = make2D(self.gp_sigmas)
        print('done')
        # Jeremy says this has no theoretical grounding :(
        '''
        # larger noise
        big_noise = make2D(np.random.normal(0, self.s*4, len(self.full_x)))
        small_noise = make2D(np.random.normal(0, self.s, len(self.full_x)))
        self.big_noise = [small_noise[i] if self.populated_b[i] else big_noise[i] for i in range(len(self.full_x))]
        self.full_noisy_y = self.full_exact_y + self.big_noise
        '''

    def plot_samples(self, show_samples=True, show_exact=True, show_populated=True, show_gp=True):
        """Plot the data, generator function, populated-region indicator and
        the GP mean with a 2-sigma band."""
        plt.figure(figsize=(16,8))
        if show_samples:
            plt.plot(self.x, self.y, 'b.', label='data')
        if show_exact:
            plt.plot(self.full_x, self.full_exact_y, 'g-', label='generator')
        if show_populated:
            plt.plot(self.full_x, self.populated, 'r-', label='populated')
        if show_gp:
            plt.plot(self.full_x, self.gp_mus, 'm-', label='gp mean')
            n_sigma = 2
            s = n_sigma*self.gp_sigmas.flatten()
            m = self.gp_mus.flatten()
            plt.fill_between(self.full_x.flatten(), m-s, m+s, alpha=0.3,
                             color='mediumpurple', label='gp ${}\\sigma$'.format(n_sigma))
        plt.margins(0.1, 0.1)
        plt.legend(loc='upper left')
        plt.show()
|
mbway/Bayesian-Optimisation
|
old_library/synthetic_data.py
|
Python
|
gpl-3.0
| 12,722
|
[
"Gaussian"
] |
9b188d7b23630b2bda187286eae67ffc2dc884227e4ef93886826e7cfc5fa783
|
# NOTE: This example uses the next generation Twilio helper library - for more
# information on how to download and install this version, visit
# https://www.twilio.com/docs/libraries/python
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
# (placeholders -- replace with real credentials before running).
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = Client(account, token)

# Update the Notify credential: rename it and enable sandbox mode.
credential = client.notify \
    .credentials("CRxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
    .update(friendly_name="MyCredential",
            sandbox=True)
print(credential.friendly_name)
|
teoreteetik/api-snippets
|
notifications/rest/credentials/update-credential/update-credential.6.x.py
|
Python
|
mit
| 624
|
[
"VisIt"
] |
4e2ea7b921f23b3765c78de4afe29b40160cc1faa4ec0b8b0fb35b39fd266750
|
"""Test LCAO density calculation and conversion to grid.
Test that the density generated by the following three procedures is the same:
* basis_functions.construct_density as used in a normal calculation
* axpy used on the psit_nG as constructed by lcao_to_grid
* axpy used on the Phit_MG[i] * Phit_MG[j] * rho[j, i], where Phit_MG
are the actual basis functions on the grid, constructed using lcao_to_grid
TODO: non-gamma-point test
"""
import numpy as np
from ase.structure import molecule
from gpaw import GPAW
from gpaw.utilities.blas import axpy
system = molecule('H2O')
system.center(vacuum=2.5)
calc = GPAW(mode='lcao',
#basis='dzp',
maxiter=1)
system.set_calculator(calc)
try:
system.get_potential_energy()
except:
pass
wfs = calc.wfs
kpt = wfs.kpt_u[0]
nt_G = calc.density.gd.zeros()
bfs = wfs.basis_functions
nao = wfs.setups.nao
f_n = kpt.f_n
rho_MM = np.zeros((nao, nao))
wfs.calculate_density_matrix(kpt.f_n, kpt.C_nM, rho_MM)
bfs.construct_density(rho_MM, nt_G, -1)
nbands = wfs.bd.nbands
psit_nG = wfs.gd.zeros(nbands)
bfs.lcao_to_grid(kpt.C_nM, psit_nG, -1)
nt2_G = calc.density.gd.zeros()
for f, psit_G in zip(f_n, psit_nG):
axpy(f, psit_G**2, nt2_G)
identity_MM = np.identity(nao)
Phit_MG = calc.wfs.gd.zeros(nao)
bfs.lcao_to_grid(identity_MM, Phit_MG, -1)
nt3_G = calc.density.gd.zeros()
for M1, Phit1_G in enumerate(Phit_MG):
for M2, Phit2_G in enumerate(Phit_MG):
nt3_G += rho_MM[M1, M2] * Phit1_G * Phit2_G
err1_G = nt2_G - nt_G
err2_G = nt3_G - nt_G
maxerr1 = np.abs(err1_G).max()
maxerr2 = np.abs(err2_G).max()
print 'err1', maxerr1
print 'err2', maxerr2
assert max(maxerr1, maxerr2) < 1e-15
|
ajylee/gpaw-rtxs
|
gpaw/test/lcao_density.py
|
Python
|
gpl-3.0
| 1,690
|
[
"ASE",
"GPAW"
] |
a25c41111a7dd0dd182c7073e8ec709fcdc89dbd0b5d169b918e47c435b5a024
|
import numpy as np
from histomicstk.utils import gradient_diffusion
def gvf_tracking(I, Mask, K=1000, Diffusions=10, Mu=5, Lambda=5, Iterations=10,
                 dT=0.05):
    """
    Performs gradient-field tracking to segment smoothed images of cell nuclei.
    Takes as input a smoothed intensity or Laplacian-of-Gaussian filtered image
    and a foreground mask, and groups pixels by tracking them to mutual
    gradient sinks. Typically requires merging of sinks (seeds) as a post
    processing steps.
    Parameters
    ----------
    I : array_like
        Smoothed intensity or log-filtered response where nuclei regions have
        larger intensity values than background.
    Mask : array_like
        Binary mask where foreground objects have value 1, and background
        objects have value 0. Used to restrict influence of background vectors
        on diffusion process and to reduce tracking computations.
    K : float
        Number of steps to check for tracking cycle. Default value = 1000.
    Diffusions : float
        Number of diffusion iterations applied to the gradient field (passed
        through to gradient_diffusion) before tracking. Default value = 10.
    Mu : float
        Weight parameter from Navier-Stokes diffusion - weights divergence and
        Laplacian terms. Default value = 5.
    Lambda : float
        Weight parameter from Navier-Stokes diffusion - used to weight
        divergence. Default value = 5.
    Iterations : float
        Number of time-steps to use in Navier-Stokes diffusion. Default value =
        10.
    dT : float
        Timestep to be used in Navier-Stokes diffusion. Default value = 0.05.
    Returns
    -------
    Segmentation : array_like
        Label image where positive values correspond to foreground pixels that
        share mutual sinks.
    Sinks : array_like
        N x 2 array containing the (x,y) locations of the tracking sinks. Each
        row is an (x,y) pair - in that order.
    See Also
    --------
    histomicstk.utils.gradient_diffusion,
    histomicstk.segmentation.label.shuffle
    References
    ----------
    .. [#] G. Li et al "3D cell nuclei segmentation based on gradient flow
       tracking" in BMC Cell Biology,vol.40,no.8, 2007.
    """
    # get image shape
    M = I.shape[0]
    N = I.shape[1]
    # calculate gradient
    dy, dx = np.gradient(I)
    # diffusion iterations
    if Diffusions > 0:
        dx, dy = gradient_diffusion(dx, dy, Mask, Mu, Lambda, Diffusions,
                                    dT)
    # normalize to unit magnitude (eps guards against divide-by-zero on
    # perfectly flat regions)
    Mag = ((dx**2 + dy**2)**0.5 + np.finfo(float).eps)
    dy = dy / Mag
    dx = dx / Mag
    # define mask to track pixels that are mapped to a sink
    Mapped = np.zeros(I.shape)
    # define label image
    Segmentation = np.zeros(I.shape)
    # initialize lists of sinks
    Sinks = []
    # define coordinates for foreground pixels (Mask == 1)
    i, j = np.nonzero(Mask)
    # track pixels
    for index, (x, y) in enumerate(zip(j, i)):
        # initialize angle, trajectory length, novel flag, and allocation count
        phi = 0
        points = 1
        novel = 1
        alloc = 1
        # initialize trajectory
        Trajectory = np.zeros((K, 2))
        Trajectory[0, 0] = x
        Trajectory[0, 1] = y
        # NOTE(review): Trajectory is a float array but its entries are used
        # directly as array indices below; that relies on implicit float
        # indexing, which modern numpy rejects - confirm the targeted numpy
        # version.
        # track while angle defined by successive steps is < np.pi / 2
        while(phi < np.pi / 2):
            # calculate step
            xStep = round_float(dx[Trajectory[points-1, 1],
                                   Trajectory[points-1, 0]])
            yStep = round_float(dy[Trajectory[points-1, 1],
                                   Trajectory[points-1, 0]])
            # check image edge
            if ((Trajectory[points-1, 0] + xStep < 0) or
                    (Trajectory[points-1, 0] + xStep > N-1) or
                    (Trajectory[points-1, 1] + yStep < 0) or
                    (Trajectory[points-1, 1] + yStep > M-1)):
                break
            # add new point to trajectory list
            if points < K:  # buffer is not overrun
                Trajectory[points, 0] = Trajectory[points-1, 0] + xStep
                Trajectory[points, 1] = Trajectory[points-1, 1] + yStep
            else:  # buffer overrun
                # check for cycle
                cycle = detect_cycle(Trajectory, points)
                if cycle == points:  # no cycle, simple overflow. grow buffer.
                    # copy and reallocate
                    # NOTE(review): alloc is still 1 the first time this runs,
                    # so np.zeros((K*alloc, 2)) allocates the *same* size and
                    # the copy slice targets the tail rather than the head -
                    # suspected buffer-growth bug; confirm against upstream.
                    temp = Trajectory
                    Trajectory = np.zeros((K*alloc, 2))
                    Trajectory[K*(alloc-1):K*alloc, ] = temp
                    alloc += 1
                    # add new point
                    Trajectory[points, 0] = Trajectory[points-1, 0] + xStep
                    Trajectory[points, 1] = Trajectory[points-1, 1] + yStep
                else:  # overflow due to cycle, terminate tracking
                    points = cycle
            # check mapping
            if Mapped[Trajectory[points, 1], Trajectory[points, 0]] == 1:
                # landed on a pixel already claimed by an earlier trajectory
                novel = 0
                phi = np.pi
            elif Mask[Trajectory[points, 1], Trajectory[points, 0]] == 0:
                # stepped out of the foreground mask - stop tracking
                phi = np.pi
            else:
                # angle between the two most recent unit steps; the while
                # condition terminates tracking once it reaches pi/2
                phi = np.arccos(dy[Trajectory[points-1, 1],
                                   Trajectory[points-1, 0]] *
                                dy[Trajectory[points, 1],
                                   Trajectory[points, 0]] +
                                dx[Trajectory[points-1, 1],
                                   Trajectory[points-1, 0]] *
                                dx[Trajectory[points, 1],
                                   Trajectory[points, 0]])
            # increment trajectory length counter
            points += 1
        # determine if sink is novel
        if novel == 1:
            # record sinks
            Sinks.append(Trajectory[points-1, ])
            # add trajectory to label image with new sink value, add mapping
            # NOTE(review): this loop variable rebinds the j produced by
            # np.nonzero above; iteration of zip(j, i) is unaffected, but the
            # shadowing is fragile.
            for j in range(points):
                Segmentation[Trajectory[j, 1], Trajectory[j, 0]] = len(Sinks)
                Mapped[Trajectory[j, 1], Trajectory[j, 0]] = 1
        else:
            # add trajectory to label image with sink value of final point
            for j in range(points):
                Segmentation[Trajectory[j, 1], Trajectory[j, 0]] = \
                    Segmentation[Trajectory[points-1, 1],
                                 Trajectory[points-1, 0]]
    # convert Sinks to numpy array
    Sinks = np.asarray(Sinks)
    return Segmentation, Sinks
def merge_sinks(Label, Sinks, Radius=5):
    """
    Merges attraction basins obtained from gradient flow tracking using
    sink locations.
    Parameters
    ----------
    Label : array_like
        Label image where positive values correspond to foreground pixels that
        share mutual sinks.
    Sinks : array_like
        N x 2 array containing the (x,y) locations of the tracking sinks. Each
        row is an (x,y) pair - in that order.
    Radius : float
        Radius used to merge sinks. Sinks closer than this radius to one
        another will have their regions of attraction merged.
        Default value = 5.
    Returns
    -------
    Merged : array_like
        Label image where attraction regions are merged.
    """
    import skimage.morphology as mp
    from skimage import measure as ms
    # drop each sink into an otherwise-empty seed image
    seeds = np.zeros(Label.shape)
    for idx in range(Sinks.shape[0]):
        seeds[Sinks[idx, 1], Sinks[idx, 0]] = idx + 1
    # dilate the seeds so that nearby sinks fuse into one connected component
    grown = mp.binary_dilation(seeds, mp.disk(Radius))
    clusters = ms.label(grown)
    # cluster id of each original sink after fusion
    membership = clusters[Sinks[:, 1].astype(np.int), Sinks[:, 0].astype(np.int)]
    merged = np.zeros(Label.shape)
    # pixel lists of the original (pre-merge) attraction regions
    regions = ms.regionprops(Label.astype(np.int))
    # paint every member region of a cluster with the shared cluster id
    for cluster_id in np.arange(1, membership.max() + 1):
        for member in np.nonzero(membership == cluster_id)[0]:
            pixels = regions[member].coords
            merged[pixels[:, 0], pixels[:, 1]] = cluster_id
    return merged
def detect_cycle(Trajectory, points):
    """Return how many leading trajectory points are visited before any
    coordinate repeats; equals ``points`` when there is no repeat."""
    xs = Trajectory[0:points, 0]
    ys = Trajectory[0:points, 1]
    x0 = np.min(xs)
    y0 = np.min(ys)
    # occupancy map over the trajectory's bounding box
    visited = np.zeros((np.max(ys) - y0 + 1, np.max(xs) - x0 + 1))
    steps = 0
    for k in range(points):
        row = Trajectory[k, 1] - y0
        col = Trajectory[k, 0] - x0
        if visited[row, col] == 1:
            break
        visited[row, col] = 1
        steps += 1
    return steps
def round_float(x):
    """Round to the nearest integer, with exact halves rounded away from
    zero (unlike np.round's round-half-to-even)."""
    if x < 0.0:
        # the rule is symmetric about zero
        return -round_float(-x)
    nearest = np.ceil(x)
    if nearest - x > 0.5:
        nearest -= 1.0
    return nearest
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/segmentation/nuclear/gvf_tracking.py
|
Python
|
apache-2.0
| 8,978
|
[
"Gaussian"
] |
5cf86b00ce94962cde61b0890d64bf18bedd3ea3fe2ed98a38b00fa031e28473
|
import os
import unittest
import numpy as np
from deepchem.utils import rdkit_utils
from deepchem.utils.fragment_utils import get_contact_atom_indices
from deepchem.utils.fragment_utils import merge_molecular_fragments
from deepchem.utils.fragment_utils import get_partial_charge
from deepchem.utils.fragment_utils import strip_hydrogens
from deepchem.utils.fragment_utils import MolecularFragment
from deepchem.utils.fragment_utils import AtomShim
class TestFragmentUtil(unittest.TestCase):
  """Tests for the helpers in deepchem.utils.fragment_utils."""

  def setUp(self):
    # TODO test more formats for ligand
    data_root = os.path.dirname(os.path.realpath(__file__))
    self.protein_file = os.path.join(
        data_root, '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb')
    self.ligand_file = os.path.join(data_root,
                                    '../../feat/tests/data/3ws9_ligand.sdf')

  def test_get_contact_atom_indices(self):
    """A protein/ligand complex yields one contact-index list per molecule."""
    loaded = rdkit_utils.load_complex([self.protein_file, self.ligand_file])
    contacts = get_contact_atom_indices(loaded)
    assert len(contacts) == 2

  def test_create_molecular_fragment(self):
    """A MolecularFragment mirrors the atoms and coordinates it was built from."""
    coords, rdk_mol = rdkit_utils.load_molecule(self.ligand_file)
    frag = MolecularFragment(rdk_mol.GetAtoms(), coords)
    assert len(rdk_mol.GetAtoms()) == len(frag.GetAtoms())
    assert (frag.GetCoords() == coords).all()

  def test_strip_hydrogens(self):
    """strip_hydrogens runs cleanly on a freshly loaded ligand."""
    coords, rdk_mol = rdkit_utils.load_molecule(self.ligand_file)
    MolecularFragment(rdk_mol.GetAtoms(), coords)
    # Test on RDKit
    strip_hydrogens(coords, rdk_mol)

  def test_merge_molecular_fragments(self):
    """Merging two copies of a fragment doubles the atom count."""
    coords, rdk_mol = rdkit_utils.load_molecule(self.ligand_file)
    halves = [MolecularFragment(rdk_mol.GetAtoms(), coords) for _ in range(2)]
    combined = merge_molecular_fragments(halves)
    assert len(rdk_mol.GetAtoms()) * 2 == len(combined.GetAtoms())

  def test_get_partial_charge(self):
    """An atom of neutral ethane reports a partial charge of zero."""
    from rdkit import Chem
    ethane = Chem.MolFromSmiles("CC")
    assert get_partial_charge(ethane.GetAtoms()[0]) == 0

  def test_atom_shim(self):
    """AtomShim echoes back the values it was constructed with."""
    coords = np.array([0., 1., 2.])
    shim = AtomShim(5, 1, coords)
    assert shim.GetAtomicNum() == 5
    assert shim.GetPartialCharge() == 1
    assert (shim.GetCoords() == coords).all()
|
miaecle/deepchem
|
deepchem/utils/test/test_fragment_utils.py
|
Python
|
mit
| 2,484
|
[
"RDKit"
] |
a2634aff5674870d46e6a7cccadc1a6c3aa27ef79a8f0d175cef6e24ac006b9e
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ldap3
class CERNLDAPSyncPlugin(object):
    """Synchronization plugin for mapping new users to CERN accounts.
    This plugin results in new users having two additional fields in the CS,
    CERNAccountType and PrimaryCERNAccount. If the new nickname does not have a
    corresponding CERN account it will be rejected.
    """

    def __init__(self):
        """Initialise the plugin and underlying LDAP connection."""
        self._server = ldap3.Server('ldap://xldap.cern.ch')
        # SAFE_SYNC makes search() return (status, result, response, request)
        # tuples, which _getUserInfo unpacks below.
        self._connection = ldap3.Connection(
            self._server,
            client_strategy=ldap3.SAFE_SYNC,
            auto_bind=True
        )

    def verifyAndUpdateUserInfo(self, username, userDict):
        """Add the "CERNAccountType" and "PrimaryCERNAccount" values to the CS attributes.
        :param username: DIRAC name of the user to be added
        :param userDict: user information collected by the VOMS2CSAgent
        :returns: None
        :raise ValueError: if no corresponding CERN account is found.
        """
        attributes = self._getUserInfo(username)
        cernAccountType = attributes["cernAccountType"]
        userDict["CERNAccountType"] = cernAccountType[0]
        if cernAccountType == ["Primary"]:
            userDict["PrimaryCERNAccount"] = username
        else:
            # Non-primary accounts map back to their owning primary account
            userDict["PrimaryCERNAccount"] = self._findOwnerAccountName(username, attributes)

    def _findOwnerAccountName(self, username, attributes):
        """Find the owner account from a CERN LDAP entry.
        :param username: DIRAC name of the user to be added
        :param attributes: output of ``_getUserInfo`` for ``username``
        :returns: The name of the owning CERN account
        :raise ValueError: if the owner cannot be determined unambiguously
        """
        owners = attributes["cernAccountOwner"]
        if len(owners) != 1:
            raise ValueError(
                "Expected exactly one cernAccountOwner for %s but found %s"
                % (username, len(owners))
            )
        # the owner attribute is a DN; extract its CN component
        commonNames = [v for k, v, _ in ldap3.utils.dn.parse_dn(owners[0]) if k == "CN"]
        if len(commonNames) != 1:
            raise ValueError(
                "Expected exactly one common name in the cernAccountOwner of %s but found %s"
                % (username, len(commonNames))
            )
        primaryAccountName = commonNames[0]
        primaryAttributes = self._getUserInfo(primaryAccountName)
        if primaryAttributes["cernAccountType"] != ["Primary"]:
            # Previously raised an unhelpful "Something is very wrong!";
            # include the accounts and the unexpected type so operators can act.
            raise ValueError(
                "Expected owner account %s of %s to be of type Primary but found %s"
                % (primaryAccountName, username, primaryAttributes["cernAccountType"])
            )
        return primaryAccountName

    def _getUserInfo(self, commonName):
        """Query the CERN LDAP server for the given ``commonName``.
        :param commonName: Common Name of an account known to CERN
        :returns: ``dict`` of the account attributes from LDAP
        :raise ValueError: if no corresponding CERN account is found
        """
        status, result, response, _ = self._connection.search(
            "OU=Users,OU=Organic Units,DC=cern,DC=ch",
            "(CN=%s)" % commonName,
            attributes=["cernAccountOwner", "cernAccountType"],
        )
        if not status:
            raise ValueError("Bad status from LDAP search: %s" % result)
        if len(response) != 1:
            raise ValueError(
                "Expected exactly one match for CN=%s but found %s"
                % (commonName, len(response))
            )
        # https://github.com/PyCQA/pylint/issues/4148
        return response[0]["attributes"]  # pylint: disable=unsubscriptable-object
|
yujikato/DIRAC
|
src/DIRAC/ConfigurationSystem/Client/SyncPlugins/CERNLDAPSyncPlugin.py
|
Python
|
gpl-3.0
| 3,313
|
[
"DIRAC"
] |
44974bf891ef51aba130ef66b2fb1313959212888cf9ce00f94d74b8f32a354b
|
# -*- coding: utf-8 -*-
"""
Miscellaneous functions for simulations
gaussian : make a gaussian distribution along an x axis with defined mean and std
pha2r : Create a time series that is a nonlinear mapping of phase
simphase : simulate an oscillation and its phase by bandpass filtering white noise
spikes2lfp : Convolve a spike train with a synaptic potential to simulate a local field potential (LFP)
simfiltonef : Simulate a signal with 1/f^2 noise
"""
from __future__ import division
import numpy as np
import scipy as sp
def gaussian(x, mu, sig):
    """Evaluate an unnormalized Gaussian bell curve (peak 1 at x == mu)
    with mean ``mu`` and standard deviation ``sig`` along ``x``."""
    z = (x - mu) / sig
    return np.exp(-0.5 * z ** 2)
def pha2r(pha, method, mod_frac, firing_rate, sqprc=10.0, normstd = 1):
    '''
    Create a time-varying firing rate from a phase time series
    Parameters
    ----------
    pha : array
        time series of phases
    method : string ('sin', 'sq', or 'gauss')
        option for phase-to-firing rate transform
    mod_frac : float
        Fraction of the total firing rate to be phase-modulated
    firing_rate : float
        Average firing rate
    sqprc : float (between 0 and 100)
        Percentage of the cycle to bias the firing if using the 'sq' method
    normstd : float
        Standard deviation (in radians) of the gaussian that maps phase to
        firing rate
    Returns
    -------
    r : array
        time series of instantaneous firing rates
    Raises
    ------
    ValueError
        if method is not one of 'sin', 'sq', or 'gauss'
    '''
    # Generate time series of desired shape with mean 1 (1Hz)
    if method == 'sin':
        r_dep = np.sin(pha + np.pi / 2) + 1
    elif method == 'sq':
        # fire only in the lowest sqprc percent of phases
        sqpha_thresh = np.percentile(pha, sqprc)
        t_bias = pha < sqpha_thresh
        r_dep = t_bias * 100.0 / sqprc
    elif method == 'gauss':
        # Gaussian pdf of phase around 0. Computed directly because
        # matplotlib.mlab.normpdf (used previously) was removed in
        # matplotlib 3.1; the constant factor cancels in the mean
        # normalization below but is kept for exactness.
        r_dep = np.exp(-pha ** 2 / (2 * normstd ** 2)) / (normstd * np.sqrt(2 * np.pi))
        r_dep = r_dep / np.mean(r_dep)
    else:
        raise ValueError("method must be 'sin', 'sq', or 'gauss'; got %r" % (method,))
    # Normalize for firing rate
    r_dep = r_dep * firing_rate * mod_frac
    r_indep = firing_rate * (1 - mod_frac) * np.ones(len(pha))
    return r_dep + r_indep
def simphase(T, flo, w=3, dt=.001, randseed = 0, returnwave=False):
    """ Simulate the phase of an oscillation
    The first and last second of the oscillation are simulated and taken out
    in order to avoid edge artifacts in the simulated phase
    Parameters
    ----------
    T : float
        length of time of simulated oscillation
    flo : 2-element array (lo,hi)
        frequency range of simulated oscillation
    w : int
        filter parameter forwarded to bandpass_default (see tools.spec)
    dt : float
        time step of simulated oscillation
    randseed : int
        seed for the white-noise generator (makes output reproducible)
    returnwave : boolean
        option to return the simulated oscillation
    Returns
    -------
    pha : array
        phase time series; if returnwave is True, a (phase, wave) tuple
    """
    from tools.spec import bandpass_default
    np.random.seed(randseed)
    # simulate T+2 seconds; 1 s of padding on each side is trimmed below
    whitenoise = np.random.randn(int((T + 2) / dt))
    theta, _ = bandpass_default(whitenoise, flo, 1 / dt, rmv_edge=False, w=w)
    # (removed a leftover Python-2 `print theta` debug statement, which is a
    # SyntaxError under Python 3 and spammed stdout under Python 2)
    lo, hi = int(1 / dt), int((T + 1) / dt)
    if returnwave:
        return np.angle(sp.signal.hilbert(theta[lo:hi])), theta[lo:hi]
    else:
        return np.angle(sp.signal.hilbert(theta[lo:hi]))
def spikes2lfp(spikes,
               gmax = 1, Tpsp = 100, tau_rise = 0.3, tau_decay = 2):
    """Simulate an LFP by convolving a spike train with a
    difference-of-exponentials synaptic potential kernel."""
    # synaptic kernel: slow decay minus fast rise, scaled by gmax
    tk = np.arange(Tpsp)
    decay = np.exp(-tk / tau_decay)
    rise = np.exp(-tk / tau_rise)
    kernel = gmax * (decay - rise)
    return np.convolve(spikes, kernel, mode='same')
def simbrown(N):
    """Simulate N samples of brown noise (1/f^2 power spectrum) by
    integrating white Gaussian noise."""
    return np.random.randn(N).cumsum()
def simfiltonef(T, f_range, Fs, N, samp_buffer = 10000):
    """ Simulate a band-pass filtered signal with 1/f^2
    Input suggestions: f_range=(2,None), Fs=1000, N=1000
    Parameters
    ----------
    T : float
        length of time of simulated oscillation
    Fs : float
        oscillation sampling rate
    f_range : 2-element array (lo,hi)
        frequency range of simulated data
        if None: do not filter
        if (lo, None): high-pass filter only
    N : int
        order of filter
    samp_buffer : int
        unused; kept for backward compatibility
    Returns
    -------
    array
        the (optionally filtered) brown-noise signal
    """
    if f_range is None:
        # Unfiltered 1/f^2 noise
        return simbrown(int(T * Fs))
    # Generate 1/f^2 noise with N samples of padding on each side, which is
    # trimmed after filtering to remove edge artifacts
    brownN = simbrown(int(T * Fs + N * 2))
    nyq = Fs / 2.
    if f_range[1] is None:
        # High-pass filter; firwin needs an odd number of taps for a
        # high-pass response
        if N % 2 == 0:
            # print-as-function keeps this runnable under Python 3
            print('NOTE: Increased high-pass filter order by 1 in order to be odd')
            N += 1
        taps = sp.signal.firwin(N, f_range[0] / nyq, pass_zero=False)
    else:
        # Band-pass filter
        taps = sp.signal.firwin(N, np.array(f_range) / nyq, pass_zero=False)
    brownNf = sp.signal.filtfilt(taps, [1], brownN)
    return brownNf[N:-N]
|
srcole/tools
|
sim.py
|
Python
|
mit
| 4,942
|
[
"Gaussian"
] |
7429f7e179fa8c3e8cb3d180f85ce3d332954a4e199597d14e69fcb679378e08
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits
from sklearn.cross_validation import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests: continuous features with binary labels
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
# NOTE(review): np.int is a deprecated alias (removed in numpy >= 1.24)
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    """
    Gaussian Naive Bayes classification.
    This checks that GaussianNB implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    model = GaussianNB()
    predictions = model.fit(X, y).predict(X)
    assert_array_equal(predictions, y)
    # predict_log_proba must agree with the log of predict_proba
    proba = model.predict_proba(X)
    log_proba = model.predict_log_proba(X)
    assert_array_almost_equal(np.log(proba), log_proba, 8)
def test_gnb_prior():
    """Test whether class priors are properly set. """
    model = GaussianNB().fit(X, y)
    # each of the two classes holds 3 of the 6 samples
    assert_array_almost_equal(np.array([3, 3]) / 6.0,
                              model.class_prior_, 8)
    model.fit(X1, y1)
    # Check that the class priors sum to 1
    assert_array_almost_equal(model.class_prior_.sum(), 1)
def test_discrete_prior():
    """Test whether class priors are properly set. """
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls().fit(X2, y2)
        # y2 holds three classes with two samples each
        assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
                                  model.class_log_prior_, 8)
def test_mnnb():
    """Test Multinomial Naive Bayes classification.
    This checks that MultinomialNB implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    # Run once on the dense array and once on its CSR sparse equivalent.
    for X in [X2, scipy.sparse.csr_matrix(X2)]:
        # Check the ability to predict the learning set.
        clf = MultinomialNB()
        # multinomial counts must be non-negative, so negative input raises
        assert_raises(ValueError, clf.fit, -X, y2)
        y_pred = clf.fit(X, y2).predict(X)
        assert_array_equal(y_pred, y2)
        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        y_pred_proba = clf.predict_proba(X)
        y_pred_log_proba = clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
        # Check that incremental fitting yields the same results
        clf2 = MultinomialNB()
        clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
        clf2.partial_fit(X[2:5], y2[2:5])
        clf2.partial_fit(X[5:], y2[5:])
        y_pred2 = clf2.predict(X)
        assert_array_equal(y_pred2, y2)
        y_pred_proba2 = clf2.predict_proba(X)
        y_pred_log_proba2 = clf2.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
        # chunked partial_fit must match a single full fit exactly
        assert_array_almost_equal(y_pred_proba2, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
        # Partial fit on the whole data at once should be the same as fit too
        clf3 = MultinomialNB()
        clf3.partial_fit(X, y2, classes=np.unique(y2))
        y_pred3 = clf3.predict(X)
        assert_array_equal(y_pred3, y2)
        y_pred_proba3 = clf3.predict_proba(X)
        y_pred_log_proba3 = clf3.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
        assert_array_almost_equal(y_pred_proba3, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    """Check fit and partial_fit learn identical counts, whether the data
    arrives in one batch or several."""
    fitted = cls()
    fitted.fit([[0, 1], [1, 0]], [0, 1])
    one_batch = cls()
    one_batch.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
    assert_array_equal(fitted.class_count_, one_batch.class_count_)
    assert_array_equal(fitted.feature_count_, one_batch.feature_count_)
    two_batches = cls()
    two_batches.partial_fit([[0, 1]], [0], classes=[0, 1])
    two_batches.partial_fit([[1, 0]], [1])
    assert_array_equal(fitted.class_count_, two_batches.class_count_)
    assert_array_equal(fitted.feature_count_, two_batches.feature_count_)
def test_discretenb_partial_fit():
    """Yield one partial_fit consistency check per discrete NB class
    (nose-style generator test)."""
    for estimator_cls in (MultinomialNB, BernoulliNB):
        yield check_partial_fit, estimator_cls
def test_discretenb_pickle():
    """Test picklability of discrete naive Bayes classifiers"""
    for estimator_cls in (BernoulliNB, MultinomialNB, GaussianNB):
        model = estimator_cls().fit(X2, y2)
        expected = model.predict(X2)
        buf = BytesIO()
        pickle.dump(model, buf)
        restored = pickle.load(BytesIO(buf.getvalue()))
        assert_array_equal(expected, restored.predict(X2))
        if estimator_cls is GaussianNB:
            # TODO re-enable me when partial_fit is implemented for GaussianNB
            continue
        # Test pickling of estimator trained with partial_fit
        partial = estimator_cls().partial_fit(X2[:3], y2[:3],
                                              classes=np.unique(y2))
        partial.partial_fit(X2[3:], y2[3:])
        buf = BytesIO()
        pickle.dump(partial, buf)
        restored = pickle.load(BytesIO(buf.getvalue()))
        assert_array_equal(expected, restored.predict(X2))
def test_input_check_fit():
    """Test input checks for the fit method"""
    for estimator_cls in (BernoulliNB, MultinomialNB, GaussianNB):
        # mismatched numbers of samples and labels must be rejected at fit time
        assert_raises(ValueError, estimator_cls().fit, X2, y2[:-1])
        # ... and a wrong feature count must be rejected at predict time
        model = estimator_cls().fit(X2, y2)
        assert_raises(ValueError, model.predict, X2[:, :-1])
def test_input_check_partial_fit():
    """Input validation checks for partial_fit on the discrete NB classes."""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        # sample/label count mismatch is rejected
        assert_raises(ValueError, estimator_cls().partial_fit, X2, y2[:-1],
                      classes=np.unique(y2))
        # classes is required for first call to partial fit
        assert_raises(ValueError, estimator_cls().partial_fit, X2, y2)
        model = estimator_cls()
        model.partial_fit(X2, y2, classes=np.unique(y2))
        # later calls may not change the set of classes
        assert_raises(ValueError, model.partial_fit, X2, y2,
                      classes=np.arange(42))
        # the feature count must stay consistent for partial_fit ...
        assert_raises(ValueError, model.partial_fit, X2[:, :-1], y2)
        # ... and for predict
        assert_raises(ValueError, model.predict, X2[:, :-1])
def test_discretenb_predict_proba():
    """Test discrete NB classes' probability scores"""
    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]
    # test binary case (1-d output)
    y = [0, 0, 2]  # 2 is regression test for binary case, 02e673
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        # NOTE(review): X[-1] / X[0] are single samples passed as 1-d lists;
        # this relies on the old scikit-learn API that accepted 1-d input
        # (later versions require a 2-d array) - pinned to that version.
        assert_equal(clf.predict(X[-1]), 2)
        assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
        # per-sample probabilities over the two classes sum to one
        assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
                                  np.array([1., 1.]), 6)
    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
        assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
        assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
        # the class priors (and their intercept_ alias) are normalized too
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    """Test whether discrete NB classes fit a uniform prior
    when fit_prior=False and class_prior=None"""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls()
        model.set_params(fit_prior=False)
        model.fit([[0], [0], [1]], [0, 0, 1])
        # despite the 2:1 class imbalance, the prior must stay uniform
        fitted_prior = np.exp(model.class_log_prior_)
        assert_array_equal(fitted_prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
    """Test whether discrete NB classes use provided prior"""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls(class_prior=[0.5, 0.5])
        model.fit([[0], [0], [1]], [0, 0, 1])
        fitted_prior = np.exp(model.class_log_prior_)
        assert_array_equal(fitted_prior, np.array([.5, .5]))
        # Inconsistent number of classes with prior
        assert_raises(ValueError, model.fit, [[0], [1], [2]], [0, 1, 2])
        assert_raises(ValueError, model.partial_fit, [[0], [1]], [0, 1],
                      classes=[0, 1, 1])
def test_sample_weight_multiclass():
    """Yield one multiclass sample_weight check per discrete NB class
    (nose-style generator test)."""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        yield check_sample_weight_multiclass, estimator_cls
def check_sample_weight_multiclass(cls):
    """Fit with sample_weight - both via fit and via chunked partial_fit -
    and check the weighted decision on the training points."""
    X = [
        [0, 0, 1],
        [0, 1, 1],
        [0, 1, 1],
        [1, 0, 0],
    ]
    y = [0, 0, 1, 2]
    # the class-1 and class-2 samples carry twice the weight of each
    # class-0 sample, which flips the prediction on the tied sample
    sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
    sample_weight /= sample_weight.sum()
    clf = cls().fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
    # Check sample weight using the partial_fit method
    clf = cls()
    clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
                    sample_weight=sample_weight[:2])
    clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
    clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    """Sample weights shift the learned class prior of MultinomialNB."""
    model = MultinomialNB()
    model.fit([[1, 2], [1, 2], [1, 0]],
              [0, 0, 1],
              sample_weight=[1, 1, 4])
    assert_array_equal(model.predict([1, 0]), [1])
    # weights of 1+1 vs 4 give class priors of 1/3 vs 2/3
    positive_prior = np.exp(model.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    """coef_ and intercept_ should have shapes as in other linear models.
    Non-regression test for issue #2127.
    """
    data = [[1, 0, 0], [1, 1, 1]]
    labels = [1, 2]  # binary classification
    for model in (MultinomialNB(), BernoulliNB()):
        model.fit(data, labels)
        assert_equal(model.coef_.shape, (1, 3))
        assert_equal(model.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    """Minimum cross-validated accuracy thresholds on the digits dataset."""
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    # a harder binary sub-problem: only the samples labeled 3 or 8
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)
    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)
    # Bernoulli NB (features binarized by thresholding pixel values at 4)
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)
    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)
    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)
    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/tests/test_naive_bayes.py
|
Python
|
apache-2.0
| 11,977
|
[
"Gaussian"
] |
820defb9f3ce014e4c4bfb70ef37ae5a1ccba32d89a0202451d02d11fee80db6
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2012 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gettext
import misc
# Useful vars for gettext (translations)
APP = "cnchi"
DIR = "po"
# This lets us translate the strings written in the .py files (not in glade)
gettext.textdomain(APP)
gettext.bindtextdomain(APP, DIR)
# And with this we can mark strings for translation as _("string")
_ = gettext.gettext
_language_list = "data/languagelist.data.gz"
def utf8(s, errors="strict"):
    """Return *s* as text, decoding bytes as UTF-8 when it is not
    already a (unicode) string."""
    if not isinstance(s, str):
        return str(s, "utf-8", errors)
    return s
# Returns a tuple of (current language, sorted choices, display map).
def get_languages(current_language_index=-1, only_installable=False):
    """Parse the gzipped language list and describe available languages.
    :param current_language_index: line index of the language to report as
        current; -1 (the default) leaves it as "English".
    :param only_installable: if True, keep only languages whose
        language-pack-* package exists in the apt cache.
    :returns: tuple of (current language, sorted choices, display map).
    """
    import gzip
    #import icu
    current_language = "English"
    if only_installable:
        from apt.cache import Cache
        #workaround for an issue where euid != uid and the
        #apt cache has not yet been loaded causing a SystemError
        #when libapt-pkg tries to load the Cache the first time.
        with misc.raised_privileges():
            cache = Cache()
    languagelist = gzip.open(_language_list)
    language_display_map = {}
    # i counts every data line (including skipped ones) so it can be
    # matched against current_language_index
    i = 0
    for line in languagelist:
        line = utf8(line)
        if line == '' or line == '\n':
            continue
        # colon-separated line; the fields after the first are the locale
        # code, the language name, and its translated display name
        code, name, trans = line.strip('\n').split(':')[1:]
        if code in ('C', 'dz', 'km'):
            # these locales are skipped entirely
            i += 1
            continue
        # KDE fails to round-trip strings containing U+FEFF ZERO WIDTH
        # NO-BREAK SPACE, and we don't care about the NBSP anyway, so strip
        # it.
        # https://bugs.launchpad.net/bugs/1001542
        # (comment #5 and on)
        trans = trans.strip(" \ufeff")
        if only_installable:
            pkg_name = 'language-pack-%s' % code
            #special case these
            if pkg_name.endswith('_CN'):
                pkg_name = 'language-pack-zh-hans'
            elif pkg_name.endswith('_TW'):
                pkg_name = 'language-pack-zh-hant'
            elif pkg_name.endswith('_NO'):
                pkg_name = pkg_name.split('_NO')[0]
            elif pkg_name.endswith('_BR'):
                pkg_name = pkg_name.split('_BR')[0]
            try:
                pkg = cache[pkg_name]
                if not (pkg.installed or pkg.candidate):
                    i += 1
                    continue
            except KeyError:
                # language pack not packaged at all - skip the language
                i += 1
                continue
        language_display_map[trans] = (name, code)
        if i == current_language_index:
            current_language = trans
        i += 1
    languagelist.close()
    if only_installable:
        del cache
    #try:
    # Note that we always collate with the 'C' locale. This is far
    # from ideal. But proper collation always requires a specific
    # language for its collation rules (languages frequently have
    # custom sorting). This at least gives us common sorting rules,
    # like stripping accents.
    #collator = icu.Collator.createInstance(icu.Locale('C'))
    #except:
    #    collator = None
    collator = None
    def compare_choice(x):
        # sort key for the display names
        # NOTE(review): mixing a None key with str keys raises TypeError
        # under Python 3 if a 'C' entry is ever present - confirm intent.
        if language_display_map[x][1] == 'C':
            return None  # place C first
        if collator:
            try:
                return collator.getCollationKey(x).getByteArray()
            except:
                pass
        # Else sort by unicode code point, which isn't ideal either,
        # but also has the virtue of sorting like-glyphs together
        return x
    sorted_choices = sorted(language_display_map, key=compare_choice)
    return current_language, sorted_choices, language_display_map
|
axaxs/Cnchi
|
src/i18n.py
|
Python
|
gpl-3.0
| 4,436
|
[
"FEFF"
] |
a53c1e58ced675fdc5e2bc23570f2d57f83afd4411b1e5ee6a837cc3a6d307db
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.compat import compat
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
  """Round-trip tests for the RGB <-> HSV conversion ops."""

  def testBatch(self):
    """Batched conversion must match per-image conversion and round-trip."""
    np.random.seed(7)
    num_images = 5
    for nptype in [np.float32, np.float64]:
      # Arbitrary RGB data in [0, 1).
      rgb_np = np.random.rand(num_images, 2, 7, 3).astype(nptype)
      with self.cached_session(use_gpu=True):
        rgb_batch = constant_op.constant(rgb_np)
        # Convert the whole batch at once ...
        hsv_batch = image_ops.rgb_to_hsv(rgb_batch)
        rgb_back_batch = image_ops.hsv_to_rgb(hsv_batch)
        # ... and each image individually.
        images = array_ops.unstack(rgb_batch)
        hsv_single = [image_ops.rgb_to_hsv(im) for im in images]
        rgb_back_single = [image_ops.hsv_to_rgb(im) for im in hsv_single]
        hsv_joined = array_ops.stack(hsv_single)
        rgb_back_joined = array_ops.stack(rgb_back_single)
        (hsv_batch, rgb_back_batch, hsv_joined,
         rgb_back_joined) = self.evaluate(
             [hsv_batch, rgb_back_batch, hsv_joined, rgb_back_joined])
      # Batched processing agrees with per-image processing ...
      self.assertAllClose(hsv_batch, hsv_joined)
      self.assertAllClose(rgb_back_batch, rgb_back_joined)
      # ... and RGB -> HSV -> RGB reproduces the input.
      self.assertAllClose(rgb_back_batch, rgb_np)

  def testRGBToHSVRoundTrip(self):
    """RGB -> HSV -> RGB on fixed uint8-derived data reproduces the input."""
    data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    for nptype in [np.float32, np.float64]:
      rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
      with self.cached_session(use_gpu=True):
        round_trip = image_ops.hsv_to_rgb(image_ops.rgb_to_hsv(rgb_np))
        self.assertAllClose(self.evaluate(round_trip), rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
  """Round-trip tests for the RGB <-> YIQ conversion ops."""

  def testBatch(self):
    """Batched conversion must match per-image conversion and round-trip."""
    np.random.seed(7)
    num_images = 5
    tolerance = dict(rtol=1e-4, atol=1e-4)
    for nptype in [np.float32, np.float64]:
      # Arbitrary RGB data in [0, 1).
      rgb_np = np.random.rand(num_images, 2, 7, 3).astype(nptype)
      with self.cached_session(use_gpu=True):
        rgb_batch = constant_op.constant(rgb_np)
        # Convert the whole batch at once ...
        yiq_batch = image_ops.rgb_to_yiq(rgb_batch)
        rgb_back_batch = image_ops.yiq_to_rgb(yiq_batch)
        # ... and each image individually.
        images = array_ops.unstack(rgb_batch)
        yiq_single = [image_ops.rgb_to_yiq(im) for im in images]
        rgb_back_single = [image_ops.yiq_to_rgb(im) for im in yiq_single]
        yiq_joined = array_ops.stack(yiq_single)
        rgb_back_joined = array_ops.stack(rgb_back_single)
        (yiq_batch, rgb_back_batch, yiq_joined,
         rgb_back_joined) = self.evaluate(
             [yiq_batch, rgb_back_batch, yiq_joined, rgb_back_joined])
      # Batched processing agrees with per-image processing ...
      self.assertAllClose(yiq_batch, yiq_joined, **tolerance)
      self.assertAllClose(rgb_back_batch, rgb_back_joined, **tolerance)
      # ... and RGB -> YIQ -> RGB reproduces the input.
      self.assertAllClose(rgb_back_batch, rgb_np, **tolerance)
class RGBToYUVTest(test_util.TensorFlowTestCase):
  """Round-trip tests for the RGB <-> YUV conversion ops."""

  def testBatch(self):
    """Batched conversion must match per-image conversion and round-trip."""
    np.random.seed(7)
    num_images = 5
    tolerance = dict(rtol=1e-4, atol=1e-4)
    for nptype in [np.float32, np.float64]:
      # Arbitrary RGB data in [0, 1).
      rgb_np = np.random.rand(num_images, 2, 7, 3).astype(nptype)
      with self.cached_session(use_gpu=True):
        rgb_batch = constant_op.constant(rgb_np)
        # Convert the whole batch at once ...
        yuv_batch = image_ops.rgb_to_yuv(rgb_batch)
        rgb_back_batch = image_ops.yuv_to_rgb(yuv_batch)
        # ... and each image individually.
        images = array_ops.unstack(rgb_batch)
        yuv_single = [image_ops.rgb_to_yuv(im) for im in images]
        rgb_back_single = [image_ops.yuv_to_rgb(im) for im in yuv_single]
        yuv_joined = array_ops.stack(yuv_single)
        rgb_back_joined = array_ops.stack(rgb_back_single)
        (yuv_batch, rgb_back_batch, yuv_joined,
         rgb_back_joined) = self.evaluate(
             [yuv_batch, rgb_back_batch, yuv_joined, rgb_back_joined])
      # Batched processing agrees with per-image processing ...
      self.assertAllClose(yuv_batch, yuv_joined, **tolerance)
      self.assertAllClose(rgb_back_batch, rgb_back_joined, **tolerance)
      # ... and RGB -> YUV -> RGB reproduces the input.
      self.assertAllClose(rgb_back_batch, rgb_np, **tolerance)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.rgb_to_grayscale and image_ops.grayscale_to_rgb."""

  def _RGBToGrayscale(self, images):
    """Pixel-by-pixel numpy reference implementation of RGB -> grayscale.

    Args:
      images: uint8 ndarray of shape [height, width, 3] or
        [batch, height, width, 3].

    Returns:
      uint8 ndarray of the same rank with the channel dimension reduced to 1.
    """
    is_batch = True
    if len(images.shape) == 3:
      # Add a temporary batch dimension so the loops below are uniform.
      is_batch = False
      images = np.expand_dims(images, axis=0)
    out_shape = images.shape[0:3] + (1,)
    out = np.zeros(shape=out_shape, dtype=np.uint8)
    for batch in xrange(images.shape[0]):
      for y in xrange(images.shape[1]):
        for x in xrange(images.shape[2]):
          red = images[batch, y, x, 0]
          green = images[batch, y, x, 1]
          blue = images[batch, y, x, 2]
          # Rec. 601 luma coefficients; int() truncates toward zero.
          gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
          out[batch, y, x, 0] = int(gray)
    if not is_batch:
      # Drop the batch dimension we added above.
      out = np.squeeze(out, axis=0)
    return out

  def _TestRGBToGrayscale(self, x_np):
    """Check the rgb_to_grayscale op against the numpy reference above."""
    y_np = self._RGBToGrayscale(x_np)
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.rgb_to_grayscale(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testBasicRGBToGrayscale(self):
    """rgb_to_grayscale matches the reference for 3-D and 4-D inputs."""
    # 4-D input with batch dimension.
    x_np = np.array(
        [[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
    self._TestRGBToGrayscale(x_np)
    # 3-D input with no batch dimension.
    x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
    self._TestRGBToGrayscale(x_np)

  def testBasicGrayscaleToRGB(self):
    """grayscale_to_rgb replicates the single channel across 3 channels."""
    # 4-D input with batch dimension.
    x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
    y_np = np.array(
        [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.grayscale_to_rgb(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
    # 3-D input with no batch dimension.
    x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
    y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.grayscale_to_rgb(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testGrayscaleToRGBInputValidation(self):
    """grayscale_to_rgb rejects a last dim != 1 and rank-1 inputs."""
    # tests whether the grayscale_to_rgb function raises
    # an exception if the input images' last dimension is
    # not of size 1, i.e. the images have shape
    # [batch size, height, width] or [height, width]
    # tests if an exception is raised if a three dimensional
    # input is used, i.e. the images have shape [batch size, height, width]
    with self.cached_session(use_gpu=True):
      # 3-D input with batch dimension.
      x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      # this is the error message we expect the function to raise
      err_msg = "Last dimension of a grayscale image should be size 1"
      with self.assertRaisesRegexp(ValueError, err_msg):
        image_ops.grayscale_to_rgb(x_tf)
    # tests if an exception is raised if a two dimensional
    # input is used, i.e. the images have shape [height, width]
    with self.cached_session(use_gpu=True):
      # 1-D input without batch dimension.
      x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      # this is the error message we expect the function to raise
      err_msg = "A grayscale image must be at least two-dimensional"
      with self.assertRaisesRegexp(ValueError, err_msg):
        image_ops.grayscale_to_rgb(x_tf)

  @test_util.run_deprecated_v1
  def testShapeInference(self):
    """Static shape inference propagates through both conversion ops."""
    # Shape inference works and produces expected output where possible
    rgb_shape = [7, None, 19, 3]
    gray_shape = rgb_shape[:-1] + [1]
    with self.cached_session(use_gpu=True):
      rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
      gray = image_ops.rgb_to_grayscale(rgb_tf)
      self.assertEqual(gray_shape, gray.get_shape().as_list())
    with self.cached_session(use_gpu=True):
      gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
      rgb = image_ops.grayscale_to_rgb(gray_tf)
      self.assertEqual(rgb_shape, rgb.get_shape().as_list())
    # Shape inference does not break for unknown shapes
    with self.cached_session(use_gpu=True):
      rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
      gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
      self.assertFalse(gray_unknown.get_shape())
    with self.cached_session(use_gpu=True):
      gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
      rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
      self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_gamma on float32 and uint8 images."""

  @test_util.run_deprecated_v1
  def test_adjust_gamma_less_zero_float32(self):
    """A negative gamma must be rejected for float32 images."""
    with self.cached_session():
      x_data = np.random.uniform(0, 1.0, (8, 8))
      x_np = np.array(x_data, dtype=np.float32)
      x = constant_op.constant(x_np, shape=x_np.shape)
      err_msg = "Gamma should be a non-negative real number"
      with self.assertRaisesRegexp(ValueError, err_msg):
        image_ops.adjust_gamma(x, gamma=-1)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_less_zero_uint8(self):
    """A negative gamma must be rejected for uint8 images."""
    with self.cached_session():
      x_data = np.random.uniform(0, 255, (8, 8))
      x_np = np.array(x_data, dtype=np.uint8)
      x = constant_op.constant(x_np, shape=x_np.shape)
      err_msg = "Gamma should be a non-negative real number"
      with self.assertRaisesRegexp(ValueError, err_msg):
        image_ops.adjust_gamma(x, gamma=-1)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_less_zero_tensor(self):
    """A negative gamma tensor must be rejected when the op is evaluated."""
    with self.cached_session():
      x_data = np.random.uniform(0, 1.0, (8, 8))
      x_np = np.array(x_data, dtype=np.float32)
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = constant_op.constant(-1.0, dtype=dtypes.float32)
      image = image_ops.adjust_gamma(x, gamma=y)
      err_msg = "Gamma should be a non-negative real number"
      with self.assertRaisesRegexp(errors.InvalidArgumentError, err_msg):
        self.evaluate(image)

  def _test_adjust_gamma_uint8(self, gamma):
    """Compare adjust_gamma on a random uint8 image to a numpy reference.

    Args:
      gamma: non-negative gamma value to apply.
    """
    with self.cached_session():
      x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.adjust_gamma(x, gamma=gamma)
      y_tf = np.trunc(y.eval())
      # calculate gamma correction using numpy
      # firstly, transform uint8 to float representation
      # then perform correction
      y_np = np.power(x_np / 255.0, gamma)
      # convert correct numpy image back to uint8 type
      y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
      self.assertAllClose(y_tf, y_np, 1e-6)

  def _test_adjust_gamma_float32(self, gamma):
    """Compare adjust_gamma on a random float32 image to numpy power."""
    with self.cached_session():
      x_np = np.random.uniform(0, 1.0, (8, 8))
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.adjust_gamma(x, gamma=gamma)
      y_tf = y.eval()
      y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
      self.assertAllClose(y_tf, y_np, 1e-6)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_one_float32(self):
    """Same image should be returned for gamma equal to one"""
    self._test_adjust_gamma_float32(1.0)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_one_uint8(self):
    """Same image should be returned for gamma equal to one"""
    self._test_adjust_gamma_uint8(1.0)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_zero_uint8(self):
    """White image should be returned for gamma equal
    to zero for uint8 images
    """
    self._test_adjust_gamma_uint8(gamma=0.0)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_less_one_uint8(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to half for uint8 images
    """
    self._test_adjust_gamma_uint8(gamma=0.5)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_greater_one_uint8(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to two for uint8 images
    """
    # Fixed: previously passed gamma=1.0, which duplicated the
    # gamma-equal-to-one test instead of exercising gamma > 1.
    self._test_adjust_gamma_uint8(gamma=2.0)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_less_one_float32(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to half for float32 images
    """
    self._test_adjust_gamma_float32(0.5)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_greater_one_float32(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to two for float32 images
    """
    # Fixed: previously passed 1.0, contradicting the docstring's
    # "gamma equal to two" and duplicating the gamma-one test.
    self._test_adjust_gamma_float32(2.0)

  @test_util.run_deprecated_v1
  def test_adjust_gamma_zero_float32(self):
    """White image should be returned for gamma equal
    to zero for float32 images
    """
    self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
  """Tests image_ops.adjust_hue against hard-coded and colorsys results."""

  def testAdjustNegativeHue(self):
    """Shifting hue by -0.25 matches the precomputed expected pixels."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    delta = -0.25
    y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_hue(x, delta)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testAdjustPositiveHue(self):
    """Shifting hue by +0.25 matches the precomputed expected pixels."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    delta = 0.25
    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_hue(x, delta)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testBatchAdjustHue(self):
    """adjust_hue applies per-image when given a batched 4-D input."""
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    delta = 0.25
    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_hue(x, delta)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def _adjustHueNp(self, x_np, delta_h):
    """Reference hue adjustment via colorsys, one pixel at a time.

    Args:
      x_np: ndarray whose last dimension holds 3 (RGB) channels.
      delta_h: hue shift as a fraction of a full hue rotation.

    Returns:
      ndarray of the same shape and dtype with the hue shifted by delta_h.
    """
    self.assertEqual(x_np.shape[-1], 3)
    x_v = x_np.reshape([-1, 3])
    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    channel_count = x_v.shape[0]
    for i in xrange(channel_count):
      r = x_v[i][0]
      g = x_v[i][1]
      b = x_v[i][2]
      h, s, v = colorsys.rgb_to_hsv(r, g, b)
      h += delta_h
      # Wrap hue into [0, 1); the +10.0 keeps the fmod argument positive.
      h = math.fmod(h + 10.0, 1.0)
      r, g, b = colorsys.hsv_to_rgb(h, s, v)
      y_v[i][0] = r
      y_v[i][1] = g
      y_v[i][2] = b
    return y_v.reshape(x_np.shape)

  def _adjustHueTf(self, x_np, delta_h):
    """Run image_ops.adjust_hue on x_np and return the evaluated array."""
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np)
      y = image_ops.adjust_hue(x, delta_h)
      y_tf = self.evaluate(y)
    return y_tf

  def testAdjustRandomHue(self):
    """Random hue shifts agree with colorsys across shapes and channel ties."""
    x_shapes = [
        [2, 2, 3],
        [4, 2, 3],
        [2, 4, 3],
        [2, 5, 3],
        [1000, 1, 3],
    ]
    # Each style forces equality between some channels to hit different
    # branches of the RGB->HSV conversion (max/min channel selection).
    test_styles = [
        "all_random",
        "rg_same",
        "rb_same",
        "gb_same",
        "rgb_same",
    ]
    for x_shape in x_shapes:
      for test_style in test_styles:
        x_np = np.random.rand(*x_shape) * 255.
        delta_h = np.random.rand() * 2.0 - 1.0
        if test_style == "all_random":
          pass
        elif test_style == "rg_same":
          x_np[..., 1] = x_np[..., 0]
        elif test_style == "rb_same":
          x_np[..., 2] = x_np[..., 0]
        elif test_style == "gb_same":
          x_np[..., 2] = x_np[..., 1]
        elif test_style == "rgb_same":
          x_np[..., 1] = x_np[..., 0]
          x_np[..., 2] = x_np[..., 0]
        else:
          raise AssertionError("Invalid test style: %s" % (test_style))
        y_np = self._adjustHueNp(x_np, delta_h)
        y_tf = self._adjustHueTf(x_np, delta_h)
        self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)

  def testInvalidShapes(self):
    """Shape validation for adjust_hue.

    NOTE(review): `fused` is hard-coded to False, so this test returns
    immediately and everything after the early return is unreachable until
    the fused implementation becomes the default.
    """
    fused = False
    if not fused:
      # The tests are known to pass with the fused adjust_hue. We will enable
      # them when the fused implementation is the default.
      return
    x_np = np.random.rand(2, 3) * 255.
    delta_h = np.random.rand() * 2.0 - 1.0
    fused = False
    with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
      self._adjustHueTf(x_np, delta_h)
    x_np = np.random.rand(4, 2, 4) * 255.
    delta_h = np.random.rand() * 2.0 - 1.0
    with self.assertRaisesOpError("input must have 3 channels"):
      self._adjustHueTf(x_np, delta_h)
class FlipImageBenchmark(test.Benchmark):
  """Micro-benchmarks for (random) left-right image flips.

  The three benchmark families previously duplicated the same ~30-line
  timing harness; they differ only in input shape, the op under test, and
  the reported name, so the harness is factored into one private helper.
  """

  def _run_flip_benchmark(self, device, cpu_count, image_shape, flip_fn,
                          base_name):
    """Time `flip_fn` applied to a random image on `device`.

    Args:
      device: TF device string, e.g. "/cpu:0".
      cpu_count: intra-op thread pool size, or None for the default pool.
      image_shape: shape of the random float32 input.
      flip_fn: op constructor taking the input tensor, e.g.
        image_ops.flip_left_right.
      base_name: benchmark name prefix (shape-specific, without device tag).
    """
    warmup_rounds = 100
    benchmark_rounds = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      # Serialize inter-op execution and pin the intra-op pool size.
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with session.Session("", graph=ops.Graph(), config=config):
      with ops.device(device):
        inputs = variables.Variable(
            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
            trainable=False,
            dtype=dtypes.float32)
        run_op = flip_fn(inputs)
        self.evaluate(variables.global_variables_initializer())
        # Untimed warmup iterations, then timed benchmark iterations.
        for i in xrange(warmup_rounds + benchmark_rounds):
          if i == warmup_rounds:
            start = time.time()
          self.evaluate(run_op)
        end = time.time()
    step_time = (end - start) / benchmark_rounds
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    name = "%s_%s" % (base_name, tag)
    print("%s step_time: %.2f us" % (name, step_time * 1e6))
    self.report_benchmark(
        name=name, iters=benchmark_rounds, wall_time=step_time)

  def _benchmarkFlipLeftRight(self, device, cpu_count):
    """Benchmark deterministic flip_left_right on a single image."""
    self._run_flip_benchmark(device, cpu_count, [299, 299, 3],
                             image_ops.flip_left_right,
                             "benchmarkFlipLeftRight_299_299_3")

  def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
    """Benchmark random_flip_left_right on a single image."""
    self._run_flip_benchmark(device, cpu_count, [299, 299, 3],
                             image_ops.random_flip_left_right,
                             "benchmarkRandomFlipLeftRight_299_299_3")

  def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
    """Benchmark random_flip_left_right on a batch of 16 images."""
    self._run_flip_benchmark(
        device, cpu_count, [16, 299, 299, 3],
        image_ops.random_flip_left_right,
        "benchmarkBatchedRandomFlipLeftRight_16_299_299_3")

  def benchmarkFlipLeftRightCpu1(self):
    self._benchmarkFlipLeftRight("/cpu:0", 1)

  def benchmarkFlipLeftRightCpuAll(self):
    self._benchmarkFlipLeftRight("/cpu:0", None)

  def benchmarkFlipLeftRightGpu(self):
    self._benchmarkFlipLeftRight(test.gpu_device_name(), None)

  def benchmarkRandomFlipLeftRightCpu1(self):
    self._benchmarkRandomFlipLeftRight("/cpu:0", 1)

  def benchmarkRandomFlipLeftRightCpuAll(self):
    self._benchmarkRandomFlipLeftRight("/cpu:0", None)

  def benchmarkRandomFlipLeftRightGpu(self):
    self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)

  def benchmarkBatchedRandomFlipLeftRightCpu1(self):
    self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)

  def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
    self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)

  def benchmarkBatchedRandomFlipLeftRightGpu(self):
    self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
  """Micro-benchmark for image_ops.adjust_hue on a 299x299x3 image."""

  def _benchmarkAdjustHue(self, device, cpu_count):
    """Report per-step wall time for adjust_hue on the given device."""
    image_shape = [299, 299, 3]
    num_warmup = 100
    num_timed = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      # Serialize inter-op execution and pin the intra-op pool size.
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with self.benchmark_session(config=config, device=device):
      image = variables.Variable(
          random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
          trainable=False,
          dtype=dtypes.float32)
      hue_delta = constant_op.constant(0.1, dtype=dtypes.float32)
      adjusted = image_ops.adjust_hue(image, hue_delta)
      run_op = control_flow_ops.group(adjusted)
      self.evaluate(variables.global_variables_initializer())
      # Untimed warmup iterations, then timed benchmark iterations.
      for step in xrange(num_warmup + num_timed):
        if step == num_warmup:
          start = time.time()
        self.evaluate(run_op)
      elapsed = time.time() - start
    step_time = elapsed / num_timed
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkAdjustHue_299_299_3_%s" % (tag),
        iters=num_timed,
        wall_time=step_time)

  def benchmarkAdjustHueCpu1(self):
    self._benchmarkAdjustHue("/cpu:0", 1)

  def benchmarkAdjustHueCpuAll(self):
    self._benchmarkAdjustHue("/cpu:0", None)

  def benchmarkAdjustHueGpu(self):
    self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
  """Micro-benchmark for image_ops.adjust_saturation on a 299x299x3 image."""

  def _benchmarkAdjustSaturation(self, device, cpu_count):
    """Report per-step wall time for adjust_saturation on `device`."""
    image_shape = [299, 299, 3]
    num_warmup = 100
    num_timed = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      # Serialize inter-op execution and pin the intra-op pool size.
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with self.benchmark_session(config=config, device=device):
      image = variables.Variable(
          random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
          trainable=False,
          dtype=dtypes.float32)
      factor = constant_op.constant(0.1, dtype=dtypes.float32)
      adjusted = image_ops.adjust_saturation(image, factor)
      run_op = control_flow_ops.group(adjusted)
      self.evaluate(variables.global_variables_initializer())
      # Untimed warmup loop, then the timed measurement loop.
      for _ in xrange(num_warmup):
        self.evaluate(run_op)
      start = time.time()
      for _ in xrange(num_timed):
        self.evaluate(run_op)
      elapsed = time.time() - start
    step_time = elapsed / num_timed
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
        iters=num_timed,
        wall_time=step_time)

  def benchmarkAdjustSaturationCpu1(self):
    self._benchmarkAdjustSaturation("/cpu:0", 1)

  def benchmarkAdjustSaturationCpuAll(self):
    self._benchmarkAdjustSaturation("/cpu:0", None)

  def benchmarkAdjustSaturationGpu(self):
    self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
  """Times a serial chain of bilinear resizes to 299x299."""

  def _benchmarkResize(self, image_size, num_channels):
    """Benchmark resizing one random [1, H, W, C] image 1000 times."""
    batch_size = 1
    num_ops = 1000
    img = variables.Variable(
        random_ops.random_normal(
            [batch_size, image_size[0], image_size[1], num_channels]),
        name="img")
    # Chain the resize ops with control dependencies so they run serially.
    pending = []
    for _ in xrange(num_ops):
      with ops.control_dependencies(pending):
        resized = image_ops.resize_bilinear(
            img, [299, 299], align_corners=False)
        pending = [resized]
    benchmark_op = control_flow_ops.group(*pending)
    with self.benchmark_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      results = self.run_op_benchmark(
          sess,
          benchmark_op,
          name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
                                              num_channels)))
      ms_per_image = 1000 * results["wall_time"] / (batch_size * num_ops)
      print("%s : %.2f ms/img" % (results["name"], ms_per_image))

  def benchmarkSimilar3Channel(self):
    self._benchmarkResize((183, 229), 3)

  def benchmarkScaleUp3Channel(self):
    self._benchmarkResize((141, 186), 3)

  def benchmarkScaleDown3Channel(self):
    self._benchmarkResize((749, 603), 3)

  def benchmarkSimilar1Channel(self):
    self._benchmarkResize((183, 229), 1)

  def benchmarkScaleUp1Channel(self):
    self._benchmarkResize((141, 186), 1)

  def benchmarkScaleDown1Channel(self):
    self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
  """Times a serial chain of bicubic resizes to 299x299."""

  def _benchmarkResize(self, image_size, num_channels):
    """Benchmark resizing one random [1, H, W, C] image 1000 times."""
    batch_size = 1
    num_ops = 1000
    img = variables.Variable(
        random_ops.random_normal(
            [batch_size, image_size[0], image_size[1], num_channels]),
        name="img")
    # Chain the resize ops with control dependencies so they run serially.
    pending = []
    for _ in xrange(num_ops):
      with ops.control_dependencies(pending):
        resized = image_ops.resize_bicubic(
            img, [299, 299], align_corners=False)
        pending = [resized]
    benchmark_op = control_flow_ops.group(*pending)
    with self.benchmark_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      results = self.run_op_benchmark(
          sess,
          benchmark_op,
          min_iters=20,
          name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
                                             num_channels)))
      ms_per_image = 1000 * results["wall_time"] / (batch_size * num_ops)
      print("%s : %.2f ms/img" % (results["name"], ms_per_image))

  def benchmarkSimilar3Channel(self):
    self._benchmarkResize((183, 229), 3)

  def benchmarkScaleUp3Channel(self):
    self._benchmarkResize((141, 186), 3)

  def benchmarkScaleDown3Channel(self):
    self._benchmarkResize((749, 603), 3)

  def benchmarkSimilar1Channel(self):
    self._benchmarkResize((183, 229), 1)

  def benchmarkScaleUp1Channel(self):
    self._benchmarkResize((141, 186), 1)

  def benchmarkScaleDown1Channel(self):
    self._benchmarkResize((749, 603), 1)

  def benchmarkSimilar4Channel(self):
    self._benchmarkResize((183, 229), 4)

  def benchmarkScaleUp4Channel(self):
    self._benchmarkResize((141, 186), 4)

  def benchmarkScaleDown4Channel(self):
    self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
  """Times a serial chain of area resizes to 299x299."""

  def _benchmarkResize(self, image_size, num_channels):
    """Benchmark resizing one random [1, H, W, C] image 1000 times."""
    batch_size = 1
    num_ops = 1000
    img = variables.Variable(
        random_ops.random_normal(
            [batch_size, image_size[0], image_size[1], num_channels]),
        name="img")
    # Chain the resize ops with control dependencies so they run serially.
    pending = []
    for _ in xrange(num_ops):
      with ops.control_dependencies(pending):
        resized = image_ops.resize_area(img, [299, 299], align_corners=False)
        pending = [resized]
    benchmark_op = control_flow_ops.group(*pending)
    with self.benchmark_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      results = self.run_op_benchmark(
          sess,
          benchmark_op,
          name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
                                          num_channels)))
      ms_per_image = 1000 * results["wall_time"] / (batch_size * num_ops)
      print("%s : %.2f ms/img" % (results["name"], ms_per_image))

  def benchmarkSimilar3Channel(self):
    self._benchmarkResize((183, 229), 3)

  def benchmarkScaleUp3Channel(self):
    self._benchmarkResize((141, 186), 3)

  def benchmarkScaleDown3Channel(self):
    self._benchmarkResize((749, 603), 3)

  def benchmarkSimilar1Channel(self):
    self._benchmarkResize((183, 229), 1)

  def benchmarkScaleUp1Channel(self):
    self._benchmarkResize((141, 186), 1)

  def benchmarkScaleDown1Channel(self):
    self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
  """Tests image_ops.adjust_saturation against hard-coded/colorsys results."""

  def testHalfSaturation(self):
    """Halving saturation matches the precomputed expected pixels."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    saturation_factor = 0.5
    y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_saturation(x, saturation_factor)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testTwiceSaturation(self):
    """Doubling saturation matches the precomputed expected pixels."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    saturation_factor = 2.0
    y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_saturation(x, saturation_factor)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testBatchSaturation(self):
    """adjust_saturation applies per-image for a batched 4-D input."""
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    saturation_factor = 0.5
    y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_saturation(x, saturation_factor)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def _adjustSaturationNp(self, x_np, scale):
    """Reference saturation scaling via colorsys, one pixel at a time.

    Args:
      x_np: ndarray whose last dimension holds 3 (RGB) channels.
      scale: multiplicative factor applied to the saturation channel.

    Returns:
      ndarray of the same shape and dtype with scaled saturation.
    """
    self.assertEqual(x_np.shape[-1], 3)
    x_v = x_np.reshape([-1, 3])
    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    channel_count = x_v.shape[0]
    for i in xrange(channel_count):
      r = x_v[i][0]
      g = x_v[i][1]
      b = x_v[i][2]
      h, s, v = colorsys.rgb_to_hsv(r, g, b)
      s *= scale
      # Clamp saturation to the valid [0, 1] range.
      s = min(1.0, max(0.0, s))
      r, g, b = colorsys.hsv_to_rgb(h, s, v)
      y_v[i][0] = r
      y_v[i][1] = g
      y_v[i][2] = b
    return y_v.reshape(x_np.shape)

  @test_util.run_deprecated_v1
  def testAdjustRandomSaturation(self):
    """Random saturation scales agree with colorsys across shapes/ties."""
    x_shapes = [
        [2, 2, 3],
        [4, 2, 3],
        [2, 4, 3],
        [2, 5, 3],
        [1000, 1, 3],
    ]
    # Each style forces equality between some channels to hit different
    # branches of the RGB->HSV conversion (max/min channel selection).
    test_styles = [
        "all_random",
        "rg_same",
        "rb_same",
        "gb_same",
        "rgb_same",
    ]
    with self.cached_session(use_gpu=True):
      for x_shape in x_shapes:
        for test_style in test_styles:
          x_np = np.random.rand(*x_shape) * 255.
          scale = np.random.rand()
          if test_style == "all_random":
            pass
          elif test_style == "rg_same":
            x_np[..., 1] = x_np[..., 0]
          elif test_style == "rb_same":
            x_np[..., 2] = x_np[..., 0]
          elif test_style == "gb_same":
            x_np[..., 2] = x_np[..., 1]
          elif test_style == "rgb_same":
            x_np[..., 1] = x_np[..., 0]
            x_np[..., 2] = x_np[..., 0]
          else:
            raise AssertionError("Invalid test style: %s" % (test_style))
          y_baseline = self._adjustSaturationNp(x_np, scale)
          y_fused = image_ops.adjust_saturation(x_np, scale).eval()
          self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
  """Tests for flip_left_right/flip_up_down, their random variants, transpose and rot90."""
  def testInvolutionLeftRight(self):
    """flip_left_right applied twice is the identity (3-D input)."""
    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testInvolutionLeftRightWithBatch(self):
    """flip_left_right applied twice is the identity (batched 4-D input)."""
    x_np = np.array(
        [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  @test_util.run_deprecated_v1
  def testLeftRight(self):
    """flip_left_right reverses columns of a 3-D image and names its op."""
    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(x_tf)
      self.assertTrue(y.op.name.startswith("flip_left_right"))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testLeftRightWithBatch(self):
    """flip_left_right reverses columns of every image in a batch."""
    x_np = np.array(
        [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    y_np = np.array(
        [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  @test_util.run_deprecated_v1
  def testRandomFlipLeftRight(self):
    """random_flip_left_right flips ~half the time; checks via a first-pixel probe."""
    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
    seed = 42
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.random_flip_left_right(x_tf, seed=seed)
      self.assertTrue(y.op.name.startswith("random_flip_left_right"))
      count_flipped = 0
      count_unflipped = 0
      for _ in range(100):
        y_tf = self.evaluate(y)
        # First pixel is 1 only in the unflipped orientation.
        if y_tf[0][0] == 1:
          self.assertAllEqual(y_tf, x_np)
          count_unflipped += 1
        else:
          self.assertAllEqual(y_tf, y_np)
          count_flipped += 1
      # 100 trials
      # Mean: 50
      # Std Dev: ~5
      # Six Sigma: 50 - (5 * 6) = 20
      self.assertGreaterEqual(count_flipped, 20)
      self.assertGreaterEqual(count_unflipped, 20)
  @test_util.run_deprecated_v1
  def testRandomFlipLeftRightWithBatch(self):
    """random_flip_left_right flips each batch element independently ~half the time."""
    batch_size = 16
    seed = 42
    # create single item of test data
    x_np_raw = np.array(
        [[1, 2, 3], [1, 2, 3]], dtype=np.uint8
    ).reshape([1, 2, 3, 1])
    y_np_raw = np.array(
        [[3, 2, 1], [3, 2, 1]], dtype=np.uint8
    ).reshape([1, 2, 3, 1])
    # create batched test data
    x_np = np.vstack([x_np_raw for _ in range(batch_size)])
    y_np = np.vstack([y_np_raw for _ in range(batch_size)])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.random_flip_left_right(x_tf, seed=seed)
      self.assertTrue(y.op.name.startswith("random_flip_left_right"))
      count_flipped = 0
      count_unflipped = 0
      for _ in range(100):
        y_tf = self.evaluate(y)
        # check every element of the batch
        for i in range(batch_size):
          if y_tf[i][0][0] == 1:
            self.assertAllEqual(y_tf[i], x_np[i])
            count_unflipped += 1
          else:
            self.assertAllEqual(y_tf[i], y_np[i])
            count_flipped += 1
      # 100 trials, each containing batch_size elements
      # Mean: 50 * batch_size
      # Std Dev: ~5 * sqrt(batch_size)
      # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
      #          = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
      six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
      self.assertGreaterEqual(count_flipped, six_sigma)
      self.assertGreaterEqual(count_unflipped, six_sigma)
  def testInvolutionUpDown(self):
    """flip_up_down applied twice is the identity (3-D input)."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testInvolutionUpDownWithBatch(self):
    """flip_up_down applied twice is the identity (batched 4-D input)."""
    x_np = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  @test_util.run_deprecated_v1
  def testUpDown(self):
    """flip_up_down reverses rows of a 3-D image and names its op."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(x_tf)
      self.assertTrue(y.op.name.startswith("flip_up_down"))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testUpDownWithBatch(self):
    """flip_up_down reverses rows of every image in a batch."""
    x_np = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    y_np = np.array(
        [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  @test_util.run_deprecated_v1
  def testRandomFlipUpDown(self):
    """random_flip_up_down flips ~half the time; checks via a first-pixel probe."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    seed = 42
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.random_flip_up_down(x_tf, seed=seed)
      self.assertTrue(y.op.name.startswith("random_flip_up_down"))
      count_flipped = 0
      count_unflipped = 0
      for _ in range(100):
        y_tf = self.evaluate(y)
        # First pixel is 1 only in the unflipped orientation.
        if y_tf[0][0] == 1:
          self.assertAllEqual(y_tf, x_np)
          count_unflipped += 1
        else:
          self.assertAllEqual(y_tf, y_np)
          count_flipped += 1
      # 100 trials
      # Mean: 50
      # Std Dev: ~5
      # Six Sigma: 50 - (5 * 6) = 20
      self.assertGreaterEqual(count_flipped, 20)
      self.assertGreaterEqual(count_unflipped, 20)
  @test_util.run_deprecated_v1
  def testRandomFlipUpDownWithBatch(self):
    """random_flip_up_down flips each batch element independently ~half the time."""
    batch_size = 16
    seed = 42
    # create single item of test data
    x_np_raw = np.array(
        [[1, 2, 3], [4, 5, 6]], dtype=np.uint8
    ).reshape([1, 2, 3, 1])
    y_np_raw = np.array(
        [[4, 5, 6], [1, 2, 3]], dtype=np.uint8
    ).reshape([1, 2, 3, 1])
    # create batched test data
    x_np = np.vstack([x_np_raw for _ in range(batch_size)])
    y_np = np.vstack([y_np_raw for _ in range(batch_size)])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.random_flip_up_down(x_tf, seed=seed)
      self.assertTrue(y.op.name.startswith("random_flip_up_down"))
      count_flipped = 0
      count_unflipped = 0
      for _ in range(100):
        y_tf = self.evaluate(y)
        # check every element of the batch
        for i in range(batch_size):
          if y_tf[i][0][0] == 1:
            self.assertAllEqual(y_tf[i], x_np[i])
            count_unflipped += 1
          else:
            self.assertAllEqual(y_tf[i], y_np[i])
            count_flipped += 1
      # 100 trials, each containing batch_size elements
      # Mean: 50 * batch_size
      # Std Dev: ~5 * sqrt(batch_size)
      # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
      #          = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
      six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
      self.assertGreaterEqual(count_flipped, six_sigma)
      self.assertGreaterEqual(count_unflipped, six_sigma)
  def testInvolutionTranspose(self):
    """transpose applied twice is the identity (3-D input)."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.transpose(image_ops.transpose(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testInvolutionTransposeWithBatch(self):
    """transpose applied twice is the identity (batched 4-D input)."""
    x_np = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.transpose(image_ops.transpose(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  @test_util.run_deprecated_v1
  def testTranspose(self):
    """transpose swaps the height and width axes of a 3-D image."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.transpose(x_tf)
      self.assertTrue(y.op.name.startswith("transpose"))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testTransposeWithBatch(self):
    """transpose swaps height and width of every image in a batch."""
    x_np = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    y_np = np.array(
        [[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
        dtype=np.uint8).reshape([2, 3, 2, 1])
    with self.cached_session(use_gpu=True):
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.transpose(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  @test_util.run_deprecated_v1
  def testPartialShapes(self):
    """Shape inference for all flip/transpose/rot90 ops on partially known shapes."""
    p_unknown_rank = array_ops.placeholder(dtypes.uint8)
    p_unknown_dims_3 = array_ops.placeholder(
        dtypes.uint8, shape=[None, None, None])
    p_unknown_dims_4 = array_ops.placeholder(
        dtypes.uint8, shape=[None, None, None, None])
    p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
    p_unknown_batch = array_ops.placeholder(
        dtypes.uint8, shape=[None, 64, 64, 3])
    p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
    p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
    # Ops that support 3D input
    for op in [
        image_ops.flip_left_right, image_ops.flip_up_down,
        image_ops.random_flip_left_right, image_ops.random_flip_up_down,
        image_ops.transpose, image_ops.rot90
    ]:
      transformed_unknown_rank = op(p_unknown_rank)
      self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
      transformed_unknown_dims_3 = op(p_unknown_dims_3)
      self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
      transformed_unknown_width = op(p_unknown_width)
      self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
      with self.assertRaisesRegexp(ValueError, "must be > 0"):
        op(p_zero_dim)
    # Ops that support 4D input
    for op in [
        image_ops.flip_left_right, image_ops.flip_up_down,
        image_ops.random_flip_left_right, image_ops.random_flip_up_down,
        image_ops.transpose, image_ops.rot90
    ]:
      transformed_unknown_dims_4 = op(p_unknown_dims_4)
      self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
      transformed_unknown_batch = op(p_unknown_batch)
      self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
      with self.assertRaisesRegexp(ValueError,
                                   "must be at least three-dimensional"):
        op(p_wrong_rank)
  def testRot90GroupOrder(self):
    """Four successive rot90s return the original image (group of order 4)."""
    image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
    with self.cached_session(use_gpu=True):
      rotated = image
      for _ in xrange(4):
        rotated = image_ops.rot90(rotated)
      self.assertAllEqual(image, self.evaluate(rotated))
  def testRot90GroupOrderWithBatch(self):
    """Four successive rot90s return the original batch of images."""
    image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
    with self.cached_session(use_gpu=True):
      rotated = image
      for _ in xrange(4):
        rotated = image_ops.rot90(rotated)
      self.assertAllEqual(image, self.evaluate(rotated))
  @test_util.run_deprecated_v1
  def testRot90NumpyEquivalence(self):
    """rot90 matches np.rot90 for k = 0..3 on a 3-D image."""
    image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
    with self.cached_session(use_gpu=True):
      k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
      y_tf = image_ops.rot90(image, k_placeholder)
      for k in xrange(4):
        y_np = np.rot90(image, k=k)
        self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
  @test_util.run_deprecated_v1
  def testRot90NumpyEquivalenceWithBatch(self):
    """rot90 matches np.rot90 over the spatial axes for a batched input."""
    image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
    with self.cached_session(use_gpu=True):
      k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
      y_tf = image_ops.rot90(image, k_placeholder)
      for k in xrange(4):
        # np.rot90 rotates the leading axes by default; select the H/W axes.
        y_np = np.rot90(image, k=k, axes=(1, 2))
        self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class AdjustContrastTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_contrast against precomputed and numpy references."""
  def _testContrast(self, x_np, y_np, contrast_factor):
    """Asserts adjust_contrast(x_np, contrast_factor) is close to y_np."""
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.adjust_contrast(x, contrast_factor)
      y_tf = self.evaluate(y)
      self.assertAllClose(y_tf, y_np, 1e-6)
  def testDoubleContrastUint8(self):
    """Doubling contrast on uint8 input clamps results to [0, 255]."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=2.0)
  def testDoubleContrastFloat(self):
    """Doubling contrast on float input is not clamped (values may leave [0, 1])."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    # np.float was removed in NumPy 1.24; np.float64 is the identical dtype.
    x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
    y_data = [
        -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
        134.75, 409.25, -116.5
    ]
    y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
    self._testContrast(x_np, y_np, contrast_factor=2.0)
  def testHalfContrastUint8(self):
    """Halving contrast pulls uint8 values toward the per-channel mean."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=0.5)
  def testBatchDoubleContrast(self):
    """Contrast is computed per batch element (means differ between elements)."""
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=2.0)
  def _adjustContrastNp(self, x_np, contrast_factor):
    """Numpy reference: interpolate/extrapolate from the per-image spatial mean."""
    mean = np.mean(x_np, (1, 2), keepdims=True)
    y_np = mean + contrast_factor * (x_np - mean)
    return y_np
  def _adjustContrastTf(self, x_np, contrast_factor):
    """Runs image_ops.adjust_contrast and returns the evaluated result."""
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np)
      y = image_ops.adjust_contrast(x, contrast_factor)
      y_tf = self.evaluate(y)
    return y_tf
  def testRandomContrast(self):
    """TF and numpy reference agree on random images and random contrast factors."""
    x_shapes = [
        [1, 2, 2, 3],
        [2, 1, 2, 3],
        [1, 2, 2, 3],
        [2, 5, 5, 3],
        [2, 1, 1, 3],
    ]
    for x_shape in x_shapes:
      x_np = np.random.rand(*x_shape) * 255.
      contrast_factor = np.random.rand() * 2.0 + 0.1
      y_np = self._adjustContrastNp(x_np, contrast_factor)
      y_tf = self._adjustContrastTf(x_np, contrast_factor)
      self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
  @test_util.run_deprecated_v1
  def testContrastFactorShape(self):
    """A non-scalar contrast_factor is rejected at graph-construction time."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    with self.assertRaisesRegexp(
        ValueError, 'Shape must be rank 0 but is rank 1'):
      image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_brightness against precomputed expected images."""
  def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
    """Asserts adjust_brightness(x_np, delta) matches y_np within tol."""
    with self.cached_session(use_gpu=True):
      x_tensor = constant_op.constant(x_np, shape=x_np.shape)
      adjusted = image_ops.adjust_brightness(x_tensor, delta)
      adjusted_np = self.evaluate(adjusted)
      self.assertAllClose(adjusted_np, y_np, tol)
  def testPositiveDeltaUint8(self):
    """A positive delta brightens a uint8 image, saturating at 255."""
    shape = [2, 2, 3]
    input_values = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    expected_values = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
    x_np = np.array(input_values, dtype=np.uint8).reshape(shape)
    y_np = np.array(expected_values, dtype=np.uint8).reshape(shape)
    self._testBrightness(x_np, y_np, delta=10. / 255.)
  def testPositiveDeltaFloat32(self):
    """A positive delta on float32 input is not clamped (values may exceed 1.0)."""
    shape = [2, 2, 3]
    input_values = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    expected_values = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
    x_np = np.array(input_values, dtype=np.float32).reshape(shape) / 255.
    y_np = np.array(expected_values, dtype=np.float32).reshape(shape) / 255.
    self._testBrightness(x_np, y_np, delta=10. / 255.)
  def testPositiveDeltaFloat16(self):
    """Same as float32, with a looser tolerance for float16 precision."""
    shape = [2, 2, 3]
    input_values = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    expected_values = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
    x_np = np.array(input_values, dtype=np.float16).reshape(shape) / 255.
    y_np = np.array(expected_values, dtype=np.float16).reshape(shape) / 255.
    self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
  def testNegativeDelta(self):
    """A negative delta darkens a uint8 image, saturating at 0."""
    shape = [2, 2, 3]
    input_values = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    expected_values = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
    x_np = np.array(input_values, dtype=np.uint8).reshape(shape)
    y_np = np.array(expected_values, dtype=np.uint8).reshape(shape)
    self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.per_image_standardization (mean-0, unit-variance scaling)."""
  def _NumpyPerImageWhitening(self, x):
    """Numpy reference: (x - mean) / max(std, 1/sqrt(num_pixels))."""
    num_pixels = np.prod(x.shape)
    mn = np.mean(x)
    std = np.std(x)
    # Floor on stddev prevents division blow-up for near-uniform images.
    stddev = max(std, 1.0 / math.sqrt(num_pixels))
    y = x.astype(np.float32)
    y -= mn
    y /= stddev
    return y
  @test_util.run_deprecated_v1
  def testBasic(self):
    """TF result matches the numpy reference on a ramp image; checks op name."""
    x_shape = [13, 9, 3]
    x_np = np.arange(0, np.prod(x_shape), dtype=np.float32).reshape(x_shape)
    y_np = self._NumpyPerImageWhitening(x_np)
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.per_image_standardization(x)
      self.assertTrue(y.op.name.startswith("per_image_standardization"))
      y_tf = self.evaluate(y)
      self.assertAllClose(y_tf, y_np, atol=1e-4)
  def testUniformImage(self):
    """A constant image (zero variance) must not produce NaNs."""
    im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
    im = constant_op.constant(im_np)
    whiten = image_ops.per_image_standardization(im)
    with self.cached_session(use_gpu=True):
      whiten_np = self.evaluate(whiten)
      self.assertFalse(np.any(np.isnan(whiten_np)))
  def testBatchWhitening(self):
    """Standardization is applied independently to each image in a batch."""
    imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
    whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
    with self.cached_session(use_gpu=True):
      imgs = constant_op.constant(imgs_np)
      whiten = image_ops.per_image_standardization(imgs)
      whiten_tf = self.evaluate(whiten)
      for w_tf, w_np in zip(whiten_tf, whiten_np):
        self.assertAllClose(w_tf, w_np, atol=1e-4)
  def testPreservesDtype(self):
    """The output dtype matches the input dtype (uint8 and float16 probes)."""
    imgs_npu8 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.uint8)
    imgs_tfu8 = constant_op.constant(imgs_npu8)
    whiten_tfu8 = image_ops.per_image_standardization(imgs_tfu8)
    self.assertEqual(whiten_tfu8.dtype, dtypes.uint8)
    imgs_npf16 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.float16)
    imgs_tff16 = constant_op.constant(imgs_npf16)
    whiten_tff16 = image_ops.per_image_standardization(imgs_tff16)
    self.assertEqual(whiten_tff16.dtype, dtypes.float16)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.crop_to_bounding_box: results, shape inference, errors."""
  def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
                         target_width, use_tensor_inputs):
    """Crops x; with use_tensor_inputs all arguments are fed as tensors."""
    if use_tensor_inputs:
      offset_height = ops.convert_to_tensor(offset_height)
      offset_width = ops.convert_to_tensor(offset_width)
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
      feed_dict = {x_tensor: x}
    else:
      x_tensor = x
      feed_dict = {}
    y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
                                       target_height, target_width)
    if not use_tensor_inputs:
      # With static inputs the output shape must be fully inferred.
      self.assertTrue(y.get_shape().is_fully_defined())
    with self.cached_session(use_gpu=True):
      return y.eval(feed_dict=feed_dict)
  def _assertReturns(self,
                     x,
                     x_shape,
                     offset_height,
                     offset_width,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts cropping x yields y, for both static and tensor inputs."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
                                     target_height, target_width,
                                     use_tensor_inputs)
      self.assertAllClose(y, y_tf)
  def _assertRaises(self,
                    x,
                    x_shape,
                    offset_height,
                    offset_width,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Asserts cropping raises an exception whose message contains err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      try:
        self._CropToBoundingBox(x, offset_height, offset_width, target_height,
                                target_width, use_tensor_inputs)
      except Exception as e:
        if err_msg not in str(e):
          raise
      else:
        raise AssertionError("Exception not raised: %s" % err_msg)
  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Asserts the statically inferred crop shape equals post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)
  @test_util.run_deprecated_v1
  def testNoOp(self):
    """Cropping to the full image size returns the image unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, 0, 0, x, x_shape)
  @test_util.run_deprecated_v1
  def testCrop(self):
    """Crops from each edge of a 3x3 image produce the expected sub-images."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    offset_height, offset_width = [1, 0]
    y_shape = [2, 3, 1]
    y = [4, 5, 6, 7, 8, 9]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    offset_height, offset_width = [0, 1]
    y_shape = [3, 2, 1]
    y = [2, 3, 5, 6, 8, 9]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    offset_height, offset_width = [0, 0]
    y_shape = [2, 3, 1]
    y = [1, 2, 3, 4, 5, 6]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    offset_height, offset_width = [0, 0]
    y_shape = [3, 2, 1]
    y = [1, 2, 4, 5, 7, 8]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
  @test_util.run_deprecated_v1
  def testShapeInference(self):
    """Static shape inference for known, partially known and unknown shapes."""
    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
    self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
    self._assertShapeInference(None, 55, 66, [55, 66, None])
  @test_util.run_deprecated_v1
  def testNon3DInput(self):
    # Input image is not 3D
    x = [0] * 15
    offset_height, offset_width = [0, 0]
    target_height, target_width = [2, 2]
    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
      self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
                         target_width,
                         "'image' must have either 3 or 4 dimensions.")
  @test_util.run_deprecated_v1
  def testZeroLengthInput(self):
    # Input image has 0-length dimension(s).
    # Each line is a test configuration:
    #   x_shape, target_height, target_width
    test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
                   ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
    offset_height, offset_width = [0, 0]
    x = []
    for x_shape, target_height, target_width in test_config:
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "all dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "assertion failed:",
          use_tensor_inputs_options=[True])
  @test_util.run_deprecated_v1
  def testBadParams(self):
    """Invalid offsets/targets raise with the documented error messages."""
    x_shape = [4, 4, 1]
    x = np.zeros(x_shape)
    # Each line is a test configuration:
    #   (offset_height, offset_width, target_height, target_width), err_msg
    test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"), ([
        0, -1, 3, 3
    ], "offset_width must be >= 0"), ([0, 0, 0, 3],
                                      "target_height must be > 0"),
                   ([0, 0, 3, 0], "target_width must be > 0"),
                   ([2, 0, 3, 3], "height must be >= target + offset"),
                   ([0, 2, 3, 3], "width must be >= target + offset"))
    for params, err_msg in test_config:
      self._assertRaises(x, x_shape, *params, err_msg=err_msg)
  @test_util.run_deprecated_v1
  def testNameScope(self):
    """The produced op name starts with "crop_to_bounding_box"."""
    image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
    y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
    self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.central_crop: values, shape inference, invalid inputs."""
  def _assertShapeInference(self, pre_shape, fraction, post_shape):
    """Asserts the statically inferred central-crop shape equals post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.central_crop(image, fraction)
    if post_shape is None:
      self.assertEqual(y.get_shape().dims, None)
    else:
      self.assertEqual(y.get_shape().as_list(), post_shape)
  @test_util.run_deprecated_v1
  def testNoOp(self):
    """Fraction 1.0 is a no-op that returns the input tensor itself."""
    x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
    for x_shape in x_shapes:
      x_np = np.ones(x_shape, dtype=np.float32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          x = constant_op.constant(x_np, shape=x_shape)
          y = image_ops.central_crop(x, 1.0)
          y_tf = self.evaluate(y)
          self.assertAllEqual(y_tf, x_np)
          # Same op name implies the identical tensor was returned.
          self.assertEqual(y.op.name, x.op.name)
  def testCropping(self):
    """Fraction 0.5 keeps the central half in each spatial dimension."""
    x_shape = [4, 8, 1]
    x_np = np.array(
        [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
         [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
        dtype=np.int32).reshape(x_shape)
    y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        x = constant_op.constant(x_np, shape=x_shape)
        y = image_ops.central_crop(x, 0.5)
        y_tf = self.evaluate(y)
        self.assertAllEqual(y_tf, y_np)
        self.assertAllEqual(y_tf.shape, y_np.shape)
    x_shape = [2, 4, 8, 1]
    x_np = np.array(
        [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
         [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
         [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
         [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
        dtype=np.int32).reshape(x_shape)
    y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
                     [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
    with self.cached_session(use_gpu=True):
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.central_crop(x, 0.5)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
      self.assertAllEqual(y_tf.shape, y_np.shape)
  @test_util.run_deprecated_v1
  def testCropping2(self):
    # Test case for 10315
    x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
    expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
    for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
      x_np = np.zeros(x_shape, dtype=np.int32)
      y_np = np.zeros(y_shape, dtype=np.int32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)
          y = image_ops.central_crop(x, 0.33)
          y_tf = y.eval(feed_dict={x: x_np})
          self.assertAllEqual(y_tf, y_np)
          self.assertAllEqual(y_tf.shape, y_np.shape)
  @test_util.run_deprecated_v1
  def testShapeInference(self):
    """Static shape inference for 3-D and 4-D inputs at fractions 1.0 and 0.5."""
    # Test no-op fraction=1.0, with 3-D tensors.
    self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
    self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
    self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
    self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
    self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
    self._assertShapeInference([None, None, None], 1.0, [None, None, None])
    # Test fraction=0.5, with 3-D tensors.
    self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
    self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
    self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
    self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
    self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
    self._assertShapeInference([None, None, None], 0.5, [None, None, None])
    # Test no-op fraction=1.0, with 4-D tensors.
    self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
    self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
    self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
    self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
    self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
    self._assertShapeInference([5, None, None, None], 1.0,
                               [5, None, None, None])
    self._assertShapeInference([None, None, None, None], 1.0,
                               [None, None, None, None])
    # Test fraction=0.5, with 4-D tensors.
    self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
    self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
    self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
    self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
    self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
    self._assertShapeInference([5, None, None, None], 0.5,
                               [5, None, None, None])
    self._assertShapeInference([None, None, None, None], 0.5,
                               [None, None, None, None])
  def testErrorOnInvalidCentralCropFractionValues(self):
    """Fractions outside (0, 1] are rejected with ValueError."""
    x_shape = [13, 9, 3]
    x_np = np.ones(x_shape, dtype=np.float32)
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        x = constant_op.constant(x_np, shape=x_shape)
        with self.assertRaises(ValueError):
          _ = image_ops.central_crop(x, 0.0)
        with self.assertRaises(ValueError):
          _ = image_ops.central_crop(x, 1.01)
  def testErrorOnInvalidShapes(self):
    """Inputs that are not 3-D or 4-D are rejected with ValueError."""
    x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
    for x_shape in x_shapes:
      x_np = np.ones(x_shape, dtype=np.float32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          x = constant_op.constant(x_np, shape=x_shape)
          with self.assertRaises(ValueError):
            _ = image_ops.central_crop(x, 0.5)
  @test_util.run_deprecated_v1
  def testNameScope(self):
    """The produced op name starts with "central_crop"."""
    x_shape = [13, 9, 3]
    x_np = np.ones(x_shape, dtype=np.float32)
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        y = image_ops.central_crop(x_np, 1.0)
        self.assertTrue(y.op.name.startswith("central_crop"))
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.pad_to_bounding_box: results, shape inference, errors."""
  def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
                        target_width, use_tensor_inputs):
    """Pads x; with use_tensor_inputs all arguments are fed as tensors."""
    if use_tensor_inputs:
      offset_height = ops.convert_to_tensor(offset_height)
      offset_width = ops.convert_to_tensor(offset_width)
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
      feed_dict = {x_tensor: x}
    else:
      x_tensor = x
      feed_dict = {}
    y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
                                      target_height, target_width)
    if not use_tensor_inputs:
      # With static inputs the output shape must be fully inferred.
      self.assertTrue(y.get_shape().is_fully_defined())
    with self.cached_session(use_gpu=True):
      return y.eval(feed_dict=feed_dict)
  def _assertReturns(self,
                     x,
                     x_shape,
                     offset_height,
                     offset_width,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts padding x yields y, for both static and tensor inputs."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
                                    target_height, target_width,
                                    use_tensor_inputs)
      self.assertAllClose(y, y_tf)
  def _assertRaises(self,
                    x,
                    x_shape,
                    offset_height,
                    offset_width,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Asserts padding raises an exception whose message contains err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      try:
        self._PadToBoundingBox(x, offset_height, offset_width, target_height,
                               target_width, use_tensor_inputs)
      except Exception as e:
        if err_msg not in str(e):
          raise
      else:
        raise AssertionError("Exception not raised: %s" % err_msg)
  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Asserts the statically inferred padded shape equals post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)
  def testInt64(self):
    """int64 offset/target arguments are accepted."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    y_shape = [4, 3, 1]
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
    y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
    with self.cached_session(use_gpu=True):
      self.assertAllClose(y, self.evaluate(y_tf))
  @test_util.run_deprecated_v1
  def testNoOp(self):
    """Padding to the input's own size returns the input unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    offset_height, offset_width = [0, 0]
    self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
  @test_util.run_deprecated_v1
  def testPadding(self):
    """Zero-padding on each side of a 3x3 image produces the expected result."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    offset_height, offset_width = [1, 0]
    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    y_shape = [4, 3, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    offset_height, offset_width = [0, 1]
    y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
    y_shape = [3, 4, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    offset_height, offset_width = [0, 0]
    y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
    y_shape = [4, 3, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    offset_height, offset_width = [0, 0]
    y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
    y_shape = [3, 4, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
  @test_util.run_deprecated_v1
  def testShapeInference(self):
    """Static shape inference for known, partially known and unknown shapes."""
    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
    self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
    self._assertShapeInference(None, 55, 66, [55, 66, None])
  @test_util.run_deprecated_v1
  def testNon3DInput(self):
    # Input image is not 3D
    x = [0] * 15
    offset_height, offset_width = [0, 0]
    target_height, target_width = [2, 2]
    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
      self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
                         target_width,
                         "'image' must have either 3 or 4 dimensions.")
  @test_util.run_deprecated_v1
  def testZeroLengthInput(self):
    # Input image has 0-length dimension(s).
    # Each line is a test configuration:
    #   x_shape, target_height, target_width
    test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
    offset_height, offset_width = [0, 0]
    x = []
    for x_shape, target_height, target_width in test_config:
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "all dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])
      # The original error message does not contain back slashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also needs to be changed.
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "all dims of \\'image.shape\\' must be > 0",
          use_tensor_inputs_options=[True])
  @test_util.run_deprecated_v1
  def testBadParams(self):
    """Invalid offsets/targets raise with the documented error messages."""
    x_shape = [3, 3, 1]
    x = np.zeros(x_shape)
    # Each line is a test configuration:
    #   offset_height, offset_width, target_height, target_width, err_msg
    test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
                   (0, -1, 4, 4, "offset_width must be >= 0"),
                   (2, 0, 4, 4, "height must be <= target - offset"),
                   (0, 2, 4, 4, "width must be <= target - offset"))
    for config_item in test_config:
      self._assertRaises(x, x_shape, *config_item)
  @test_util.run_deprecated_v1
  def testNameScope(self):
    """The produced op name starts with "pad_to_bounding_box"."""
    image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
    y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
    self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.sample_distorted_bounding_box."""
  def _testSampleDistortedBoundingBox(self, image, bounding_box,
                                      min_object_covered, aspect_ratio_range,
                                      area_range):
    """Draws many samples and checks crop statistics.

    Runs sample_distorted_bounding_box `num_iter` times, once with
    `min_object_covered` given as a Python value and once fed through a
    placeholder, and verifies every crop's area ratio stays within
    `area_range`.  Stricter distribution checks are disabled (see the
    TODOs below).

    Args:
      image: numpy array; its shape provides the image size and its values
        the object mask used for the covered-fraction statistic.
      bounding_box: (ymin, xmin, ymax, xmax) in relative coordinates.
      min_object_covered: minimum fraction of the box the crop must cover.
      aspect_ratio_range: (min, max) aspect ratio passed to the op.
      area_range: (min, max) allowed crop area as a fraction of the image.
    """
    original_area = float(np.prod(image.shape))
    bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
                              (bounding_box[2] - bounding_box[0]))
    image_size_np = np.array(image.shape, dtype=np.int32)
    bounding_box_np = (
        np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
    # Statistics accumulated across both sampling passes below.
    aspect_ratios = []
    area_ratios = []
    fraction_object_covered = []
    num_iter = 1000
    with self.cached_session(use_gpu=True):
      image_tf = constant_op.constant(image, shape=image.shape)
      image_size_tf = constant_op.constant(
          image_size_np, shape=image_size_np.shape)
      bounding_box_tf = constant_op.constant(
          bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
      begin, size, _ = image_ops.sample_distorted_bounding_box(
          image_size=image_size_tf,
          bounding_boxes=bounding_box_tf,
          min_object_covered=min_object_covered,
          aspect_ratio_range=aspect_ratio_range,
          area_range=area_range)
      y = array_ops.strided_slice(image_tf, begin, begin + size)
      # Pass 1: min_object_covered as a Python value; each evaluation of `y`
      # re-runs the sampler and yields a fresh crop.
      for _ in xrange(num_iter):
        y_tf = self.evaluate(y)
        crop_height = y_tf.shape[0]
        crop_width = y_tf.shape[1]
        aspect_ratio = float(crop_width) / float(crop_height)
        area = float(crop_width * crop_height)
        aspect_ratios.append(aspect_ratio)
        area_ratios.append(area / original_area)
        fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
      # min_object_covered as tensor
      min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)
      begin, size, _ = image_ops.sample_distorted_bounding_box(
          image_size=image_size_tf,
          bounding_boxes=bounding_box_tf,
          min_object_covered=min_object_covered_placeholder,
          aspect_ratio_range=aspect_ratio_range,
          area_range=area_range)
      y = array_ops.strided_slice(image_tf, begin, begin + size)
      # Pass 2: the same value fed through the placeholder.
      for _ in xrange(num_iter):
        y_tf = y.eval(feed_dict={
            min_object_covered_placeholder: min_object_covered
        })
        crop_height = y_tf.shape[0]
        crop_width = y_tf.shape[1]
        aspect_ratio = float(crop_width) / float(crop_height)
        area = float(crop_width * crop_height)
        aspect_ratios.append(aspect_ratio)
        area_ratios.append(area / original_area)
        fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
    # Ensure that each entry is observed within 3 standard deviations.
    # num_bins = 10
    # aspect_ratio_hist, _ = np.histogram(aspect_ratios,
    #                                     bins=num_bins,
    #                                     range=aspect_ratio_range)
    # mean = np.mean(aspect_ratio_hist)
    # stddev = np.sqrt(mean)
    # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
    # TODO(irving): Since the rejection probability is not independent of the
    # aspect ratio, the aspect_ratio random value is not exactly uniformly
    # distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
    # fixed to reflect the true statistical property, then tightened to enforce
    # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
    # be fixed to not use rejection sampling and generate correctly uniform
    # aspect ratios.
    # self.assertAllClose(aspect_ratio_hist,
    #                     [mean] * num_bins, atol=3.6 * stddev)
    # The resulting crop will not be uniformly distributed in area. In practice,
    # we find that the area skews towards the small sizes. Instead, we perform
    # a weaker test to ensure that the area ratios are merely within the
    # specified bounds.
    self.assertLessEqual(max(area_ratios), area_range[1])
    self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios look like.
    area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
    print("area_ratio_hist ", area_ratio_hist)
    # Ensure that fraction_object_covered is satisfied.
    # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
    # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
@test_util.run_deprecated_v1
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
@test_util.run_deprecated_v1
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
@test_util.run_deprecated_v1
def testSampleDistortedBoundingBoxShape(self):
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# By default min_object_covered=0.1 if not provided
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
class ResizeImagesV2Test(test_util.TensorFlowTestCase):
  """Tests for image_ops.resize_images_v2 across methods and dtypes."""

  # Every resize method the generic tests iterate over.
  METHODS = [
      image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
      image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
      image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
      image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
  ]
  # Some resize methods, such as Gaussian, are non-interpolating in that they
  # change the image even if there is no scale change, for some test, we only
  # check the value on the value preserving methods.
  INTERPOLATING_METHODS = [
      image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
      image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
      image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
  ]
  # Input dtypes the generic tests iterate over.
  TYPES = [
      np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
      np.float32, np.float64
  ]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
@test_util.run_deprecated_v1
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers unsupported in ResizeBilinear
@test_util.run_deprecated_v1
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, new_size, method)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, new_size, self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
@test_util.run_deprecated_v1
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeDown(self):
    """Downsamples 6x4 -> 3x2 and checks exact pixels.

    Only runs for (method, dtype) pairs with a GPU kernel, and only when a
    GPU is available (see shouldRunOnGPU).
    """
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    expected_data = [127, 64, 64, 127, 50, 100]
    target_height = 3
    target_width = 2
    # Test out 3-D and 4-D image shapes.
    img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
    target_shapes = [[1, target_height, target_width, 1],
                     [target_height, target_width, 1]]
    for target_shape, img_shape in zip(target_shapes, img_shapes):
      for nptype in self.TYPES:
        img_np = np.array(data, dtype=nptype).reshape(img_shape)
        for method in self.METHODS:
          # Gate on GPU availability so only GPU-kernel pairs are exercised.
          if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
            with self.cached_session(use_gpu=True):
              image = constant_op.constant(img_np, shape=img_shape)
              y = image_ops.resize_images_v2(
                  image, [target_height, target_width], method)
              expected = np.array(expected_data).reshape(target_shape)
              resized = self.evaluate(y)
              self.assertAllClose(resized, expected, atol=1e-5)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeUp(self):
    """Upsamples a 3x2 image to 6x4 and checks per-method golden outputs."""
    img_shape = [1, 3, 2, 1]
    data = [64, 32, 32, 64, 50, 100]
    target_height = 6
    target_width = 4
    # Golden outputs: one flat 6x4 table per resize method.
    expected_data = {}
    expected_data[image_ops.ResizeMethod.BILINEAR] = [
        64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
        36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
        87.5, 100.0
    ]
    expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethod.AREA] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethod.LANCZOS3] = [
        75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
        35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
        35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
    ]
    expected_data[image_ops.ResizeMethod.LANCZOS5] = [
        77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
        35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
        32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
    ]
    expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
        61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
        41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
        47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
    ]
    expected_data[image_ops.ResizeMethod.BICUBIC] = [
        70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
        36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
        41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
    ]
    expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
        66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
        39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
        43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
    ]
    for nptype in self.TYPES:
      for method in expected_data:
        with self.cached_session(use_gpu=True):
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images_v2(image, [target_height, target_width],
                                         method)
          resized = self.evaluate(y)
          expected = np.array(expected_data[method]).reshape(
              [1, target_height, target_width, 1])
          self.assertAllClose(resized, expected, atol=1e-04)
  # XLA doesn't implement half_pixel_centers
  @test_util.disable_xla("b/127616992")
  def testLegacyBicubicMethodsMatchNewMethods(self):
    """Legacy resize kernels with half_pixel_centers=True must match
    scale_and_translate with the equivalent sampling kernel."""
    img_shape = [1, 3, 2, 1]
    data = [64, 32, 32, 64, 50, 100]
    target_height = 6
    target_width = 4
    # (legacy resize op, scale_and_translate kernel name) pairs to compare.
    methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
                       (gen_image_ops.resize_bicubic, "keyscubic"))
    for legacy_method, new_method in methods_to_test:
      with self.cached_session(use_gpu=True):
        img_np = np.array(data, dtype=np.float32).reshape(img_shape)
        image = constant_op.constant(img_np, shape=img_shape)
        legacy_result = legacy_method(
            image,
            constant_op.constant([target_height, target_width],
                                 dtype=dtypes.int32),
            half_pixel_centers=True)
        # Per-axis scale factor: target spatial size / input spatial size.
        scale = (
            constant_op.constant([target_height, target_width],
                                 dtype=dtypes.float32) /
            math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
        new_result = gen_image_ops.scale_and_translate(
            image,
            constant_op.constant([target_height, target_width],
                                 dtype=dtypes.int32),
            scale,
            array_ops.zeros([2]),
            kernel_type=new_method,
            antialias=False)
        self.assertAllClose(
            self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
  def testCompareBilinear(self):
    """Bilinear resize must agree between GPU and CPU kernels.

    NOTE(review): this test lives in the v2 suite but calls
    image_ops.resize_images (the v1 endpoint) with a v2 ResizeMethod —
    confirm whether it should use resize_images_v2 instead.
    """
    if test.is_gpu_available():
      input_shape = [1, 5, 6, 3]
      target_height = 8
      target_width = 12
      for nptype in [np.float32, np.float64]:
        img_np = np.arange(
            0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
        # Evaluate the same graph on GPU and CPU, keyed by use_gpu.
        value = {}
        for use_gpu in [True, False]:
          with self.cached_session(use_gpu=use_gpu):
            image = constant_op.constant(img_np, shape=input_shape)
            new_size = constant_op.constant([target_height, target_width])
            out_op = image_ops.resize_images(image, new_size,
                                             image_ops.ResizeMethod.BILINEAR)
            value[use_gpu] = self.evaluate(out_op)
        self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
@test_util.run_deprecated_v1
def testNameScope(self):
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
@test_util.run_deprecated_v1
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(
x, x_shape, [250, 250], [10, 250, 250, 10], preserve_aspect_ratio=False)
@test_util.run_deprecated_v1
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImagesTest(test_util.TensorFlowTestCase):
  """Tests for the v1 image_ops.resize_images endpoint."""

  # All v1 resize methods the generic tests iterate over.
  METHODS = [
      image_ops.ResizeMethodV1.BILINEAR,
      image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
      image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
  ]
  # Input dtypes the generic tests iterate over.
  TYPES = [
      np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
      np.float32, np.float64
  ]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
@test_util.run_deprecated_v1
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
@test_util.run_deprecated_v1
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, method)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethodV1.BILINEAR)
@test_util.run_deprecated_v1
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testSumTensor(self):
    """Target size may be built from tensor arithmetic (width = w1 + w2)."""
    img_shape = [1, 6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    # Test size where width is specified as a tensor which is a sum
    # of two tensors.
    width_1 = constant_op.constant(1)
    width_2 = constant_op.constant(3)
    width = math_ops.add(width_1, width_2)
    height = constant_op.constant(6)
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    for method in self.METHODS:
      with self.cached_session() as sess:
        image = constant_op.constant(img_np, shape=img_shape)
        y = image_ops.resize_images(image, [height, width], method)
        yshape = array_ops.shape(y)
        resized, newshape = self.evaluate([y, yshape])
        # The computed size equals the input size (6x4), so every method
        # must act as a no-op.
        self.assertAllEqual(img_shape, newshape)
        self.assertAllClose(resized, img_np, atol=1e-5)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeDown(self):
    """Downscaling 6x4 -> 3x2 samples the expected pixels for all methods."""
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    expected_data = [127, 64, 64, 127, 50, 100]
    target_height = 3
    target_width = 2
    # Test out 3-D and 4-D image shapes.
    img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
    target_shapes = [[1, target_height, target_width, 1],
                     [target_height, target_width, 1]]
    for target_shape, img_shape in zip(target_shapes, img_shapes):
      for nptype in self.TYPES:
        img_np = np.array(data, dtype=nptype).reshape(img_shape)
        for method in self.METHODS:
          # Only run combinations that have a GPU kernel available.
          if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
            with self.cached_session(use_gpu=True):
              image = constant_op.constant(img_np, shape=img_shape)
              y = image_ops.resize_images(image, [target_height, target_width],
                                          method)
              expected = np.array(expected_data).reshape(target_shape)
              resized = self.evaluate(y)
              self.assertAllClose(resized, expected, atol=1e-5)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeUpAlignCornersFalse(self):
    """Upscaling 3x2 -> 6x4 with align_corners=False against golden tables."""
    img_shape = [1, 3, 2, 1]
    data = [64, 32, 32, 64, 50, 100]
    target_height = 6
    target_width = 4
    # Per-method golden outputs for the 6x4 result.
    expected_data = {}
    expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
        64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
        41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethodV1.AREA] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    for nptype in self.TYPES:
      for method in [
          image_ops.ResizeMethodV1.BILINEAR,
          image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
          image_ops.ResizeMethodV1.AREA
      ]:
        with self.cached_session(use_gpu=True):
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images(
              image, [target_height, target_width], method, align_corners=False)
          resized = self.evaluate(y)
          expected = np.array(expected_data[method]).reshape(
              [1, target_height, target_width, 1])
          self.assertAllClose(resized, expected, atol=1e-05)
  def testResizeUpAlignCornersTrue(self):
    """Upscaling 3x2 -> 5x4 with align_corners=True against golden tables."""
    img_shape = [1, 3, 2, 1]
    data = [6, 3, 3, 6, 6, 9]
    target_height = 5
    target_width = 4
    # Per-method golden outputs for the 5x4 result.
    expected_data = {}
    expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
        6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
        6.5, 7.5, 6.0, 7.0, 8.0, 9.0
    ]
    expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
        6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
        9.0, 9.0, 6.0, 6.0, 9.0, 9.0
    ]
    # TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
    # align_corners=True.
    expected_data[image_ops.ResizeMethodV1.AREA] = [
        6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
        3.0, 6.0, 6.0, 6.0, 6.0, 9.0
    ]
    for nptype in self.TYPES:
      for method in [
          image_ops.ResizeMethodV1.BILINEAR,
          image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
          image_ops.ResizeMethodV1.AREA
      ]:
        with self.cached_session(use_gpu=True):
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images(
              image, [target_height, target_width], method, align_corners=True)
          resized = self.evaluate(y)
          expected = np.array(expected_data[method]).reshape(
              [1, target_height, target_width, 1])
          self.assertAllClose(resized, expected, atol=1e-05)
  def testResizeUpBicubic(self):
    """Bicubic upscaling 6x6 -> 8x8 against a golden output (atol=1)."""
    img_shape = [1, 6, 6, 1]
    data = [
        128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
        50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
        50, 50, 100, 100
    ]
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    target_height = 8
    target_width = 8
    # Golden 8x8 output; bicubic overshoot makes exact matching impossible,
    # hence the loose atol=1 below.
    expected_data = [
        128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
        55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
        105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
        75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
    ]
    with self.cached_session(use_gpu=True):
      image = constant_op.constant(img_np, shape=img_shape)
      y = image_ops.resize_images(image, [target_height, target_width],
                                  image_ops.ResizeMethodV1.BICUBIC)
      resized = self.evaluate(y)
      expected = np.array(expected_data).reshape(
          [1, target_height, target_width, 1])
      self.assertAllClose(resized, expected, atol=1)
  def testResizeDownArea(self):
    """Area downscaling 6x6 -> 4x4 against a golden output (atol=1)."""
    img_shape = [1, 6, 6, 1]
    data = [
        128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
        10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
    ]
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    target_height = 4
    target_width = 4
    # Golden 4x4 output of area averaging; compared with atol=1 to allow
    # rounding differences.
    expected_data = [
        73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
    ]
    with self.cached_session(use_gpu=True):
      image = constant_op.constant(img_np, shape=img_shape)
      y = image_ops.resize_images(image, [target_height, target_width],
                                  image_ops.ResizeMethodV1.AREA)
      expected = np.array(expected_data).reshape(
          [1, target_height, target_width, 1])
      resized = self.evaluate(y)
      self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
  def testCompareBilinear(self):
    """Bilinear resize must agree between the CPU and GPU kernels."""
    if test.is_gpu_available():
      input_shape = [1, 5, 6, 3]
      target_height = 8
      target_width = 12
      for nptype in [np.float32, np.float64]:
        for align_corners in [True, False]:
          img_np = np.arange(
              0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
          # Evaluate on both devices, keyed by use_gpu, then compare.
          value = {}
          for use_gpu in [True, False]:
            with self.cached_session(use_gpu=use_gpu):
              image = constant_op.constant(img_np, shape=input_shape)
              new_size = constant_op.constant([target_height, target_width])
              out_op = image_ops.resize_images(
                  image,
                  new_size,
                  image_ops.ResizeMethodV1.BILINEAR,
                  align_corners=align_corners)
              value[use_gpu] = self.evaluate(out_op)
          self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
@test_util.run_deprecated_v1
def testNameScope(self):
img_shape = [1, 3, 2, 1]
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(x_tensor, target_max,
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
@test_util.run_deprecated_v1
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
@test_util.run_deprecated_v1
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
  """Tests for image_ops.resize_image_with_pad_v1."""

  def _ResizeImageWithPad(self, x, target_height, target_width,
                          use_tensor_inputs):
    """Resize-with-pad x to the target size and return the evaluated array.

    When use_tensor_inputs is True, the sizes are fed as tensors and the
    image goes through a placeholder of unknown static shape.
    """
    if use_tensor_inputs:
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
      feed_dict = {x_tensor: x}
    else:
      x_tensor = x
      feed_dict = {}
    y = image_ops.resize_image_with_pad_v1(x_tensor, target_height,
                                           target_width)
    if not use_tensor_inputs:
      # With static inputs the output shape must be fully inferred.
      self.assertTrue(y.get_shape().is_fully_defined())
    with self.cached_session(use_gpu=True):
      return y.eval(feed_dict=feed_dict)

  def _assertReturns(self,
                     x,
                     x_shape,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Assert resize-with-pad of x produces y for tensor and python sizes."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageWithPad(x, target_height, target_width,
                                      use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Assert resize-with-pad raises an error whose message contains err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      try:
        self._ResizeImageWithPad(x, target_height, target_width,
                                 use_tensor_inputs)
      except Exception as e:  # pylint: disable=broad-except
        if err_msg not in str(e):
          raise
      else:
        raise AssertionError("Exception not raised: %s" % err_msg)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Assert static shape inference for the given placeholder shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_pad_v1(image, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  @test_util.run_deprecated_v1
  def testNoOp(self):
    """Resizing to the image's own size leaves it unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, x, x_shape)

  @test_util.run_deprecated_v1
  def testPad(self):
    """Shrinking one dimension pads the other with zeros."""
    # Reduce vertical dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 1, 3, 0]
    y_shape = [1, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce horizontal dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [1, 3, 0, 0]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce both dimensions; no padding is needed here.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [1, 3]
    y_shape = [1, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
  """Tests for image_ops.resize_image_with_pad_v2 (half-pixel centers)."""

  def _ResizeImageWithPad(self, x, target_height, target_width,
                          use_tensor_inputs):
    """Resize-with-pad x to the target size and return the evaluated array.

    When use_tensor_inputs is True, the sizes are fed as tensors and the
    image goes through a placeholder of unknown static shape.
    """
    if use_tensor_inputs:
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
      feed_dict = {x_tensor: x}
    else:
      x_tensor = x
      feed_dict = {}
    y = image_ops.resize_image_with_pad_v2(x_tensor, target_height,
                                           target_width)
    if not use_tensor_inputs:
      # With static inputs the output shape must be fully inferred.
      self.assertTrue(y.get_shape().is_fully_defined())
    with self.cached_session(use_gpu=True):
      return y.eval(feed_dict=feed_dict)

  def _assertReturns(self,
                     x,
                     x_shape,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Assert resize-with-pad of x produces y for tensor and python sizes."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageWithPad(x, target_height, target_width,
                                      use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Assert resize-with-pad raises an error whose message contains err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      try:
        self._ResizeImageWithPad(x, target_height, target_width,
                                 use_tensor_inputs)
      except Exception as e:  # pylint: disable=broad-except
        if err_msg not in str(e):
          raise
      else:
        raise AssertionError("Exception not raised: %s" % err_msg)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Assert static shape inference for the given placeholder shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    # Bug fix: this V2 test class previously called resize_image_with_pad_v1
    # here (copy-paste from the V1 class), so shape inference of the op under
    # test was never exercised. Use the V2 op.
    y = image_ops.resize_image_with_pad_v2(image, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  @test_util.run_deprecated_v1
  def testNoOp(self):
    """Resizing to the image's own size leaves it unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, x, x_shape)

  @test_util.run_deprecated_v1
  def testPad(self):
    """Shrinking one dimension pads the other; half-pixel centers average."""
    # Reduce vertical dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 3.5, 5.5, 0]
    y_shape = [1, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce horizontal dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [3.5, 5.5, 0, 0]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce both dimensions; no padding is needed here.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [3.5, 5.5]
    y_shape = [1, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.resize_image_with_crop_or_pad."""

  def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
                                use_tensor_inputs):
    """Crop-or-pad x to (target_height, target_width) and evaluate it.

    When use_tensor_inputs is True, the sizes are fed as tensors and the
    image goes through a placeholder of unknown static shape.
    """
    if use_tensor_inputs:
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
      feed_dict = {x_tensor: x}
    else:
      x_tensor = x
      feed_dict = {}
    y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
                                                target_width)
    if not use_tensor_inputs:
      # With static inputs the output shape must be fully inferred.
      self.assertTrue(y.get_shape().is_fully_defined())
    with self.cached_session(use_gpu=True):
      return y.eval(feed_dict=feed_dict)

  def _assertReturns(self,
                     x,
                     x_shape,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Assert crop-or-pad of x produces y for tensor and python sizes."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
                                            use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Assert crop-or-pad raises an error whose message contains err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      try:
        self._ResizeImageWithCropOrPad(x, target_height, target_width,
                                       use_tensor_inputs)
      except Exception as e:
        if err_msg not in str(e):
          raise
      else:
        raise AssertionError("Exception not raised: %s" % err_msg)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Assert static shape inference for the given placeholder shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_crop_or_pad(image, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  @test_util.run_deprecated_v1
  def testNoOp(self):
    """Targeting the image's own size leaves it unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, x, x_shape)

  @test_util.run_deprecated_v1
  def testPad(self):
    """Growing a dimension zero-pads symmetrically (extra pixel at the end)."""
    # Pad even along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
    y_shape = [2, 6, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Pad odd along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
    y_shape = [2, 7, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Pad even along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
    y_shape = [4, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Pad odd along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
    y_shape = [5, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)

  @test_util.run_deprecated_v1
  def testCrop(self):
    """Shrinking a dimension crops centrally (extra pixel dropped at start)."""
    # Crop even along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [2, 3, 6, 7]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Crop odd along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    x_shape = [2, 6, 1]
    y = [2, 3, 4, 8, 9, 10]
    y_shape = [2, 3, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Crop even along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [4, 2, 1]
    y = [3, 4, 5, 6]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Crop odd along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
    x_shape = [8, 2, 1]
    y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    y_shape = [5, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)

  @test_util.run_deprecated_v1
  def testCropAndPad(self):
    """One dimension can be cropped while the other is padded."""
    # Pad along row but crop along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 0, 2, 3, 6, 7, 0, 0]
    y_shape = [4, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Crop along row but pad along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [4, 2, 1]
    y = [0, 3, 4, 0, 0, 5, 6, 0]
    y_shape = [2, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)

  @test_util.run_deprecated_v1
  def testShapeInference(self):
    """Static shapes infer to [55, 66, C] for every partially-known input."""
    self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
    self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
    self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
    self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
    self._assertShapeInference(None, 55, 66, [55, 66, None])

  @test_util.run_deprecated_v1
  def testNon3DInput(self):
    """2-D and 5-D inputs are rejected; only 3-D or 4-D images are allowed."""
    # Input image is not 3D
    x = [0] * 15
    target_height, target_width = [4, 4]
    for x_shape in ([3, 5],):
      self._assertRaises(x, x_shape, target_height, target_width,
                         "'image' must have either 3 or 4 dimensions.")
    for x_shape in ([1, 3, 5, 1, 1],):
      self._assertRaises(x, x_shape, target_height, target_width,
                         "'image' must have either 3 or 4 dimensions.")

  @test_util.run_deprecated_v1
  def testZeroLengthInput(self):
    """Zero-sized dimensions are rejected at graph build or run time."""
    # Input image has 0-length dimension(s).
    target_height, target_width = [1, 1]
    x = []
    for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
      self._assertRaises(
          x,
          x_shape,
          target_height,
          target_width,
          "all dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])
      # The original error message does not contain back slashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also needs to be changed.
      self._assertRaises(
          x,
          x_shape,
          target_height,
          target_width,
          "all dims of \\'image.shape\\' must be > 0",
          use_tensor_inputs_options=[True])

  @test_util.run_deprecated_v1
  def testBadParams(self):
    """Non-positive target sizes are rejected with specific messages."""
    x_shape = [4, 4, 1]
    x = np.zeros(x_shape)
    # target_height <= 0
    target_height, target_width = [0, 5]
    self._assertRaises(x, x_shape, target_height, target_width,
                       "target_height must be > 0")
    # target_width <= 0
    target_height, target_width = [5, 0]
    self._assertRaises(x, x_shape, target_height, target_width,
                       "target_width must be > 0")

  @test_util.run_deprecated_v1
  def testNameScope(self):
    """The op is created under the resize_image_with_crop_or_pad name scope."""
    image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
    y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
    self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.cached_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
# Encode it, then decode it, then encode it
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
# Combined decode+crop should have the same shape inference
self.assertAllEqual(image1_crop.get_shape().as_list(),
image2.get_shape().as_list())
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
@test_util.run_deprecated_v1
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
# Encode it, then decode it, then encode it
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
lambda e: "Invalid JPEG data or crop window" in str(e)):
self.evaluate(result)
def testSynthetic(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session(use_gpu=True) as sess:
# Compare decoding with both dct_option=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
@test_util.run_deprecated_v1
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
self.assertEqual(image_shape.tolist(), [256, 128, 3])
@test_util.run_deprecated_v1
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
# Cmyk jpeg image has 4 channels.
self.assertEqual(image_shape.tolist(), [256, 128, 4])
def testRandomJpegQuality(self):
  """random_jpeg_quality should produce differing images across runs."""
  # The fix for a bug in the original random_jpeg_quality is gated on
  # forward compatibility, so only exercise it when the fixed path is
  # active.
  if compat.forward_compatible(2019, 4, 4):
    # Test jpeg quality dynamic randomization.
    with ops.Graph().as_default(), self.test_session():
      np.random.seed(7)
      encoded = io_ops.read_file(
          "tensorflow/core/lib/jpeg/testdata/medium.jpg")
      decoded = image_ops.decode_jpeg(encoded)
      randomized = image_ops.random_jpeg_quality(decoded, 40, 100)
      with self.cached_session(use_gpu=True) as sess:
        # Draw several samples; with dynamic quality most should differ
        # from the first one.
        samples = [sess.run(randomized) for _ in range(5)]
        matches = [
            np.array_equal(samples[0], sample) for sample in samples[1:]
        ]
        self.assertFalse(all(matches))
def testAdjustJpegQuality(self):
  """adjust_jpeg_quality must accept a plain Python int (not a tensor).

  Guards backward compatibility of the jpeg_quality argument.
  """
  with ops.Graph().as_default(), self.test_session():
    np.random.seed(7)
    quality = np.random.randint(40, 100)
    encoded = io_ops.read_file(
        "tensorflow/core/lib/jpeg/testdata/medium.jpg")
    decoded = image_ops.decode_jpeg(encoded)
    adjusted = image_ops.adjust_jpeg_quality(decoded, quality)
    with self.cached_session(use_gpu=True) as sess:
      # Only checks that the op builds and runs with an int quality.
      sess.run(adjusted)
class PngTest(test_util.TensorFlowTestCase):
  """Tests for PNG encode/decode ops."""

  def testExisting(self):
    """Decodes checked-in PNGs at every requested channel count."""
    prefix = "tensorflow/core/lib/png/testdata/"
    inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
              (3, "lena_palette.png"), (4, "lena_palette_trns.png"))
    for native_channels, basename in inputs:
      for requested in (0, 1, 3, 4):
        with self.cached_session(use_gpu=True):
          encoded = io_ops.read_file(prefix + basename)
          decoded = image_ops.decode_png(encoded, channels=requested)
          encoded, decoded = self.evaluate([encoded, decoded])
          # channels=0 falls back to the file's native channel count.
          self.assertEqual(decoded.shape,
                           (26, 51, requested or native_channels))
          if requested == native_channels:
            # Round-trip through encode_png must be lossless.
            reencoded = image_ops.decode_png(image_ops.encode_png(decoded))
            self.assertAllEqual(decoded, self.evaluate(reencoded))

  def testSynthetic(self):
    """Encodes a synthetic ramp and verifies a lossless round trip."""
    with self.cached_session(use_gpu=True):
      ramp = constant_op.constant(_SimpleColorRamp())
      encoded = image_ops.encode_png(ramp, compression=7)
      decoded = image_ops.decode_png(encoded)
      encoded, ramp, decoded = self.evaluate([encoded, ramp, decoded])
      # PNG is lossless, so the decoded image must match exactly.
      self.assertAllEqual(ramp, decoded)
      # Smooth ramps compress well, but not implausibly well.
      self.assertGreaterEqual(len(encoded), 400)
      self.assertLessEqual(len(encoded), 750)

  def testSyntheticUint16(self):
    """Same round trip as testSynthetic, but with 16-bit samples."""
    with self.cached_session(use_gpu=True):
      ramp = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
      encoded = image_ops.encode_png(ramp, compression=7)
      decoded = image_ops.decode_png(encoded, dtype=dtypes.uint16)
      encoded, ramp, decoded = self.evaluate([encoded, ramp, decoded])
      # PNG is lossless at 16 bits as well.
      self.assertAllEqual(ramp, decoded)
      # 16-bit samples roughly double the compressed size bounds.
      self.assertGreaterEqual(len(encoded), 800)
      self.assertLessEqual(len(encoded), 1500)

  def testSyntheticTwoChannel(self):
    """Gray+alpha (two-channel) images round-trip losslessly."""
    with self.cached_session(use_gpu=True):
      # Drop the last channel of an RGB ramp to get a 2-channel image.
      two_channel = _SimpleColorRamp()[:, :, 0:2]
      original = constant_op.constant(two_channel)
      encoded = image_ops.encode_png(original, compression=7)
      decoded = image_ops.decode_png(encoded)
      encoded, original, decoded = self.evaluate(
          [encoded, original, decoded])
      self.assertEqual(2, original.shape[-1])
      self.assertAllEqual(original, decoded)

  def testSyntheticTwoChannelUint16(self):
    """Two-channel round trip with 16-bit samples."""
    with self.cached_session(use_gpu=True):
      # Drop the last channel of an RGB ramp to get a 2-channel image.
      two_channel = _SimpleColorRamp()[:, :, 0:2]
      original = constant_op.constant(two_channel, dtype=dtypes.uint16)
      encoded = image_ops.encode_png(original, compression=7)
      decoded = image_ops.decode_png(encoded, dtype=dtypes.uint16)
      encoded, original, decoded = self.evaluate(
          [encoded, original, decoded])
      self.assertEqual(2, original.shape[-1])
      self.assertAllEqual(original, decoded)

  @test_util.run_deprecated_v1
  def testShape(self):
    """Static shape of decode_png reflects the requested channel count."""
    with self.cached_session(use_gpu=True):
      encoded = constant_op.constant("nonsense")
      for requested in (0, 1, 3):
        decoded = image_ops.decode_png(encoded, channels=requested)
        self.assertEqual(decoded.get_shape().as_list(),
                         [None, None, requested or None])
class GifTest(test_util.TensorFlowTestCase):
  """Tests for decode_gif."""

  def _testValid(self, filename):
    """Decodes a checked-in animated GIF and verifies every frame.

    The test GIFs contain 12 frames of a white bar sweeping over a black
    40x20 image, advancing STRIDE pixels per frame: first left-to-right
    across the width, then (after wrapping) top-to-bottom down the height.

    Args:
      filename: Basename of the GIF under the gif testdata directory.
    """
    prefix = "tensorflow/core/lib/gif/testdata/"
    WIDTH = 20
    HEIGHT = 40
    STRIDE = 5
    shape = (12, HEIGHT, WIDTH, 3)
    with self.cached_session(use_gpu=True):
      gif0 = io_ops.read_file(prefix + filename)
      image0 = image_ops.decode_gif(gif0)
      gif0, image0 = self.evaluate([gif0, image0])
      self.assertEqual(image0.shape, shape)
      for frame_idx, frame in enumerate(image0):
        # Build the expected frame: a white bar at the position implied
        # by the frame index.
        # Fix: removed a leftover debug `print(frame_idx)` here.
        gt = np.zeros(shape[1:], dtype=np.uint8)
        start = frame_idx * STRIDE
        end = (frame_idx + 1) * STRIDE
        if end <= WIDTH:
          # Bar still sweeping horizontally: white vertical stripe.
          gt[:, start:end, :] = 255
        else:
          # Bar has wrapped past the right edge: white horizontal stripe.
          start -= WIDTH
          end -= WIDTH
          gt[start:end, :, :] = 255
        self.assertAllClose(frame, gt)

  def testValid(self):
    self._testValid("scan.gif")
    self._testValid("optimized.gif")

  @test_util.run_deprecated_v1
  def testShape(self):
    """decode_gif reports static shape [frames, height, width, 3]."""
    with self.cached_session(use_gpu=True):
      gif = constant_op.constant("nonsense")
      image = image_ops.decode_gif(gif)
      self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.convert_image_dtype."""

  def _convert(self, original, original_dtype, output_dtype, expected):
    """Converts `original` to `output_dtype` and checks against `expected`.

    For output dtypes where saturating conversion is supported, also
    checks that saturate=True yields the same result on in-range input.
    """
    x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
    y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
    with self.cached_session(use_gpu=True):
      image = constant_op.constant(x_np)
      y = image_ops.convert_image_dtype(image, output_dtype)
      self.assertTrue(y.dtype == output_dtype)
      self.assertAllClose(y.eval(), y_np, atol=1e-5)
      if output_dtype in [
          dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
      ]:
        y_saturate = image_ops.convert_image_dtype(
            image, output_dtype, saturate=True)
        self.assertTrue(y_saturate.dtype == output_dtype)
        self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)

  @test_util.run_deprecated_v1
  def testNoConvert(self):
    """Converting to the same dtype must create only an identity op."""
    with self.cached_session(use_gpu=True):
      image = constant_op.constant([1], dtype=dtypes.uint8)
      image_ops.convert_image_dtype(image, dtypes.uint8)
      y = image_ops.convert_image_dtype(image, dtypes.uint8)
      # Fix: use assertEqual; assertEquals is a deprecated alias.
      self.assertEqual(y.op.type, "Identity")
      self.assertEqual(y.op.inputs[0], image)

  @test_util.run_deprecated_v1
  def testConvertBetweenInteger(self):
    """Integer-to-integer conversion rescales to the new dtype's range."""
    with self.cached_session(use_gpu=True):
      self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
      self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
      self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
      self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])

  @test_util.run_deprecated_v1
  def testConvertBetweenFloat(self):
    """Float-to-float conversion is a plain cast (no rescaling)."""
    with self.cached_session(use_gpu=True):
      self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
                    [-1.0, 0, 1.0, 200000])
      self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
                    [-1.0, 0, 1.0, 200000])

  @test_util.run_deprecated_v1
  def testConvertBetweenIntegerAndFloat(self):
    """Integer <-> float conversion maps the integer range onto [0, 1]."""
    with self.cached_session(use_gpu=True):
      self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
                    [0, 1.0 / 255.0, 1])
      self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
                    [0, 1, 255])

  @test_util.run_deprecated_v1
  def testConvertBetweenInt16AndInt8(self):
    """Conversions among 8/16-bit integer types scale by powers of two."""
    with self.cached_session(use_gpu=True):
      # uint8, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
      self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
      # int8, uint16
      self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
      self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
      # int16, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
      self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
  """Tests the function total_variation() in image_ops.

  We test a few small handmade examples, as well as
  some larger examples using an equivalent numpy
  implementation of the total_variation() function.

  We do NOT test for overflows and invalid / edge-case arguments.
  """

  def _test(self, x_np, y_np):
    """Test that the TensorFlow implementation of
    total_variation(x_np) calculates the values in y_np.

    Note that these may be float-numbers so we only test
    for approximate equality within some narrow error-bound.
    """
    # Create a TensorFlow session.
    with self.cached_session(use_gpu=True):
      # Add a constant to the TensorFlow graph that holds the input.
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      # Add ops for calculating the total variation using TensorFlow.
      y = image_ops.total_variation(images=x_tf)
      # Run the TensorFlow session to calculate the result.
      y_tf = self.evaluate(y)
      # Assert that the results are as expected within
      # some small error-bound in case they are float-values.
      self.assertAllClose(y_tf, y_np)

  def _total_variation_np(self, x_np):
    """Calculate the total variation of x_np using numpy.
    This implements the same function as TensorFlow but
    using numpy instead.

    Args:
      x_np: Numpy array with 3 or 4 dimensions.

    Returns:
      A scalar for a single 3-D image, or an array with one total
      variation per image for a 4-D batch.
    """
    dim = len(x_np.shape)
    if dim == 3:
      # Calculate differences for neighboring pixel-values using slices.
      dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
      dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
      # Sum for all axis.
      sum_axis = None
    elif dim == 4:
      # Calculate differences for neighboring pixel-values using slices.
      dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
      dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
      # Only sum for the last 3 axis.
      sum_axis = (1, 2, 3)
    else:
      # This should not occur in this test-code.
      # NOTE(review): if dim is neither 3 nor 4, dif1/dif2/sum_axis are
      # never bound and the expression below raises NameError. That is
      # acceptable for test-only code but worth knowing.
      pass
    tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
              np.sum(np.abs(dif2), axis=sum_axis)
    return tot_var

  def _test_tensorflow_vs_numpy(self, x_np):
    """Test the TensorFlow implementation against a numpy implementation.

    Args:
      x_np: Numpy array with 3 or 4 dimensions.
    """
    # Calculate the y-values using the numpy implementation.
    y_np = self._total_variation_np(x_np)
    self._test(x_np, y_np)

  def _generateArray(self, shape):
    """Generate an array of the given shape for use in testing.
    The numbers are calculated as the cumulative sum, which
    causes the difference between neighboring numbers to vary."""
    # Flattened length of the array.
    flat_len = np.prod(shape)
    a = np.array(range(flat_len), dtype=int)
    a = np.cumsum(a)
    a = a.reshape(shape)
    return a

  # TODO(b/133851381): re-enable this test.
  def disabledtestTotalVariationNumpy(self):
    """Test the TensorFlow implementation against a numpy implementation.
    The two implementations are very similar so it is possible that both
    have the same bug, which would not be detected by this test. It is
    therefore necessary to test with manually crafted data as well."""
    # Generate a test-array.
    # This is an 'image' with 100x80 pixels and 3 color channels.
    a = self._generateArray(shape=(100, 80, 3))
    # Test the TensorFlow implementation vs. numpy implementation.
    # We use a numpy implementation to check the results that are
    # calculated using TensorFlow are correct.
    self._test_tensorflow_vs_numpy(a)
    self._test_tensorflow_vs_numpy(a + 1)
    self._test_tensorflow_vs_numpy(-a)
    self._test_tensorflow_vs_numpy(1.1 * a)
    # Expand to a 4-dim array.
    b = a[np.newaxis, :]
    # Combine several variations of the image into a single 4-dim array.
    multi = np.vstack((b, b + 1, -b, 1.1 * b))
    # Test that the TensorFlow function can also handle 4-dim arrays.
    self._test_tensorflow_vs_numpy(multi)

  def testTotalVariationHandmade(self):
    """Test the total variation for a few handmade examples."""
    # We create an image that is 2x2 pixels with 3 color channels.
    # The image is very small so we can check the result by hand.
    # Red color channel.
    # The following are the sum of absolute differences between the pixels.
    # sum row dif = (4-1) + (7-2) = 3 + 5 = 8
    # sum col dif = (2-1) + (7-4) = 1 + 3 = 4
    r = [[1, 2], [4, 7]]
    # Green color channel.
    # (Fixed comment: this is `g`, green — the labels for green and blue
    # were previously swapped.)
    # sum row dif = 18 + 29 = 47
    # sum col dif = 7 + 18 = 25
    g = [[11, 18], [29, 47]]
    # Blue color channel.
    # sum row dif = 120 + 193 = 313
    # sum col dif = 47 + 120 = 167
    b = [[73, 120], [193, 313]]
    # Combine the 3 color channels into a single 3-dim array.
    # The shape is (2, 2, 3) corresponding to (height, width and color).
    a = np.dstack((r, g, b))
    # Total variation for this image.
    # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
    tot_var = 564
    # Calculate the total variation using TensorFlow and assert it is correct.
    self._test(a, tot_var)
    # If we add 1 to all pixel-values then the total variation is unchanged.
    self._test(a + 1, tot_var)
    # If we negate all pixel-values then the total variation is unchanged.
    self._test(-a, tot_var)
    # Scale the pixel-values by a float. This scales the total variation as
    # well.
    b = 1.1 * a
    self._test(b, 1.1 * tot_var)
    # Scale by another float.
    c = 1.2 * a
    self._test(c, 1.2 * tot_var)
    # Combine these 3 images into a single array of shape (3, 2, 2, 3)
    # where the first dimension is for the image-number.
    multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
    # Check that TensorFlow correctly calculates the total variation
    # for each image individually and returns the correct array.
    self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
  """Cross-checks the jpeg/png/gif decode ops against each other."""

  @test_util.run_deprecated_v1
  def testFormats(self):
    # Decode each test file with every decoder and assert that all
    # decoders produce identical pixels.
    # NOTE(review): this relies on the decode kernels sniffing the
    # actual byte format of `contents` (e.g. decode_jpeg handling a PNG
    # payload) rather than trusting the op name — confirm against the
    # kernel implementation if this ever starts failing.
    prefix = "tensorflow/core/lib"
    paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
             "gif/testdata/lena.gif")
    decoders = {
        "jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
        "png": functools.partial(image_ops.decode_png, channels=3),
        # decode_gif returns a frame batch; squeeze to a single frame.
        "gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
    }
    with self.cached_session():
      for path in paths:
        contents = io_ops.read_file(os.path.join(prefix, path)).eval()
        images = {}
        for name, decode in decoders.items():
          image = decode(contents).eval()
          self.assertEqual(image.ndim, 3)
          # Compare this decoder's output to every previous decoder's.
          for prev_name, prev in images.items():
            print("path %s, names %s %s, shapes %s %s" %
                  (path, name, prev_name, image.shape, prev.shape))
            self.assertAllEqual(image, prev)
          images[name] = image

  def testError(self):
    # A 12-frame GIF cannot be decoded as a single-frame JPEG/PNG.
    path = "tensorflow/core/lib/gif/testdata/scan.gif"
    with self.cached_session():
      for decode in image_ops.decode_jpeg, image_ops.decode_png:
        with self.assertRaisesOpError(r"Got 12 frames"):
          decode(io_ops.read_file(path)).eval()
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.non_max_suppression and the generated NMS kernels."""

  @test_util.run_deprecated_v1
  def testNonMaxSuppression(self):
    """Selects the highest-scoring box from each of three clusters.

    Fix: this method was previously named `NonMaxSuppressionTest`;
    without the `test` prefix, unittest discovery silently skipped it.
    """
    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    max_output_size_np = 3
    iou_threshold_np = 0.5
    with self.cached_session():
      boxes = constant_op.constant(boxes_np)
      scores = constant_op.constant(scores_np)
      max_output_size = constant_op.constant(max_output_size_np)
      iou_threshold = constant_op.constant(iou_threshold_np)
      selected_indices = image_ops.non_max_suppression(
          boxes, scores, max_output_size, iou_threshold)
      self.assertAllClose(selected_indices.eval(), [3, 0, 5])

  @test_util.run_deprecated_v1
  def testInvalidShape(self):
    """Static shape validation for each argument of non_max_suppression."""
    # The boxes should be 2D of shape [num_boxes, 4].
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be rank 2 but is rank 1"):
      boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
      scores = constant_op.constant([0.9])
      image_ops.non_max_suppression(boxes, scores, 3, 0.5)

    # Each box must have exactly 4 coordinates.
    with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
      boxes = constant_op.constant([[0.0, 0.0, 1.0]])
      scores = constant_op.constant([0.9])
      image_ops.non_max_suppression(boxes, scores, 3, 0.5)

    # The boxes is of shape [num_boxes, 4], and the scores is
    # of shape [num_boxes]. So an error will be thrown.
    with self.assertRaisesRegexp(ValueError,
                                 "Dimensions must be equal, but are 1 and 2"):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9, 0.75])
      image_ops.non_max_suppression(boxes, scores, 3, 0.5)

    # The scores should be 1D of shape [num_boxes].
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be rank 1 but is rank 2"):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([[0.9]])
      image_ops.non_max_suppression(boxes, scores, 3, 0.5)

    # The max_output_size should be a scalar (0-D).
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be rank 0 but is rank 1"):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      image_ops.non_max_suppression(boxes, scores, [3], 0.5)

    # The iou_threshold should be a scalar (0-D).
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be rank 0 but is rank 2"):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])

  @test_util.run_deprecated_v1
  def testDataTypes(self):
    """All generated NMS kernels accept float16/float32 (GitHub #20199)."""
    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    max_output_size_np = 3
    iou_threshold_np = 0.5
    score_threshold_np = float("-inf")
    # Note: There are multiple versions of non_max_suppression v2, v3, v4.
    # gen_image_ops.non_max_suppression_v2:
    for dtype in [np.float16, np.float32]:
      with self.cached_session():
        boxes = constant_op.constant(boxes_np, dtype=dtype)
        scores = constant_op.constant(scores_np, dtype=dtype)
        max_output_size = constant_op.constant(max_output_size_np)
        iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
        selected_indices = gen_image_ops.non_max_suppression_v2(
            boxes, scores, max_output_size, iou_threshold).eval()
        self.assertAllClose(selected_indices, [3, 0, 5])
    # gen_image_ops.non_max_suppression_v3
    for dtype in [np.float16, np.float32]:
      with self.cached_session():
        boxes = constant_op.constant(boxes_np, dtype=dtype)
        scores = constant_op.constant(scores_np, dtype=dtype)
        max_output_size = constant_op.constant(max_output_size_np)
        iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
        score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
        selected_indices = gen_image_ops.non_max_suppression_v3(
            boxes, scores, max_output_size, iou_threshold, score_threshold)
        selected_indices = self.evaluate(selected_indices)
        self.assertAllClose(selected_indices, [3, 0, 5])
    # gen_image_ops.non_max_suppression_v4.
    for dtype in [np.float16, np.float32]:
      with self.cached_session():
        boxes = constant_op.constant(boxes_np, dtype=dtype)
        scores = constant_op.constant(scores_np, dtype=dtype)
        max_output_size = constant_op.constant(max_output_size_np)
        iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
        score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
        selected_indices, _ = gen_image_ops.non_max_suppression_v4(
            boxes, scores, max_output_size, iou_threshold, score_threshold)
        selected_indices = self.evaluate(selected_indices)
        self.assertAllClose(selected_indices, [3, 0, 5])
    # gen_image_ops.non_max_suppression_v5.
    soft_nms_sigma_np = float(0.0)
    for dtype in [np.float16, np.float32]:
      with self.cached_session():
        boxes = constant_op.constant(boxes_np, dtype=dtype)
        scores = constant_op.constant(scores_np, dtype=dtype)
        max_output_size = constant_op.constant(max_output_size_np)
        iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
        score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
        soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
        selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
            boxes, scores, max_output_size, iou_threshold, score_threshold,
            soft_nms_sigma)
        selected_indices = self.evaluate(selected_indices)
        self.assertAllClose(selected_indices, [3, 0, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
  """Tests for non_max_suppression_with_scores (Soft-NMS)."""

  @test_util.run_deprecated_v1
  def testSelectFromThreeClustersWithSoftNMS(self):
    """All six boxes are kept but scores of overlapping boxes decay."""
    boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    boxes = constant_op.constant(boxes_data)
    scores = constant_op.constant(scores_data)
    max_output_size = constant_op.constant(6)
    iou_threshold = constant_op.constant(1.0)
    score_threshold = constant_op.constant(0.0)
    soft_nms_sigma = constant_op.constant(0.5)
    selected_indices, selected_scores = \
        image_ops.non_max_suppression_with_scores(
            boxes, scores, max_output_size, iou_threshold, score_threshold,
            soft_nms_sigma)
    selected_indices, selected_scores = self.evaluate(
        [selected_indices, selected_scores])
    # With sigma > 0 nothing is hard-suppressed; boxes come back in
    # decayed-score order instead.
    self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
    self.assertAllClose(selected_scores,
                        [0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
                        rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
  """Tests for non_max_suppression_padded."""

  @test_util.run_deprecated_v1
  def testSelectFromThreeClusters(self):
    """Compares padded and unpadded output for the three-cluster case."""
    boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    boxes = constant_op.constant(boxes_data)
    scores = constant_op.constant(scores_data)
    max_output_size = constant_op.constant(5)
    iou_threshold = constant_op.constant(0.5)
    selected_indices_padded, num_valid_padded = \
        image_ops.non_max_suppression_padded(
            boxes,
            scores,
            max_output_size,
            iou_threshold,
            pad_to_max_output_size=True)
    selected_indices, num_valid = image_ops.non_max_suppression_padded(
        boxes,
        scores,
        max_output_size,
        iou_threshold,
        pad_to_max_output_size=False)
    # Only the padded variant has a fully defined static output shape.
    self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
    self.assertEqual(selected_indices.shape.is_fully_defined(), False)
    with self.cached_session():
      # Padded output is filled with zeros past num_valid entries.
      self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0])
      self.assertEqual(num_valid_padded.eval(), 3)
      self.assertAllClose(selected_indices.eval(), [3, 0, 5])
      self.assertEqual(num_valid.eval(), 3)

  @test_util.run_deprecated_v1
  def testSelectFromContinuousOverLap(self):
    """IOU suppression plus score threshold over a chain of shifted boxes."""
    boxes_data = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
                  [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
    scores_data = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
    boxes = constant_op.constant(boxes_data)
    scores = constant_op.constant(scores_data)
    max_output_size = constant_op.constant(3)
    iou_threshold = constant_op.constant(0.5)
    score_threshold = constant_op.constant(0.1)
    selected_indices, num_valid = image_ops.non_max_suppression_padded(
        boxes,
        scores,
        max_output_size,
        iou_threshold,
        score_threshold)
    # Without padding the static output shape stays dynamic.
    self.assertEqual(selected_indices.shape.is_fully_defined(), False)
    with self.cached_session():
      self.assertAllClose(selected_indices.eval(), [0, 2, 4])
      self.assertEqual(num_valid.eval(), 3)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
  """Tests for non_max_suppression_with_overlaps."""

  @test_util.run_deprecated_v1
  def testSelectOneFromThree(self):
    """Only the middle (highest-scoring) box should survive."""
    overlap_matrix = [
        [1.0, 0.7, 0.2],
        [0.7, 1.0, 0.0],
        [0.2, 0.0, 1.0],
    ]
    score_data = [0.7, 0.9, 0.1]
    overlaps = constant_op.constant(overlap_matrix)
    scores = constant_op.constant(score_data)
    max_output_size = constant_op.constant(3)
    overlap_threshold = 0.6
    score_threshold = 0.4
    selected_indices = image_ops.non_max_suppression_with_overlaps(
        overlaps, scores, max_output_size, overlap_threshold, score_threshold)
    with self.cached_session():
      # Box 2 is below the score threshold; box 0 overlaps box 1 above
      # the overlap threshold and loses on score.
      self.assertAllClose(selected_indices.eval(), [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
  """Tests utility function used by ssim() and psnr()."""

  @test_util.run_deprecated_v1
  def testWrongDims(self):
    """Rank-deficient input must fail the runtime shape checks."""
    image_ph = array_ops.placeholder(dtype=dtypes.float32)
    bad_value = np.array((2, 2))
    with self.cached_session(use_gpu=True) as sess:
      _, _, checks = image_ops_impl._verify_compatible_image_shapes(
          image_ph, image_ph)
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(checks, {image_ph: bad_value})

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    """Images with different spatial sizes must fail the checks."""
    first_ph = array_ops.placeholder(dtype=dtypes.float32)
    second_ph = array_ops.placeholder(dtype=dtypes.float32)
    first_value = np.array([1, 2, 2, 1])
    second_value = np.array([1, 3, 3, 1])
    with self.cached_session(use_gpu=True) as sess:
      _, _, checks = image_ops_impl._verify_compatible_image_shapes(
          first_ph, second_ph)
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(checks, {first_ph: first_value, second_ph: second_value})
class PSNRTest(test_util.TensorFlowTestCase):
  """Tests for PSNR."""

  def _LoadTestImage(self, sess, filename):
    # Loads one checked-in JPEG as float32 in [0, 1] and adds a leading
    # batch dimension. INTEGER_ACCURATE keeps the decode deterministic
    # so the golden PSNR values below are stable.
    content = io_ops.read_file(os.path.join(
        "tensorflow/core/lib/psnr/testdata", filename))
    im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
    im = image_ops.convert_image_dtype(im, dtypes.float32)
    im, = self.evaluate([im])
    return np.expand_dims(im, axis=0)

  def _LoadTestImages(self):
    # The same photo saved at JPEG qualities 20, 72 and 95.
    with self.cached_session(use_gpu=True) as sess:
      q20 = self._LoadTestImage(sess, "cat_q20.jpg")
      q72 = self._LoadTestImage(sess, "cat_q72.jpg")
      q95 = self._LoadTestImage(sess, "cat_q95.jpg")
      return q20, q72, q95

  def _PSNR_NumPy(self, orig, target, max_value):
    """Numpy implementation of PSNR."""
    # MSE over the last three axes (height, width, channels) gives one
    # PSNR value per batch element.
    mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
    return 20 * np.log10(max_value) - 10 * np.log10(mse)

  def _RandomImage(self, shape, max_val):
    """Returns an image or image batch with given shape."""
    return np.random.rand(*shape).astype(np.float32) * max_val

  @test_util.run_deprecated_v1
  def testPSNRSingleImage(self):
    # TF PSNR of an unbatched image pair must match the numpy reference.
    image1 = self._RandomImage((8, 8, 1), 1)
    image2 = self._RandomImage((8, 8, 1), 1)
    psnr = self._PSNR_NumPy(image1, image2, 1)
    with self.cached_session(use_gpu=True):
      tf_image1 = constant_op.constant(image1, shape=image1.shape,
                                       dtype=dtypes.float32)
      tf_image2 = constant_op.constant(image2, shape=image2.shape,
                                       dtype=dtypes.float32)
      tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr").eval()
      self.assertAllClose(psnr, tf_psnr, atol=0.001)

  @test_util.run_deprecated_v1
  def testPSNRMultiImage(self):
    # Same as above but with a batch dimension of 10.
    image1 = self._RandomImage((10, 8, 8, 1), 1)
    image2 = self._RandomImage((10, 8, 8, 1), 1)
    psnr = self._PSNR_NumPy(image1, image2, 1)
    with self.cached_session(use_gpu=True):
      tf_image1 = constant_op.constant(image1, shape=image1.shape,
                                       dtype=dtypes.float32)
      tf_image2 = constant_op.constant(image2, shape=image2.shape,
                                       dtype=dtypes.float32)
      tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, "psnr").eval()
      self.assertAllClose(psnr, tf_psnr, atol=0.001)

  @test_util.run_deprecated_v1
  def testGoldenPSNR(self):
    q20, q72, q95 = self._LoadTestImages()

    # Verify NumPy implementation first.
    # Golden values are generated using GNU Octave's psnr() function.
    psnr1 = self._PSNR_NumPy(q20, q72, 1)
    self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
    psnr2 = self._PSNR_NumPy(q20, q95, 1)
    self.assertNear(29.994, psnr2, 0.001)
    psnr3 = self._PSNR_NumPy(q72, q95, 1)
    self.assertNear(35.302, psnr3, 0.001)

    # Test TensorFlow implementation against the (just verified) numpy one.
    with self.cached_session(use_gpu=True):
      tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
      tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
      tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
      tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, "psnr1").eval()
      tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, "psnr2").eval()
      tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, "psnr3").eval()
      self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
      self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
      self.assertAllClose(psnr3, tf_psnr3, atol=0.001)

  @test_util.run_deprecated_v1
  def testInfinity(self):
    # PSNR of an image with itself is infinite; TF must agree with numpy.
    q20, _, _ = self._LoadTestImages()
    psnr = self._PSNR_NumPy(q20, q20, 1)
    with self.cached_session(use_gpu=True):
      tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
      tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, "psnr").eval()
      self.assertAllClose(psnr, tf_psnr, atol=0.001)

  @test_util.run_deprecated_v1
  def testInt(self):
    # uint8 input with max_val=255 must match float32 input in [0, 1].
    img1 = self._RandomImage((10, 8, 8, 1), 255)
    img2 = self._RandomImage((10, 8, 8, 1), 255)
    img1 = constant_op.constant(img1, dtypes.uint8)
    img2 = constant_op.constant(img2, dtypes.uint8)
    psnr_uint8 = image_ops.psnr(img1, img2, 255)
    img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
    img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
    psnr_float32 = image_ops.psnr(img1, img2, 1.0)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(
          psnr_uint8.eval(), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
  """Tests for SSIM."""

  # Checked-in test images and the golden pairwise SSIM matrix between
  # them (row i / column j is ssim(_filenames[i], _filenames[j])),
  # produced by a Matlab reference implementation (see testAgainstMatlab).
  _filenames = ["checkerboard1.png",
                "checkerboard2.png",
                "checkerboard3.png",]

  _ssim = np.asarray([[1.000000, 0.230880, 0.231153],
                      [0.230880, 1.000000, 0.996828],
                      [0.231153, 0.996828, 1.000000]])

  def _LoadTestImage(self, sess, filename):
    # Loads one checked-in PNG as float32 in [0, 1] and adds a leading
    # batch dimension.
    content = io_ops.read_file(os.path.join(
        "tensorflow/core/lib/ssim/testdata", filename))
    im = image_ops.decode_png(content)
    im = image_ops.convert_image_dtype(im, dtypes.float32)
    im, = self.evaluate([im])
    return np.expand_dims(im, axis=0)

  def _LoadTestImages(self):
    with self.cached_session(use_gpu=True) as sess:
      return [self._LoadTestImage(sess, f) for f in self._filenames]

  def _RandomImage(self, shape, max_val):
    """Returns an image or image batch with given shape."""
    return np.random.rand(*shape).astype(np.float32) * max_val

  @test_util.run_deprecated_v1
  def testAgainstMatlab(self):
    """Tests against values produced by Matlab."""
    img = self._LoadTestImages()
    # Upper triangle (with diagonal) covers every unordered image pair.
    expected = self._ssim[np.triu_indices(3)]

    ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
    ssim = image_ops.ssim(
        *ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session(use_gpu=True):
      scores = [ssim.eval(dict(zip(ph, t)))
                for t in itertools.combinations_with_replacement(img, 2)]
    self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)

  def testBatch(self):
    # Batched SSIM over all distinct pairs must match the golden values.
    img = self._LoadTestImages()
    expected = self._ssim[np.triu_indices(3, k=1)]

    img1, img2 = zip(*itertools.combinations(img, 2))
    img1 = np.concatenate(img1)
    img2 = np.concatenate(img2)

    ssim = image_ops.ssim(
        constant_op.constant(img1),
        constant_op.constant(img2),
        1.0,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)

  def testBroadcast(self):
    # Broadcasting batch dims (1,2) against (2,1) yields the full 2x2
    # pairwise SSIM matrix in one call.
    img = self._LoadTestImages()[:2]
    expected = self._ssim[:2, :2]

    img = constant_op.constant(np.concatenate(img))
    img1 = array_ops.expand_dims(img, axis=0)  # batch dims: 1, 2.
    img2 = array_ops.expand_dims(img, axis=1)  # batch dims: 2, 1.

    ssim = image_ops.ssim(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)

  @test_util.run_deprecated_v1
  def testNegative(self):
    """Tests against negative SSIM index."""
    # A ramp and its mirror image are anti-correlated, which drives the
    # structure term (and hence SSIM) below zero.
    step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
    img1 = np.tile(step, (16, 1))
    img2 = np.fliplr(img1)

    img1 = img1.reshape((1, 16, 16, 1))
    img2 = img2.reshape((1, 16, 16, 1))

    ssim = image_ops.ssim(
        constant_op.constant(img1),
        constant_op.constant(img2),
        255,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertLess(ssim.eval(), 0)

  @test_util.run_deprecated_v1
  def testInt(self):
    # uint8 input with max_val=255 must closely match float32 in [0, 1].
    img1 = self._RandomImage((1, 16, 16, 3), 255)
    img2 = self._RandomImage((1, 16, 16, 3), 255)
    img1 = constant_op.constant(img1, dtypes.uint8)
    img2 = constant_op.constant(img2, dtypes.uint8)
    ssim_uint8 = image_ops.ssim(
        img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
    img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
    ssim_float32 = image_ops.ssim(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(
          ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
  """Tests for MS-SSIM (image_ops.ssim_multiscale)."""

  # Checkerboard fixtures bundled with the TensorFlow source tree.
  _filenames = ["checkerboard1.png",
                "checkerboard2.png",
                "checkerboard3.png",]

  # Precomputed pairwise MS-SSIM matrix for the fixtures (symmetric,
  # unit diagonal); row/column order follows _filenames.
  _msssim = np.asarray([[1.000000, 0.091016, 0.091025],
                        [0.091016, 1.000000, 0.999567],
                        [0.091025, 0.999567, 1.000000]])

  def _LoadTestImage(self, sess, filename):
    """Decodes one PNG fixture to float32 and adds a leading batch dim."""
    content = io_ops.read_file(os.path.join(
        "tensorflow/core/lib/ssim/testdata", filename))
    im = image_ops.decode_png(content)
    im = image_ops.convert_image_dtype(im, dtypes.float32)
    im, = self.evaluate([im])
    return np.expand_dims(im, axis=0)

  def _LoadTestImages(self):
    """Loads every fixture listed in _filenames."""
    with self.cached_session(use_gpu=True) as sess:
      return [self._LoadTestImage(sess, f) for f in self._filenames]

  def _RandomImage(self, shape, max_val):
    """Returns an image or image batch with given shape."""
    return np.random.rand(*shape).astype(np.float32) * max_val

  @test_util.run_deprecated_v1
  def testAgainstMatlab(self):
    """Tests against MS-SSIM computed with Matlab implementation.

    For color images, MS-SSIM scores are averaged over color channels.
    """
    img = self._LoadTestImages()
    expected = self._msssim[np.triu_indices(3)]
    ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
    msssim = image_ops.ssim_multiscale(
        *ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session(use_gpu=True):
      scores = [msssim.eval(dict(zip(ph, t)))
                for t in itertools.combinations_with_replacement(img, 2)]
    self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)

  @test_util.run_deprecated_v1
  def testUnweightedIsDifferentiable(self):
    """Gradients through unweighted MS-SSIM must be finite."""
    img = self._LoadTestImages()
    ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
    # Multiply inputs by a scalar so a gradient w.r.t. that scalar exists.
    scalar = constant_op.constant(1.0, dtype=dtypes.float32)
    scaled_ph = [x * scalar for x in ph]
    msssim = image_ops.ssim_multiscale(
        *scaled_ph,
        max_val=1.0,
        power_factors=(1, 1, 1, 1, 1),
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    grads = gradients.gradients(msssim, scalar)
    with self.cached_session(use_gpu=True) as sess:
      np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})
    self.assertTrue(np.isfinite(np_grads).all())

  def testBatch(self):
    """Tests MS-SSIM computed in batch."""
    img = self._LoadTestImages()
    # Strict upper triangle: distinct pairs only.
    expected = self._msssim[np.triu_indices(3, k=1)]
    img1, img2 = zip(*itertools.combinations(img, 2))
    img1 = np.concatenate(img1)
    img2 = np.concatenate(img2)
    msssim = image_ops.ssim_multiscale(
        constant_op.constant(img1),
        constant_op.constant(img2),
        1.0,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(expected, self.evaluate(msssim), 1e-4)

  def testBroadcast(self):
    """Tests MS-SSIM broadcasting."""
    img = self._LoadTestImages()[:2]
    expected = self._msssim[:2, :2]
    img = constant_op.constant(np.concatenate(img))
    img1 = array_ops.expand_dims(img, axis=0)  # batch dims: 1, 2.
    img2 = array_ops.expand_dims(img, axis=1)  # batch dims: 2, 1.
    score_tensor = image_ops.ssim_multiscale(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)

  def testRange(self):
    """Tests against low MS-SSIM score.

    MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
    If any of the value is negative so that the geometric mean is not
    well-defined, then treat the MS-SSIM score as zero.
    """
    with self.cached_session(use_gpu=True) as sess:
      img1 = self._LoadTestImage(sess, "checkerboard1.png")
      img2 = self._LoadTestImage(sess, "checkerboard3.png")
      # Flat black/white images provoke degenerate per-scale scores.
      images = [img1, img2, np.zeros_like(img1),
                np.full_like(img1, fill_value=255)]
      images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
      msssim_ops = [
          image_ops.ssim_multiscale(
              x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
          for x, y in itertools.combinations(images, 2)
      ]
      msssim = self.evaluate(msssim_ops)
      msssim = np.squeeze(msssim)
    self.assertTrue(np.all(msssim >= 0.0))
    self.assertTrue(np.all(msssim <= 1.0))

  @test_util.run_deprecated_v1
  def testInt(self):
    """uint8 and float32 inputs must yield (nearly) the same score."""
    img1 = self._RandomImage((1, 180, 240, 3), 255)
    img2 = self._RandomImage((1, 180, 240, 3), 255)
    img1 = constant_op.constant(img1, dtypes.uint8)
    img2 = constant_op.constant(img2, dtypes.uint8)
    ssim_uint8 = image_ops.ssim_multiscale(
        img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
    img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
    ssim_float32 = image_ops.ssim_multiscale(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session(use_gpu=True):
      self.assertAllClose(
          ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)

  def testNumpyInput(self):
    """Test case for GitHub issue 28241."""
    image = np.random.random([512, 512, 1])
    score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
    with self.cached_session(use_gpu=True):
      _ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.image_gradients (per-pixel dy/dx differences)."""

  def testImageGradients(self):
    """Checks forward differences on a single-channel 2x4 image."""
    shape = [1, 2, 4, 1]
    img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
    img = array_ops.reshape(img, shape)

    # The last row of dy and last column of dx are zero-padded.
    expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
    expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)

    dy, dx = image_ops.image_gradients(img)
    with self.cached_session():
      actual_dy = self.evaluate(dy)
      actual_dx = self.evaluate(dx)
      self.assertAllClose(expected_dy, actual_dy)
      self.assertAllClose(expected_dx, actual_dx)

  def testImageGradientsMultiChannelBatch(self):
    """Checks gradients for a batch of two 2x3 images with 2 channels."""
    batch = [[[[1, 2], [2, 5], [3, 3]],
              [[8, 4], [5, 1], [9, 8]]],
             [[[5, 3], [7, 9], [1, 6]],
              [[1, 2], [6, 3], [6, 3]]]]
    expected_dy = [[[[7, 2], [3, -4], [6, 5]],
                    [[0, 0], [0, 0], [0, 0]]],
                   [[[-4, -1], [-1, -6], [5, -3]],
                    [[0, 0], [0, 0], [0, 0]]]]
    expected_dx = [[[[1, 3], [1, -2], [0, 0]],
                    [[-3, -3], [4, 7], [0, 0]]],
                   [[[2, 6], [-6, -3], [0, 0]],
                    [[5, 1], [0, 0], [0, 0]]]]
    batch = constant_op.constant(batch)
    assert batch.get_shape().as_list() == [2, 2, 3, 2]
    dy, dx = image_ops.image_gradients(batch)
    with self.cached_session(use_gpu=True):
      actual_dy = self.evaluate(dy)
      actual_dx = self.evaluate(dx)
      self.assertAllClose(expected_dy, actual_dy)
      self.assertAllClose(expected_dx, actual_dx)

  def testImageGradientsBadShape(self):
    """A rank-2 input must raise ValueError."""
    # [2 x 4] image but missing batch and depth dimensions.
    img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
    with self.assertRaises(ValueError):
      image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.sobel_edges.

  The op appends a trailing dimension of size 2 holding the vertical and
  horizontal Sobel responses for each input channel.
  """

  def testSobelEdges1x2x3x1(self):
    """Single 2x3 single-channel image."""
    img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
                               dtype=dtypes.float32, shape=[1, 2, 3, 1])
    expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
                           [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
    sobel = image_ops.sobel_edges(img)
    with self.cached_session(use_gpu=True):
      actual_sobel = self.evaluate(sobel)
      self.assertAllClose(expected, actual_sobel)

  def testSobelEdges5x3x4x2(self):
    """Batch of identical two-channel images; expectation is tiled too."""
    batch_size = 5
    plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
                       [1, 3, 4, 1])
    two_channel = np.concatenate([plane, plane], axis=3)
    batch = np.concatenate([two_channel] * batch_size, axis=0)
    img = constant_op.constant(batch, dtype=dtypes.float32,
                               shape=[batch_size, 3, 4, 2])
    expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
                                 [[6, 0], [0, 6], [-6, 10], [-6, 0]],
                                 [[0, 0], [0, 0], [0, 10], [0, 0]]],
                                [1, 3, 4, 1, 2])
    expected_two_channel = np.concatenate(
        [expected_plane, expected_plane], axis=3)
    expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
    sobel = image_ops.sobel_edges(img)
    with self.cached_session(use_gpu=True):
      actual_sobel = self.evaluate(sobel)
      self.assertAllClose(expected_batch, actual_sobel)
class DecodeImageTest(test_util.TensorFlowTestCase):
  """Tests decode_image against the format-specific decoders.

  Each test decodes the same file twice -- once with the generic
  image_ops.decode_image and once with the format-specific decoder
  followed by convert_image_dtype -- and requires identical results.

  Note: the ``with ... as sess`` bindings of the original code were
  removed because the session object was never used; the context manager
  alone establishes the default session.
  """

  def testJpegUint16(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/jpeg/testdata"
      jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
      image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
      image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
                                             dtypes.uint16)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testPngUint16(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/png/testdata"
      png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
      image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
      image1 = image_ops.convert_image_dtype(
          image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testGifUint16(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/gif/testdata"
      gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
      image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
      image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
                                             dtypes.uint16)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testBmpUint16(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/bmp/testdata"
      bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
      image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
      image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
                                             dtypes.uint16)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testJpegFloat32(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/jpeg/testdata"
      jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
      image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
      image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
                                             dtypes.float32)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testPngFloat32(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/png/testdata"
      png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
      image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
      image1 = image_ops.convert_image_dtype(
          image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testGifFloat32(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/gif/testdata"
      gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
      image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
      image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
                                             dtypes.float32)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testBmpFloat32(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/bmp/testdata"
      bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
      image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
      image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
                                             dtypes.float32)
      image0, image1 = self.evaluate([image0, image1])
      self.assertAllEqual(image0, image1)

  def testExpandAnimations(self):
    with self.cached_session(use_gpu=True):
      base = "tensorflow/core/lib/gif/testdata"
      gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
      # With expand_animations=False, decode_image should return only a
      # single (the first) frame of the animated GIF as a 3-D tensor.
      image0 = image_ops.decode_image(
          gif0, dtype=dtypes.float32, expand_animations=False)
      animation = image_ops.decode_gif(gif0)
      first_frame = array_ops.gather(animation, 0)
      image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
      image0, image1 = self.evaluate([image0, image1])
      self.assertEqual(len(image0.shape), 3)
      self.assertAllEqual(list(image0.shape), [40, 20, 3])
      self.assertAllEqual(image0, image1)
if __name__ == "__main__":
googletest.main()
|
alsrgv/tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
Python
|
apache-2.0
| 202,340
|
[
"Gaussian"
] |
23fc84d0ca0651ad90e6efd6d9ca8466314c8944eb75cee77a2c167a97251662
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
class PRIORITY:
    """Symbolic priority levels (larger value = higher priority)."""
    LOWEST = -100
    LOWER = -50
    LOW = -10
    NORMAL = 0
    HIGH = 10
    HIGHER = 50
    HIGHEST = 100


class SORT_ORDER:
    """Symbolic sort positions (smaller value sorts first)."""
    FIRST = 0
    SECOND = 1
    THIRD = 2
    FOURTH = 3
    FIFTH = 4
    LAST = 100
class DBMS:
    """Canonical display names of the supported database systems."""
    ACCESS = "Microsoft Access"
    DB2 = "IBM DB2"
    FIREBIRD = "Firebird"
    MAXDB = "SAP MaxDB"
    MSSQL = "Microsoft SQL Server"
    MYSQL = "MySQL"
    ORACLE = "Oracle"
    PGSQL = "PostgreSQL"
    SQLITE = "SQLite"
    SYBASE = "Sybase"
    HSQLDB = "HSQLDB"


class DBMS_DIRECTORY_NAME:
    """Lowercase directory-style identifiers for each supported DBMS."""
    ACCESS = "access"
    DB2 = "db2"
    FIREBIRD = "firebird"
    MAXDB = "maxdb"
    MSSQL = "mssqlserver"
    MYSQL = "mysql"
    ORACLE = "oracle"
    PGSQL = "postgresql"
    SQLITE = "sqlite"
    SYBASE = "sybase"
    HSQLDB = "hsqldb"
class CUSTOM_LOGGING:
    """Numeric levels for custom log record categories."""
    PAYLOAD = 9
    TRAFFIC_OUT = 8
    TRAFFIC_IN = 7


class OS:
    """Target operating system names."""
    LINUX = "Linux"
    WINDOWS = "Windows"


class PLACE:
    """Locations within an HTTP request where a parameter can appear."""
    GET = "GET"
    POST = "POST"
    URI = "URI"
    COOKIE = "Cookie"
    USER_AGENT = "User-Agent"
    REFERER = "Referer"
    HOST = "Host"
    CUSTOM_POST = "(custom) POST"
    CUSTOM_HEADER = "(custom) HEADER"


class POST_HINT:
    """Recognized POST body content formats."""
    SOAP = "SOAP"
    JSON = "JSON"
    JSON_LIKE = "JSON-like"
    MULTIPART = "MULTIPART"
    XML = "XML (generic)"
    ARRAY_LIKE = "Array-like"
class HTTPMETHOD:
    """HTTP request method names (per RFC 7231 and RFC 5789)."""
    GET = "GET"
    POST = "POST"
    HEAD = "HEAD"
    PUT = "PUT"
    # Fixed typo: the value was "DETELE", which is not a valid HTTP
    # method token and would be rejected by servers.
    DELETE = "DELETE"
    TRACE = "TRACE"
    OPTIONS = "OPTIONS"
    CONNECT = "CONNECT"
    PATCH = "PATCH"
class NULLCONNECTION:
    """Identifiers for techniques that avoid fetching response bodies."""
    HEAD = "HEAD"
    RANGE = "Range"
    SKIP_READ = "skip-read"


class REFLECTIVE_COUNTER:
    """Hit/miss labels for reflective-value bookkeeping."""
    MISS = "MISS"
    HIT = "HIT"


class CHARSET_TYPE:
    """Numeric identifiers for character-set classes."""
    BINARY = 1
    DIGITS = 2
    HEXADECIMAL = 3
    ALPHA = 4
    ALPHANUM = 5


class HEURISTIC_TEST:
    """Possible outcomes of a heuristic test."""
    CASTED = 1
    NEGATIVE = 2
    POSITIVE = 3
class HASH:
    """Regular expressions recognizing known password-hash formats.

    Every pattern is case-insensitive and anchored to the whole string
    (\\A ... \\Z), so it matches complete hash values only.
    """
    MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z'
    MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z'
    POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z'
    MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z'
    MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z'
    MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z'
    ORACLE = r'(?i)\As:[0-9a-f]{60}\Z'
    ORACLE_OLD = r'(?i)\A[0-9a-f]{16}\Z'  # normalized: [01-9a-f] == [0-9a-f]
    MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z'
    SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z'
    # Fixed: SHA-224/384/512 hex digests are 56/96/128 characters long
    # (28/48/64 bytes).  The previous patterns used the byte counts, so
    # they never matched real digests and SHA512_GENERIC ({64}) wrongly
    # matched SHA-256 hex digests instead.
    SHA224_GENERIC = r'(?i)\A[0-9a-f]{56}\Z'
    SHA384_GENERIC = r'(?i)\A[0-9a-f]{96}\Z'
    SHA512_GENERIC = r'(?i)\A[0-9a-f]{128}\Z'
    CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z'
    WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z'
# Reference: http://www.zytrax.com/tech/web/mobile_ids.html
class MOBILES:
    """(display name, User-Agent string) pairs for popular mobile devices."""
    BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+")
    GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1")
    HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)")
    HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")
    IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3")
    NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19")
    NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344")
class PROXY_TYPE:
    """Supported proxy protocols."""
    HTTP = "HTTP"
    HTTPS = "HTTPS"
    SOCKS4 = "SOCKS4"
    SOCKS5 = "SOCKS5"


class REGISTRY_OPERATION:
    """Windows registry operation names."""
    READ = "read"
    ADD = "add"
    DELETE = "delete"


class DUMP_FORMAT:
    """Supported output formats for dumped data."""
    CSV = "CSV"
    HTML = "HTML"
    SQLITE = "SQLITE"


class HTTP_HEADER:
    """Standard HTTP header field names."""
    ACCEPT = "Accept"
    ACCEPT_CHARSET = "Accept-Charset"
    ACCEPT_ENCODING = "Accept-Encoding"
    ACCEPT_LANGUAGE = "Accept-Language"
    AUTHORIZATION = "Authorization"
    CACHE_CONTROL = "Cache-Control"
    CONNECTION = "Connection"
    CONTENT_ENCODING = "Content-Encoding"
    CONTENT_LENGTH = "Content-Length"
    CONTENT_RANGE = "Content-Range"
    CONTENT_TYPE = "Content-Type"
    COOKIE = "Cookie"
    SET_COOKIE = "Set-Cookie"
    HOST = "Host"
    PRAGMA = "Pragma"
    PROXY_AUTHORIZATION = "Proxy-Authorization"
    PROXY_CONNECTION = "Proxy-Connection"
    RANGE = "Range"
    REFERER = "Referer"
    SERVER = "Server"
    USER_AGENT = "User-Agent"
    TRANSFER_ENCODING = "Transfer-Encoding"
    URI = "URI"
    VIA = "Via"
class EXPECTED:
    """Expected value types for query results."""
    BOOL = "bool"
    INT = "int"


class OPTION_TYPE:
    """Value types of configuration options."""
    BOOLEAN = "boolean"
    INTEGER = "integer"
    FLOAT = "float"
    STRING = "string"


class HASHDB_KEYS:
    """Keys under which values are persisted in the hash database."""
    DBMS = "DBMS"
    CONF_TMP_PATH = "CONF_TMP_PATH"
    KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS"
    KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS"
    KB_BRUTE_TABLES = "KB_BRUTE_TABLES"
    KB_CHARS = "KB_CHARS"
    KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS"
    KB_INJECTIONS = "KB_INJECTIONS"
    KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE"
    OS = "OS"


class REDIRECTION:
    """Yes/no answers used for redirect handling."""
    YES = "Y"
    NO = "N"
class PAYLOAD:
    """Payload classification tables (numeric code -> description)."""

    # SQL injection technique codes.
    SQLINJECTION = {
        1: "boolean-based blind",
        2: "error-based",
        3: "UNION query",
        4: "stacked queries",
        5: "AND/OR time-based blind",
        6: "inline query",
    }

    # Parameter quoting/escaping styles.
    PARAMETER = {
        1: "Unescaped numeric",
        2: "Single quoted string",
        3: "LIKE single quoted string",
        4: "Double quoted string",
        5: "LIKE double quoted string",
    }

    # Risk levels.
    RISK = {
        0: "No risk",
        1: "Low risk",
        2: "Medium risk",
        3: "High risk",
    }

    # SQL clauses a payload may target (0 = applies everywhere).
    CLAUSE = {
        0: "Always",
        1: "WHERE",
        2: "GROUP BY",
        3: "ORDER BY",
        4: "LIMIT",
        5: "OFFSET",
        6: "TOP",
        7: "Table name",
        8: "Column name",
    }
class METHOD:
    """Response-evaluation method names."""
    COMPARISON = "comparison"
    GREP = "grep"
    TIME = "time"
    UNION = "union"


class TECHNIQUE:
    """Numeric codes for SQL injection techniques
    (same numbering as PAYLOAD.SQLINJECTION)."""
    BOOLEAN = 1
    ERROR = 2
    UNION = 3
    STACKED = 4
    TIME = 5
    QUERY = 6


class WHERE:
    """Numeric codes for how a payload relates to the original value."""
    ORIGINAL = 1
    NEGATIVE = 2
    REPLACE = 3


class WIZARD:
    """Task-name tuples for the wizard's basic/intermediate/all modes."""
    BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba")
    INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs")
    ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll")


class ADJUST_TIME_DELAY:
    """Tri-state switch for automatic time-delay adjustment."""
    DISABLE = -1
    NO = 0
    YES = 1


class WEB_API:
    """Server-side web technology identifiers (file extensions)."""
    PHP = "php"
    ASP = "asp"
    ASPX = "aspx"
    JSP = "jsp"
class CONTENT_TYPE:
    """Numeric identifiers for the kinds of retrievable content."""
    TECHNIQUES = 0
    DBMS_FINGERPRINT = 1
    BANNER = 2
    CURRENT_USER = 3
    CURRENT_DB = 4
    HOSTNAME = 5
    IS_DBA = 6
    USERS = 7
    PASSWORDS = 8
    PRIVILEGES = 9
    ROLES = 10
    DBS = 11
    TABLES = 12
    COLUMNS = 13
    SCHEMA = 14
    COUNT = 15
    DUMP_TABLE = 16
    SEARCH = 17
    SQL_QUERY = 18
    COMMON_TABLES = 19
    COMMON_COLUMNS = 20
    FILE_READ = 21
    FILE_WRITE = 22
    OS_CMD = 23
    REG_READ = 24


# Maps partial-run task names to their CONTENT_TYPE codes.
PART_RUN_CONTENT_TYPES = {
    "checkDbms": CONTENT_TYPE.TECHNIQUES,
    "getFingerprint": CONTENT_TYPE.DBMS_FINGERPRINT,
    "getBanner": CONTENT_TYPE.BANNER,
    "getCurrentUser": CONTENT_TYPE.CURRENT_USER,
    "getCurrentDb": CONTENT_TYPE.CURRENT_DB,
    "getHostname": CONTENT_TYPE.HOSTNAME,
    "isDba": CONTENT_TYPE.IS_DBA,
    "getUsers": CONTENT_TYPE.USERS,
    "getPasswordHashes": CONTENT_TYPE.PASSWORDS,
    "getPrivileges": CONTENT_TYPE.PRIVILEGES,
    "getRoles": CONTENT_TYPE.ROLES,
    "getDbs": CONTENT_TYPE.DBS,
    "getTables": CONTENT_TYPE.TABLES,
    "getColumns": CONTENT_TYPE.COLUMNS,
    "getSchema": CONTENT_TYPE.SCHEMA,
    "getCount": CONTENT_TYPE.COUNT,
    "dumpTable": CONTENT_TYPE.DUMP_TABLE,
    "search": CONTENT_TYPE.SEARCH,
    "sqlQuery": CONTENT_TYPE.SQL_QUERY,
    "tableExists": CONTENT_TYPE.COMMON_TABLES,
    "columnExists": CONTENT_TYPE.COMMON_COLUMNS,
    "readFile": CONTENT_TYPE.FILE_READ,
    "writeFile": CONTENT_TYPE.FILE_WRITE,
    "osCmd": CONTENT_TYPE.OS_CMD,
    "regRead": CONTENT_TYPE.REG_READ
}
class CONTENT_STATUS:
    """Progress status codes for retrieved content."""
    IN_PROGRESS = 0
    COMPLETE = 1


class AUTH_TYPE:
    """Supported HTTP authentication schemes."""
    BASIC = "basic"
    DIGEST = "digest"
    NTLM = "ntlm"
    PKI = "pki"


class AUTOCOMPLETE_TYPE:
    """Autocompletion dictionary identifiers."""
    SQL = 0
    OS = 1
    SQLMAP = 2
|
pwnieexpress/raspberry_pwn
|
src/pentest/sqlmap/lib/core/enums.py
|
Python
|
gpl-3.0
| 9,259
|
[
"Galaxy"
] |
0fe60ae0a1d83a3000cdc5c187fe7d0379578cccc09a46abdbc3ea261fda2953
|
'''
Wifi Facade.
=============
The :class:`Wifi` is to provide access to the wifi of your mobile/ desktop
devices.
It currently supports `connecting`, `disconnecting`, `scanning`, `getting
available wifi network list` and `getting network information`.
Simple examples
---------------
To enable/ turn on wifi scanning::
>>> from plyer import wifi
>>> wifi.start_scanning()
Once the wifi is enabled/ turned on, then this command starts to scan
all the nearby available wifi networks.
To get network info::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_network_info(name)
Returns network details of the network whose name/ssid is provided in the
`name` parameter.
To connect to a network::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.connect(network, parameters)
This connects to the network whose name/ssid is provided under the `network`
parameter, along with other necessary methods for the connection,
which depend upon the platform.
Please visit the following files for more details about the requirements of
the `parameters` argument in the `connect` method:
plyer/platforms/win/wifi.py
plyer/platforms/macosx/wifi.py
plyer/platforms/linux/wifi.py
To disconnect from wifi::
>>> from plyer import wifi
>>> wifi.disconnect()
This disconnects your device from any wifi network.
To get available wifi networks::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_available_wifi()
This returns all the available wifi networks near the device.
Supported Platforms
-------------------
Windows, OS X, Linux
Ex: 6
----------
from plyer import wifi
wifi.enable()
This enables wifi device.
Ex: 7
----------
from plyer import wifi
wifi.disable()
This disables the wifi device.
'''
class Wifi(object):
    '''
    Wifi Facade.

    Abstract base class: platform backends implement the underscore-prefixed
    hook methods, while callers use the public wrappers which delegate to
    them.  Every hook raises NotImplementedError here.
    '''

    def is_enabled(self):
        '''
        Return enabled status of WiFi hardware.
        '''
        return self._is_enabled()

    def is_connected(self, interface=None):
        '''
        Return connection state of WiFi interface.

        .. versionadded:: 1.3.3
        '''
        return self._is_connected(interface=interface)

    @property
    def interfaces(self):
        '''
        List all available WiFi interfaces.

        .. versionadded:: 1.3.3
        '''
        raise NotImplementedError()

    def start_scanning(self, interface=None):
        '''
        Turn on scanning.
        '''
        return self._start_scanning(interface=interface)

    def get_network_info(self, name):
        '''
        Return a dictionary of the specified network.
        '''
        return self._get_network_info(name=name)

    def get_available_wifi(self):
        '''
        Returns a list of all the available wifi networks.
        '''
        return self._get_available_wifi()

    def connect(self, network, parameters, interface=None):
        '''
        Method to connect to some network.
        '''
        self._connect(
            network=network,
            parameters=parameters,
            interface=interface
        )

    def disconnect(self, interface=None):
        '''
        To disconnect from some network.
        '''
        self._disconnect(interface=interface)

    def enable(self):
        '''
        Wifi interface power state is set to "ON".
        '''
        self._enable()

    def disable(self):
        '''
        Wifi interface power state is set to "OFF".
        '''
        self._disable()

    # private
    # Platform backends override the hooks below.

    def _is_enabled(self):
        raise NotImplementedError()

    def _is_connected(self, interface=None):
        raise NotImplementedError()

    def _start_scanning(self, interface=None):
        raise NotImplementedError()

    def _get_network_info(self, **kwargs):
        raise NotImplementedError()

    def _get_available_wifi(self):
        raise NotImplementedError()

    def _connect(self, **kwargs):
        raise NotImplementedError()

    def _disconnect(self, interface=None):
        raise NotImplementedError()

    def _enable(self):
        raise NotImplementedError()

    def _disable(self):
        raise NotImplementedError()
|
KeyWeeUsr/plyer
|
plyer/facades/wifi.py
|
Python
|
mit
| 4,176
|
[
"VisIt"
] |
a9d21bc3c297a7da4c48708a4c1d9506f757b1478b66fb6d59de3573ffcdcc53
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.io.fits import Header
from ipyvolume.embed import embed_html
# Import the relevant PTS classes and modules
from pts.core.tools import filesystem as fs
from pts.core.tools import introspection
from pts.modeling.basics.models import load_3d_model
from pts.modeling.basics.properties import GalaxyProperties
from pts.magic.core.frame import Frame
from pts.magic.basics.coordinatesystem import CoordinateSystem
from pts.modeling.basics.models import DeprojectionModel3D
from pts.modeling.basics.models import load_2d_model
from pts.core.units.quantity import PhotometricQuantity
from pts.core.units.parsing import parse_quantity
from pts.core.units.parsing import parse_unit as u
from pts.modeling.config.parameters import default_units
from pts.modeling.plotting.model import plot_galaxy_components, generate_html
from pts.core.tools.utils import lazyproperty
# -----------------------------------------------------------------
# Determine the path to the dropbox path and the path of the directory with the data for M81
m81_data_path = fs.join(introspection.get_dropbox_tests_pts_path_for_subproject("modeling"), "M81")

# -----------------------------------------------------------------

# Paths to the fitted 2D model definition files
models_path = fs.join(m81_data_path, "models")
disk2d_path = fs.join(models_path, "disk.mod")
bulge2d_path = fs.join(models_path, "bulge.mod")

# -----------------------------------------------------------------

# Determine paths to the components
path = fs.join(m81_data_path, "components")
bulge_path = fs.join(path, "bulge.mod")
disk_path = fs.join(path, "disk.mod")

# -----------------------------------------------------------------

instrument_name = "earth"

# -----------------------------------------------------------------

# Human-readable titles for the model components
titles = dict()
titles["bulge"] = "Evolved stellar bulge"
titles["old"] = "Evolved stellar disk"
titles["young"] = "Young stars"
titles["ionizing"] = "Ionizing stars"
titles["dust"] = "Dust disk"

# -----------------------------------------------------------------

# Bulge
bulge_template = "BruzualCharlot"
bulge_age = 10
bulge_metallicity = 0.03

# -----------------------------------------------------------------

# Old stellar disk
disk_template = "BruzualCharlot"
disk_age = 8
# disk_metallicity = 0.02
disk_metallicity = 0.03

# -----------------------------------------------------------------

# Young stellar disk
young_template = "BruzualCharlot"
young_age = 0.1
# young_metallicity = 0.02
young_metallicity = 0.03

# -----------------------------------------------------------------

# Ionizing stellar disk
ionizing_metallicity = 0.03  # XU KONG et al. 2000
ionizing_compactness = 6.
ionizing_pressure = 1e12 * u("K/m3")
ionizing_covering_factor = 0.2

# Convert the SFR into a FUV luminosity
sfr = 0.8  # The star formation rate # see Perez-Gonzalez 2006 (mentions Devereux et al 1995)

# -----------------------------------------------------------------

# fuv_young: 6.0068695608165e+36 W/micron
# fuv_ionizing: 2.4590756925069244e+33 W/micron
# dust mass: 15450820.890962543 Msun
fuv_young = PhotometricQuantity(1e36, "W/micron")
fuv_ionizing = PhotometricQuantity(2.5e33, "W/micron")
dust_mass = parse_quantity("1.5e7 Msun")
#dust_mass = 1.5e7 * u("Msun")
hydrocarbon_pops = 25
silicate_pops = 25

# -----------------------------------------------------------------

# Map file names
old_filename = "old_stars.fits"
young_filename = "young_stars.fits"
ionizing_filename = "ionizing_stars.fits"
dust_filename = "dust.fits"

# -----------------------------------------------------------------

# Define the possible free parameters
possible_free_parameters = ["dust_mass", "fuv_young", "fuv_ionizing", "distance"]
default_free_parameters = ["dust_mass", "fuv_young", "fuv_ionizing"]

# Define the free parameter types
free_parameter_types = dict()

# Parameter types
free_parameter_types["dust_mass"] = "mass"
free_parameter_types["fuv_young"] = "spectral luminosity density"
free_parameter_types["fuv_ionizing"] = "spectral luminosity density"
free_parameter_types["distance"] = "length"

# Define free parameter units (looked up per parameter type)
free_parameter_units = dict()
for label in free_parameter_types:
    parameter_type = free_parameter_types[label]
    free_parameter_units[label] = default_units[parameter_type]

# Define the number of digits
parameter_ndigits = dict()
for label in free_parameter_types:
    parameter_ndigits[label] = 3

# Absolute ski file parameters
free_parameters_absolute_paths = dict()

# Stellar component parameters
free_parameters_relative_stellar_component_paths = dict()
free_parameters_relative_stellar_component_paths["fuv_young"] = ("normalization/SpectralLuminosityStellarCompNormalization/luminosity", titles["young"])
free_parameters_relative_stellar_component_paths["fuv_ionizing"] = ("normalization/SpectralLuminosityStellarCompNormalization/luminosity", titles["ionizing"])

# Dust component parameters
free_parameters_relative_dust_component_paths = dict()
free_parameters_relative_dust_component_paths["dust_mass"] = ("normalization/DustMassDustCompNormalization/dustMass", titles["dust"])

# Instrument parameters
free_parameters_relative_instruments_paths = dict()
free_parameters_relative_instruments_paths["distance"] = ("distance", instrument_name)

# Free parameter descriptions
free_parameter_descriptions = dict()
free_parameter_descriptions["dust_mass"] = "total dust mass"
free_parameter_descriptions["fuv_young"] = "FUV spectral luminosity of the young stellar component"
free_parameter_descriptions["fuv_ionizing"] = "FUV spectral luminosity of the ionizing stellar component"
free_parameter_descriptions["distance"] = "galaxy distance"

# -----------------------------------------------------------------

seds_path = fs.join(m81_data_path, "seds")
dustpedia_sed_path = fs.join(seds_path, "DustPedia.dat")

# -----------------------------------------------------------------

reference_wavelength_grid_filename = "wavelengths.txt"

# -----------------------------------------------------------------

# Determine the path
properties_path = fs.join(m81_data_path, "properties.dat")

# -----------------------------------------------------------------

# Determine path to maps directory
maps_path = fs.join(m81_data_path, "maps")
old_map_path = fs.join(maps_path, old_filename)
young_map_path = fs.join(maps_path, young_filename)
ionizing_map_path = fs.join(maps_path, ionizing_filename)
dust_map_path = fs.join(maps_path, dust_filename)

# -----------------------------------------------------------------

header_path = fs.join(maps_path, "header.txt")
# -----------------------------------------------------------------
class M81TestData(object):
"""
This class ...
"""
def __init__(self):
    """
    Initializes the test-data accessor.

    All data is loaded lazily through the ``lazyproperty`` accessors
    below, so the constructor intentionally does nothing.
    """

# -----------------------------------------------------------------

@lazyproperty
def properties(self):
    """
    The galaxy properties, loaded once from ``properties_path``.

    :return: a GalaxyProperties instance.
    """

    # Load
    return GalaxyProperties.from_file(properties_path)

# -----------------------------------------------------------------

@lazyproperty
def disk2d_model(self):
    """
    The 2D disk model, loaded once from ``disk2d_path``.

    :return: the loaded 2D model.
    """

    return load_2d_model(disk2d_path)

# -----------------------------------------------------------------

@lazyproperty
def bulge2d_model(self):
    """
    The 2D bulge model, loaded once from ``bulge2d_path``.

    :return: the loaded 2D model.
    """

    return load_2d_model(bulge2d_path)
# -----------------------------------------------------------------
@lazyproperty
def bulge_fluxdensity(self):
    """
    The flux density of the bulge, taken from the 2D bulge model.

    :return: the bulge flux density.
    """

    # Get the flux density of the bulge
    return self.bulge2d_model.fluxdensity

# -----------------------------------------------------------------

@lazyproperty
def old_scale_height(self):
    """
    Scale height of the old stellar disk, derived from the 2D disk
    scale length.

    :return: the scale height.
    """

    # Get the scale height
    return self.disk2d_model.scalelength / 8.26  # De Geyter et al. 2014

# -----------------------------------------------------------------

@lazyproperty
def old_fluxdensity(self):
    """
    The 3.6 micron flux density of the old stellar disk
    (total minus bulge).

    :return: the flux density.
    """

    # Get the 3.6 micron flux density with the bulge subtracted
    total_i1_fluxdensity = PhotometricQuantity(10.6552814592, "Jy")
    return total_i1_fluxdensity - self.bulge_fluxdensity

# -----------------------------------------------------------------

@lazyproperty
def dust_scale_height(self):
    """
    Scale height of the dust disk (a quarter of the old stellar
    scale height).

    :return: the scale height.
    """

    # scale_height = 260.5 * Unit("pc") # first models
    # dust_scale_height = 200. * u("pc") # M51
    return 0.25 * self.old_scale_height

# -----------------------------------------------------------------

@lazyproperty
def young_scale_height(self):
    """
    Scale height of the young stellar disk (half of the old stellar
    scale height).

    :return: the scale height.
    """

    # Get the scale height
    # scale_height = 150 * Unit("pc") # first models
    # young_scale_height = 100. * u("pc") # M51
    return 0.5 * self.old_scale_height
# -----------------------------------------------------------------
@lazyproperty
def ionizing_scale_height(self):
"""
This function ...
:return:
"""
# Get the scale height
# scale_height = 150 * Unit("pc") # first models
# ionizing_scale_height = 100. * u("pc") # M51
return 0.25 * self.old_scale_height
# -----------------------------------------------------------------
@lazyproperty
def bulge(self):
"""
This function ...
:return:
"""
bulge = load_3d_model(bulge_path)
# No y flattening: this is a mistake in the file
bulge.y_flattening = 1.
return bulge
# -----------------------------------------------------------------
@lazyproperty
def disk(self):
"""
This function ...
:return:
"""
return load_3d_model(disk_path)
# -----------------------------------------------------------------
@lazyproperty
def header(self):
"""
This function ...
:return:
"""
# Determine the path to the header file
return Header.fromtextfile(header_path)
# -----------------------------------------------------------------
@lazyproperty
def wcs(self):
"""
Thisfunction ...
:return:
"""
return CoordinateSystem(header=self.header)
# -----------------------------------------------------------------
@lazyproperty
def old_stars_map(self):
"""
This function ...
:return:
"""
# Old stars
old_map = Frame.from_file(old_map_path)
old_map.wcs = self.wcs
return old_map
# -----------------------------------------------------------------
@lazyproperty
def young_stars_map(self):
"""
This function ...
:return:
"""
# young stars
young_map = Frame.from_file(young_map_path)
young_map.wcs = self.wcs
return young_map
# -----------------------------------------------------------------
@lazyproperty
def ionizing_stars_map(self):
"""
This function ....
:return:
"""
# Ionizing stars
ionizing_map = Frame.from_file(ionizing_map_path)
ionizing_map.wcs = self.wcs
return ionizing_map
# -----------------------------------------------------------------
@lazyproperty
def dust_map(self):
"""
This function ...
:return:
"""
# Dust
dust_map = Frame.from_file(dust_map_path)
dust_map.wcs = self.wcs
return dust_map
# -----------------------------------------------------------------
@lazyproperty
def old_deprojection(self):
"""
This function ...
:return:
"""
return DeprojectionModel3D.from_wcs(self.wcs, self.properties.center, self.properties.distance,
self.properties.position_angle, self.properties.inclination,
old_map_path,
self.old_scale_height)
# -----------------------------------------------------------------
@lazyproperty
def young_deprojection(self):
"""
This function
:return:
"""
return DeprojectionModel3D.from_wcs(self.wcs, self.properties.center, self.properties.distance,
self.properties.position_angle, self.properties.inclination,
young_map_path, self.young_scale_height)
# -----------------------------------------------------------------
@lazyproperty
def ionizing_deprojection(self):
"""
This function ...
:return:
"""
return DeprojectionModel3D.from_wcs(self.wcs, self.properties.center, self.properties.distance,
self.properties.position_angle, self.properties.inclination,
ionizing_map_path, self.ionizing_scale_height)
# -----------------------------------------------------------------
@lazyproperty
def dust_deprojection(self):
"""
This function ...
:return:
"""
return DeprojectionModel3D.from_wcs(self.wcs, self.properties.center, self.properties.distance,
self.properties.position_angle, self.properties.inclination,
dust_map_path, self.dust_scale_height)
# -----------------------------------------------------------------
@lazyproperty
def components(self):
"""
This function ...
:return:
"""
# RETURN
components = {"disk": self.disk, "bulge": self.bulge, "old": self.old_deprojection, "ionizing": self.ionizing_deprojection,
"young": self.young_deprojection, "dust": self.dust_deprojection}
return components
# -----------------------------------------------------------------
def show_components(self):
"""
This function ...
:return:
"""
old_components = {"disk": self.components["disk"], "bulge": self.components["bulge"]}
# Determine the filepath for rendering
filename = "render.html"
filepath = fs.join(introspection.pts_temp_dir, filename)
# Plot, create HTML
box = plot_galaxy_components(old_components, draw=True, show=False)
embed_html(filepath, box)
# Open HTML
fs.open_file(filepath)
# -----------------------------------------------------------------
def render_components_html(self, components, output_path, plot_kwargs=None, render_kwargs=None):
"""
This function ...
:param components:
:param output_path:
:param plot_kwargs:
:param render_kwargs:
:return:
"""
if plot_kwargs is None: plot_kwargs = {}
if render_kwargs is None: render_kwargs = {}
title = "Bulge and disk"
box = plot_galaxy_components(components, draw=True, show=False, **plot_kwargs)
return generate_html(box, title, output_path, **render_kwargs)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/tests/data.py
|
Python
|
agpl-3.0
| 16,247
|
[
"Galaxy"
] |
6c1a2949b46a2d2d6d47c90dea33894469552c451caeab32efd9940858dbd4c7
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
'''
Sql test for appendonly checksum feature
'''
class AOCOAlterColumnTestCase(SQLTestCase):
    '''
    @gucs gp_create_table_random_default_distribution=off
    @optimizer_mode off
    @tags ORCA
    @product_version gpdb: [4.3.4.0-]
    '''
    # NOTE: the docstring above is tinc metadata (GUCs, optimizer mode, tags,
    # supported product versions) parsed by the test framework -- do not edit
    # it as ordinary documentation.
    # Directories (relative to this module) holding the SQL test cases, the
    # expected answer files, and the generated output, respectively.
    sql_dir = 'sql/'
    ans_dir = 'expected/'
    out_dir = 'output/'
|
CraigHarris/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/access_methods/appendonly_checksum/test_appendonly_checksum_sql.py
|
Python
|
apache-2.0
| 1,028
|
[
"ORCA"
] |
d028d1161e99906b462320553db3c7397c556d5fca68074926d9d5364b6e15ac
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBamsignals(RPackage):
    """Extract read count signals from bam files
    This package allows to efficiently obtain count vectors from indexed bam
    files. It counts the number of reads in given genomic ranges and it
    computes reads profiles and coverage profiles. It also handles paired-
    end data."""
    # Bioconductor package: versions are pinned to git commits on the
    # Bioconductor git server rather than to release tarballs.
    homepage = "https://bioconductor.org/packages/bamsignals"
    git      = "https://git.bioconductor.org/packages/bamsignals.git"
    version('1.22.0', commit='5f533969c84212406bcb3ebf725ebb6d77e9947a')
    version('1.16.0', commit='dba9a4ae1613d2700f122ade1e9b90ca8fce5657')
    version('1.14.0', commit='3107d3a35830e879eeddf127a81016ea1ca9b53d')
    version('1.12.1', commit='06b6282df377cf9db58e8016be4ac8ddcc960939')
    version('1.10.0', commit='7499312ce71e8680680eda10b49d7dff682fc776')
    version('1.8.0', commit='b123b83e8e026c9ec91209d4498aff3e95a5de23')
    # R and CRAN/Bioconductor dependencies; Rhtslib minimum versions are tied
    # to specific bamsignals releases via 'when=' constraints.
    depends_on('r@3.2.0:', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-rcpp@0.10.6:', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-zlibbioc', type=('build', 'run'))
    depends_on('r-rhtslib', type=('build', 'run'))
    depends_on('r-rhtslib@1.12.1:', when='@1.12.1:', type=('build', 'run'))
    depends_on('r-rhtslib@1.13.1:', when='@1.14.0:', type=('build', 'run'))
    depends_on('gmake', type='build')
    # not listed as an upstream dependency, but needed for the build
    depends_on('curl')
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-bamsignals/package.py
|
Python
|
lgpl-2.1
| 1,749
|
[
"Bioconductor"
] |
db854fc1ffa9eadc8eb435dc1d28cd7a05d0b05ad78d17d851c08be963f07849
|
# Library imports
import numpy as np
# Package imports
from Model import Model
class Gaussian_Model(Model):
    """
    Log-distance path loss signal disruption model in which the
    environmental noise is modeled with a Gaussian function.
    """

    def __init__(self, arguments):
        """
        Set up the Gaussian model and read its sigma parameter from the
        settings group.
        """
        super(Gaussian_Model, self).__init__(arguments)
        self._sigma = self._settings.get("sigma")

    @property
    def type(self):
        """
        The type of this signal disruption model, equal to the name of
        its settings group.
        """
        return "reconstruction_gaussian_model"

    def assign(self, length, source_distances, destination_distances):
        """
        Assign weights to all pixels on the grid for a given link.

        `length` is the link length (Pythagorean theorem); the two distance
        arguments are NumPy arrays with the distances from the source and
        destination sensor locations to each pixel center on the grid.
        """
        deviations = source_distances + destination_distances - length
        return self._gaussian(deviations)

    def _gaussian(self, x):
        """
        Evaluate a Gaussian function (alpha 1, mu 0) at coordinate `x`.
        """
        denominator = 2 * (self._sigma ** 2)
        return np.exp(-(x ** 2) / denominator)
|
timvandermeij/mobile-radio-tomography
|
reconstruction/Gaussian_Model.py
|
Python
|
gpl-3.0
| 1,487
|
[
"Gaussian"
] |
f45a1dd576cc16569770801c3812efd213a08e368193aaf4d8d231ff8d840b6a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: dpkg_selections
short_description: Dpkg package selection selections
description:
- Change dpkg package selection state via --get-selections and --set-selections.
version_added: "2.0"
author: Brian Brazil <brian.brazil@boxever.com>
options:
name:
description:
- Name of the package
required: true
selection:
description:
- The selection state to set the package to.
choices: [ 'install', 'hold', 'deinstall', 'purge' ]
required: true
notes:
- This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that.
'''
EXAMPLES = '''
# Prevent python from being upgraded.
- dpkg_selections:
name: python
selection: hold
'''
def main():
    """Entry point: align a package's dpkg selection state with the request.

    Reads the current state via `dpkg --get-selections <name>` and, when it
    differs from the requested selection (and not in check mode), applies it
    via `dpkg --set-selections`. Exits through module.exit_json with
    changed/before/after.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            # required=True matches the documented contract (DOCUMENTATION
            # declares 'selection' as required); previously it could be
            # omitted, leading to a None selection being passed to dpkg.
            selection = dict(required=True, choices=['install', 'hold', 'deinstall', 'purge'])
        ),
        supports_check_mode=True,
    )
    dpkg = module.get_bin_path('dpkg', True)
    name = module.params['name']
    selection = module.params['selection']
    # Get current settings. dpkg prints "<name>\t<state>" for a known
    # package, and nothing for an unknown one.
    rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
    if not out:
        current = 'not present'
    else:
        current = out.split()[1]
    changed = current != selection
    # In check mode (or when already in the desired state) report without acting.
    if module.check_mode or not changed:
        module.exit_json(changed=changed, before=current, after=selection)
    module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
    module.exit_json(changed=changed, before=current, after=selection)
# Legacy Ansible convention: the star-import of module utilities is placed
# after the module code so the snippet injector can expand it.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
|
camradal/ansible
|
lib/ansible/modules/packaging/os/dpkg_selections.py
|
Python
|
gpl-3.0
| 2,580
|
[
"Brian"
] |
70cf3d365ab873f8dc31be0e1e61a41b33a687bbd8a7ff81b4037d21dc250dc6
|
"""
"""
from vtk import vtkExtractUnstructuredGrid, vtkGeometryFilter, vtkAppendFilter, vtkPolyData, vtkCleanPolyData
from pcloudpy.core.filters.base import FilterBase
class ExtractPolyData(FilterBase):
    """
    Extent Extraction from a Point Cloud
    Parameters
    ----------
    extent: tuple, shape=6,
        Extent = (xmin, xmax, ymin, ymax, zmin, zmax)
    """
    def __init__(self, extent):
        # Bounding box (xmin, xmax, ymin, ymax, zmin, zmax) used by update().
        super(ExtractPolyData, self).__init__()
        self.extent = extent
    def set_input(self, input_data):
        # Accept only vtkPolyData; returns True when the input was accepted,
        # False otherwise (input is then left unset).
        if isinstance(input_data, vtkPolyData):
            super(ExtractPolyData, self).set_input(input_data)
            return True
        else:
            return False
    def update(self):
        """
        Run the extraction pipeline and store the result in self.output_:
        append -> extract unstructured grid within extent -> geometry filter
        -> clean (merge points within tolerance 0.01).

        NOTE(review): this mixes old-style VTK calls (AddInput, SetInput)
        with new-style ones (SetInputData, SetInputConnection) -- presumably
        written against VTK 5.x; confirm against the VTK version in use.
        """
        appendFilter = vtkAppendFilter()
        appendFilter.AddInput(self.input_)
        appendFilter.Update()
        extractGrid = vtkExtractUnstructuredGrid()
        extractGrid.SetInputData(appendFilter.GetOutput())
        extractGrid.SetExtent(self.extent[0], self.extent[1], self.extent[2], self.extent[3], self.extent[4], self.extent[5])
        geom = vtkGeometryFilter()
        geom.SetInputConnection(extractGrid.GetOutputPort() )
        geom.Update()
        clean = vtkCleanPolyData()
        clean.PointMergingOn()
        # Merge points closer than this tolerance (fraction of bounding box).
        clean.SetTolerance(0.01)
        clean.SetInput(geom.GetOutput())
        clean.Update()
        self.output_ = clean.GetOutput()
|
mmolero/pcloudpy
|
pcloudpy/core/filters/ExtractPolyData.py
|
Python
|
bsd-3-clause
| 1,428
|
[
"VTK"
] |
a7b2b73228dfdb0e93a7f022fff21792ed5d0446bcda7e05b14460f83f063910
|
# coding: utf8
__version__ = "edofx2csv.py v0.1 - mai 2010"
import sys
from inouk.edofx import OFXParser
from optparse import OptionParser
class StatementTransaction(object):
    '''
    Container for one transaction of a bank statement.
    '''
    def __init__( self, fitid='', trntype='', dtposted='', trnamt='', name='', memo='' ):
        # Store the OFX fields under friendlier attribute names.
        self.fitid, self.type = fitid, trntype
        self.date, self.amount = dtposted, trnamt
        self.name, self.memo = name, memo
class Statement(object):
    '''
    Stores one statement (bank account or credit card) and can export it
    as CSV text.
    '''
    def __init__(self, type=''):
        # Account identification
        self.type = type
        self.currency = ''
        self.bank_id = ''
        self.branch_id = ''
        self.account_id = ''
        # Covered period
        self.start_date = ''
        self.end_date = ''
        # Transactions and closing balance
        self.transaction_list = []
        self.balance = 0
        self.balance_date = ''
    def export_as_csv(self, separator=';', header_line = True):
        """Return the statement as CSV text, one line per transaction,
        optionally preceded by a header line. Amounts use a comma as the
        decimal separator in the AMOUNT column; every line ends with '\\n'."""
        lines = []
        if header_line:
            columns = ('ACCOUNT_TYPE', 'BANK_ID', 'BRANCH_ID', 'ACCOUNT_ID',
                       'FITID', 'TRANSACTION_TYPE', 'TRANSACTION_DATE',
                       'TRANSACTION_AMOUNT', 'TRANSACTION_CREDIT',
                       'TRANSACTION_DEBIT', 'TRANSACTION_NAME',
                       'TRANSACTION_MEMO')
            lines.append(separator.join(columns))
        for txn in self.transaction_list:
            amount_str = str(txn.amount)
            fields = [
                '"%s"' % self.type,
                '"%s"' % self.bank_id,
                '"%s"' % self.branch_id,
                '"%s"' % self.account_id,
                '"%s"' % txn.fitid,
                '"%s"' % txn.type,
                '"%s"' % txn.date.strftime("%d/%m/%Y"),
                amount_str.replace('.', ','),
                amount_str if txn.amount > 0 else '',
                amount_str if txn.amount < 0 else '',
                '"%s"' % txn.name,
                '"%s"' % txn.memo,
            ]
            lines.append(separator.join(fields))
        if not lines:
            return ''
        return '\n'.join(lines) + '\n'
# Global OFX file structure is (without self closing tags) :
# OFX
# SIGNONMSGSRSV1
# BANKMSGSRSV1
# STMTTRNRS : One per bank account
# TRNUID : = Account number for Credit Agricole SRA French Bank
# STATUS
# STMTRS
# CURDEF
# BANKACCTFROM : account description
# BANKTRANLIST
# DTSTART
# DTEND
# STMTRN* : One per statement
# LEDGERBAL
# AVAILBAL
# CREDITCARDMSGSRSV1
# CCSTMTTRNRS : One per card
# TRNUID : = Account number for CA
# STATUS
# CCSTMTRS
# CURDEF
# CCACCTFROM : credit card description
# BANKTRANLIST
# DTSTART
# DTEND
# STMTRN* : One per statement
# LEDGERBAL
# AVAILBAL
#
#
def _statement_transactions(banktranlist):
    '''
    Convert the STMTTRN children of a BANKTRANLIST OFX node into a list of
    StatementTransaction objects. Shared by the bank-account and
    credit-card branches of build_Statement_tree (the loop was previously
    duplicated in both).
    '''
    transactions = []
    for s in banktranlist.STMTTRN:
        st = StatementTransaction()
        st.fitid = s.FITID.val
        st.type = s.TRNTYPE.val
        st.date = s.DTPOSTED.val
        st.amount = s.TRNAMT.val
        st.name = s.NAME.val
        st.memo = s.MEMO.val
        transactions.append(st)
    return transactions
def build_Statement_tree(OFX):
    '''
    Visit the OFXNode tree and build a list of Statement objects: one
    'CHECKING' statement per bank account and one 'CREDIT_CARD' statement
    per credit card found in the file.
    '''
    statement_list = []
    if OFX.BANKMSGSRSV1 :
        # For each account statement...
        for aSTMTTRNRS in OFX.BANKMSGSRSV1.STMTTRNRS:
            stmt = Statement('CHECKING')
            stmt.currency = aSTMTTRNRS.STMTRS.CURDEF.val
            stmt.bank_id = aSTMTTRNRS.STMTRS.BANKACCTFROM.BANKID.val
            stmt.branch_id = aSTMTTRNRS.STMTRS.BANKACCTFROM.BRANCHID.val
            stmt.account_id = aSTMTTRNRS.STMTRS.BANKACCTFROM.ACCTID.val
            stmt.start_date = aSTMTTRNRS.STMTRS.BANKTRANLIST.DTSTART.val
            stmt.end_date = aSTMTTRNRS.STMTRS.BANKTRANLIST.DTEND.val
            # for each transaction in statement
            stmt.transaction_list = _statement_transactions(aSTMTTRNRS.STMTRS.BANKTRANLIST)
            stmt.balance = aSTMTTRNRS.STMTRS.LEDGERBAL.BALAMT.val
            stmt.balance_date = aSTMTTRNRS.STMTRS.LEDGERBAL.DTASOF.val
            statement_list.append(stmt)
    if OFX.CREDITCARDMSGSRSV1:
        # For each credit card statement...
        for aCCSTMTTRNRS in OFX.CREDITCARDMSGSRSV1.CCSTMTTRNRS:
            stmt = Statement('CREDIT_CARD')
            stmt.currency = aCCSTMTTRNRS.CCSTMTRS.CURDEF.val
            # Card statements carry no bank/branch identification in OFX.
            stmt.bank_id = ''
            stmt.branch_id = ''
            stmt.account_id = aCCSTMTTRNRS.CCSTMTRS.CCACCTFROM.ACCTID.val
            stmt.start_date = aCCSTMTTRNRS.CCSTMTRS.BANKTRANLIST.DTSTART.val
            stmt.end_date = aCCSTMTTRNRS.CCSTMTRS.BANKTRANLIST.DTEND.val
            stmt.transaction_list = _statement_transactions(aCCSTMTTRNRS.CCSTMTRS.BANKTRANLIST)
            stmt.balance = aCCSTMTTRNRS.CCSTMTRS.LEDGERBAL.BALAMT.val
            stmt.balance_date = aCCSTMTTRNRS.CCSTMTRS.LEDGERBAL.DTASOF.val
            statement_list.append(stmt)
    return statement_list
def main(source_file):
    """
    Takes a multi-account OFX file as input and writes one CSV file per
    account statement found in it.
    """
    # Read and parse the OFX source; close the input handle even if parsing
    # fails (the original leaked the file object).
    f = open(source_file)
    try:
        p = OFXParser(f.read())
    finally:
        f.close()
    o = p.parse()
    statement_list = build_Statement_tree(o)
    for stmt in statement_list:
        file_name = 'releve_compte_'+stmt.account_id.strip()+'_du_'+str(stmt.start_date.strftime("%d-%m-%Y"))+'_au_'+str(stmt.end_date.strftime("%d-%m-%Y"))+'.csv'
        # Write the CSV export for this statement, closing the file even if
        # the write fails.
        f = open(file_name,'w')
        try:
            f.write(stmt.export_as_csv())
        finally:
            f.close()
# Command-line entry point (Python 2 script): expects exactly one positional
# argument, the OFX file to convert.
if __name__ == '__main__':
    usage = "usage: %prog ofx_file"
    parser = OptionParser(usage, version=__version__)
    (options, args) = parser.parse_args()
    # Require exactly one positional argument, otherwise print usage help.
    if not args or len(args) > 1 :
        print "18ducks - OFX to CSV converter"
        print
        print " use ./edofx2csv.py -h or --help for usage instructions."
        print
        sys.exit(0)
    ret = main( args[0] )
    sys.exit(ret)
|
cmorisse/inouk.edofx
|
tests/edofx2csv.py
|
Python
|
mit
| 6,985
|
[
"VisIt"
] |
f26bfb03d9cccdb80df4867d65137d4dbf2a3264607ee2c2ddf4fc0842624fe5
|
##
# Copyright 2009-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ALADIN, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_libdir
from easybuild.tools.ordereddict import OrderedDict
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_ALADIN(EasyBlock):
    """Support for building/installing ALADIN."""

    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for ALADIN."""
        super(EB_ALADIN, self).__init__(*args, **kwargs)
        # name and path of the generated gmkpack configuration file
        self.conf_file = None
        self.conf_filepath = None
        # subdirectory of the install dir holding the root pack (set in install_step)
        self.rootpack_dir = 'UNKNOWN'
        # saved value of $LIBRARY_PATH, which is unset during configuration
        self.orig_library_path = None

    @staticmethod
    def extra_options():
        """Custom easyconfig parameters for ALADIN."""
        extra_vars = {
            'optional_extra_param': ['default value', "short description", CUSTOM],
        }
        return EasyBlock.extra_options(extra_vars)

    def configure_step(self):
        """Custom configuration procedure for ALADIN."""
        # unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking
        if 'LIBRARY_PATH' in os.environ:
            self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH'])
            self.orig_library_path = os.environ.pop('LIBRARY_PATH')
        # build auxiliary libraries
        auxlibs_dir = None
        my_gnu = None
        if self.toolchain.comp_family() == toolchain.GCC:
            my_gnu = 'y'  # gfortran
            for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']:
                flags = os.getenv(var)
                env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags)
                self.log.info("Updated %s to '%s'" % (var, os.getenv(var)))
        elif self.toolchain.comp_family() == toolchain.INTELCOMP:
            my_gnu = 'i'  # icc/ifort
        else:
            raise EasyBuildError("Don't know how to set 'my_gnu' variable in auxlibs build script.")
        self.log.info("my_gnu set to '%s'" % my_gnu)
        tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_')
        try:
            cwd = os.getcwd()
            os.chdir(self.builddir)
            builddirs = os.listdir(self.builddir)
            auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0]
            os.chdir(auxlibs_dir)
            auto_driver = 'driver_automatic'
            # patch the driver script in place to select compiler/precision/installroot
            for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'):
                line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line)
                line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line)  # always 64-bit real precision
                line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line)  # make libs read-only after build
                line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line)
                sys.stdout.write(line)
            run_cmd("./%s" % auto_driver)
            os.chdir(cwd)
        except OSError as err:
            raise EasyBuildError("Failed to build ALADIN: %s", err)
        # build gmkpack, update PATH and set GMKROOT
        # we build gmkpack here because a config file is generated in the gmkpack install path
        try:
            gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0]
            os.chdir(os.path.join(self.builddir, gmkpack_dir))
            qa = {
                'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n',
            }
            run_cmd_qa("./build_gmkpack", qa)
            os.chdir(cwd)
            paths = os.getenv('PATH').split(':')
            paths.append(os.path.join(self.builddir, gmkpack_dir, 'util'))
            env.setvar('PATH', ':'.join(paths))
            env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir))
        except OSError as err:
            raise EasyBuildError("Failed to build gmkpack: %s", err)
        # generate gmkpack configuration file
        self.conf_file = 'ALADIN_%s' % self.version
        self.conf_filepath = os.path.join(self.builddir, 'gmkpack_support', 'arch', '%s.x' % self.conf_file)
        try:
            if os.path.exists(self.conf_filepath):
                os.remove(self.conf_filepath)
                self.log.info("Removed existing gmpack config file %s" % self.conf_filepath)
            archdir = os.path.dirname(self.conf_filepath)
            if not os.path.exists(archdir):
                mkdir(archdir, parents=True)
        except OSError as err:
            raise EasyBuildError("Failed to remove existing file %s: %s", self.conf_filepath, err)
        mpich = 'n'
        known_mpi_libs = [toolchain.MPICH, toolchain.MPICH2, toolchain.INTELMPI]
        if self.toolchain.options.get('usempi', None) and self.toolchain.mpi_family() in known_mpi_libs:
            mpich = 'y'
        qpref = 'Please type the ABSOLUTE name of '
        qsuff = ', or ignore (environment variables allowed) :'
        qsuff2 = ', or ignore : (environment variables allowed) :'
        comp_fam = self.toolchain.comp_family()
        if comp_fam == toolchain.GCC:
            gribdir = 'GNU'
        elif comp_fam == toolchain.INTELCOMP:
            gribdir = 'INTEL'
        else:
            raise EasyBuildError("Don't know which grib lib dir to use for compiler %s", comp_fam)
        aux_lib_gribex = os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a')
        aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a')
        grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a')
        grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a')
        grib_api_inc = os.path.join(get_software_root('grib_api'), 'include')
        jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a')
        mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED'))
        # netCDF
        netcdf = get_software_root('netCDF')
        netcdf_fortran = get_software_root('netCDF-Fortran')
        if netcdf:
            netcdfinc = os.path.join(netcdf, 'include')
            if netcdf_fortran:
                netcdflib = os.path.join(netcdf_fortran, get_software_libdir('netCDF-Fortran'), 'libnetcdff.a')
            else:
                netcdflib = os.path.join(netcdf, get_software_libdir('netCDF'), 'libnetcdff.a')
            if not os.path.exists(netcdflib):
                raise EasyBuildError("%s does not exist", netcdflib)
        else:
            raise EasyBuildError("netCDF(-Fortran) not available")
        ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ')]  # LDFLAGS have form '-L/path/to'
        # resolve each static LAPACK/BLAS library name against the linker search paths
        lapacklibs = []
        for lib in os.getenv('LAPACK_STATIC_LIBS').split(','):
            libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
            lapacklibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
        lapacklib = ' '.join(lapacklibs)
        blaslibs = []
        for lib in os.getenv('BLAS_STATIC_LIBS').split(','):
            libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
            blaslibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
        blaslib = ' '.join(blaslibs)
        qa = {
            'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y',
            'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich,
            'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'),
            '%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex,
            '%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm,
            '%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib,
            '%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib,
            '%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib,
            '%sthe library netcdf%s' % (qpref, qsuff): netcdflib,
            '%sthe library lapack%s' % (qpref, qsuff): lapacklib,
            '%sthe library blas%s' % (qpref, qsuff): blaslib,
            '%sthe library mpi%s' % (qpref, qsuff): mpilib,
            '%sa MPI dummy library for serial executions, or ignore :' % qpref: '',
            'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc,
            'Please type the directory name where to find fortint.h or ignore :': '',
            'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc,
            'Do you want to define CANARI (y/n) [y] ?': 'y',
            'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '',
            'Please type the name of the script file used to recover local libraries (gget), or ignore :': '',
            'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'),
        }
        f90_seq = os.getenv('F90_SEQ')
        if not f90_seq:
            # F90_SEQ is only defined when usempi is enabled
            f90_seq = os.getenv('F90')
        stdqa = OrderedDict([
            (r'Confirm library .* is .*', 'y'),  # this one needs to be tried first!
            (r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
            (r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
            (r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''),
            (r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file),
        ])
        no_qa = [
            ".*ignored.",
        ]
        env.setvar('GMKTMP', self.builddir)
        env.setvar('GMKFILE', self.conf_file)
        run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa, no_qa=no_qa)
        # set environment variables for installation dirs
        env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack'))
        env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack'))
        env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack'))
        env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack'))
        # patch config file to include right Fortran compiler flags
        regex_subs = [(r"^(FRTFLAGS\s*=.*)$", r"\1 %s" % os.getenv('FFLAGS'))]
        apply_regex_substitutions(self.conf_filepath, regex_subs)

    def build_step(self):
        """No separate build procedure for ALADIN (see install_step)."""
        pass

    def test_step(self):
        """Custom built-in test procedure for ALADIN."""
        if self.cfg['runtest']:
            # NOTE(review): "test-command" looks like a placeholder -- confirm
            # the intended test command before relying on runtest support.
            cmd = "test-command"
            run_cmd(cmd, simple=True, log_all=True, log_output=True)

    def install_step(self):
        """Custom install procedure for ALADIN."""
        try:
            mkdir(os.getenv('ROOTPACK'), parents=True)
            mkdir(os.getenv('HOMEPACK'), parents=True)
        except OSError as err:
            # fixed: previous message had two %s placeholders but only one argument
            raise EasyBuildError("Failed to create rootpack/homepack dirs: %s", err)
        # create rootpack
        [v1, v2] = self.version.split('_')
        (out, _) = run_cmd("source $GMKROOT/util/berootpack && gmkpack -p master -a -r %s -b %s" % (v1, v2), simple=False)
        packdir_regexp = re.compile(r"Creating main pack (.*) \.\.\.")
        res = packdir_regexp.search(out)
        if res:
            self.rootpack_dir = os.path.join('rootpack', res.group(1))
        else:
            raise EasyBuildError("Failed to determine rootpack dir.")
        # copy ALADIN sources to right directory
        try:
            src_dirs = [d for d in os.listdir(self.builddir) if not (d.startswith('auxlib') or d.startswith('gmk'))]
            target = os.path.join(self.installdir, self.rootpack_dir, 'src', 'local')
            self.log.info("Copying sources from %s to %s" % (self.builddir, target))
            for srcdir in src_dirs:
                shutil.copytree(os.path.join(self.builddir, srcdir), os.path.join(target, srcdir))
                self.log.info("Copied %s" % srcdir)
        except OSError as err:
            raise EasyBuildError("Failed to copy ALADIN sources: %s", err)
        if self.cfg['parallel']:
            env.setvar('GMK_THREADS', str(self.cfg['parallel']))
        # build rootpack
        run_cmd(os.path.join(self.installdir, self.rootpack_dir, 'ics_master'))
        # restore original $LIBRARY_PATH
        if self.orig_library_path is not None:
            os.environ['LIBRARY_PATH'] = self.orig_library_path

    def sanity_check_step(self):
        """Custom sanity check for ALADIN."""
        bindir = os.path.join(self.rootpack_dir, 'bin')
        libdir = os.path.join(self.rootpack_dir, 'lib')
        custom_paths = {
            'files': [os.path.join(bindir, x) for x in ['MASTER']] +
                     [os.path.join(libdir, 'lib%s.local.a' % x) for x in ['aeo', 'ald', 'arp', 'bip',
                                                                          'bla', 'mpa', 'mse', 'obt',
                                                                          'odb', 'sat', 'scr', 'sct',
                                                                          'sur', 'surfex', 'tal', 'tfl',
                                                                          'uti', 'xla', 'xrd']],
            'dirs': [],
        }
        super(EB_ALADIN, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_req_guess(self):
        """Custom guesses for environment variables (PATH, ...) for ALADIN."""
        guesses = super(EB_ALADIN, self).make_module_req_guess()
        guesses.update({
            'PATH': [os.path.join(self.rootpack_dir, 'bin')],
        })
        return guesses
|
bartoldeman/easybuild-easyblocks
|
easybuild/easyblocks/a/aladin.py
|
Python
|
gpl-2.0
| 15,308
|
[
"NetCDF"
] |
419bea1d15b3fbaead176a5f97c8624dda306dae20cc968118b4ddf7b039f899
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from djblets.auth.signals import user_registered
from djblets.cache.backend import cache_memoize
from djblets.db.fields import CounterField, JSONField
from djblets.forms.fields import TIMEZONE_CHOICES
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.managers import (ProfileManager,
ReviewRequestVisitManager,
TrophyManager)
from reviewboard.accounts.trophies import trophies_registry
from reviewboard.admin.read_only import is_site_read_only_for
from reviewboard.avatars import avatar_services
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.signals import (reply_published,
review_published,
review_request_published)
from reviewboard.site.models import LocalSite
from reviewboard.site.signals import local_site_user_added
@python_2_unicode_compatible
class ReviewRequestVisit(models.Model):
    """
    A recording of the last time a review request was visited by a user.
    Users have one ReviewRequestVisit entry in the database per review
    request they've visited. This is used to keep track of any updates
    to review requests they've already seen, so that we can intelligently
    inform them that new discussions have taken place.
    """
    # Possible visibility states for a visit.
    VISIBLE = 'V'
    ARCHIVED = 'A'
    MUTED = 'M'
    VISIBILITY = (
        (VISIBLE, 'Visible'),
        (ARCHIVED, 'Archived'),
        (MUTED, 'Muted'),
    )
    # The user who visited the review request.
    user = models.ForeignKey(User, related_name='review_request_visits')
    # The review request that was visited.
    review_request = models.ForeignKey(ReviewRequest, related_name='visits')
    # When the user last visited the review request.
    timestamp = models.DateTimeField(_('last visited'), default=timezone.now)
    # The visibility state of the review request for this user.
    visibility = models.CharField(max_length=1, choices=VISIBILITY,
                                  default=VISIBLE)
    # Set this up with a ReviewRequestVisitManager, which inherits from
    # ConcurrencyManager to help prevent race conditions.
    objects = ReviewRequestVisitManager()
    def __str__(self):
        """Return a string used for the admin site listing."""
        return 'Review request visit'
    class Meta:
        db_table = 'accounts_reviewrequestvisit'
        unique_together = ('user', 'review_request')
        index_together = [('user', 'visibility')]
        verbose_name = _('Review Request Visit')
        verbose_name_plural = _('Review Request Visits')
@python_2_unicode_compatible
class Profile(models.Model):
    """User profile which contains some basic configurable settings."""
    user = models.ForeignKey(User, unique=True)
    # This will redirect new users to the account settings page the first time
    # they log in (or immediately after creating an account). This allows
    # people to fix their real name and join groups.
    first_time_setup_done = models.BooleanField(
        default=False,
        verbose_name=_("first time setup done"),
        help_text=_("Indicates whether the user has already gone through "
                    "the first time setup process by saving their user "
                    "preferences."))
    # Whether the user wants to receive emails
    should_send_email = models.BooleanField(
        default=True,
        verbose_name=_("send email"),
        help_text=_("Indicates whether the user wishes to receive emails."))
    should_send_own_updates = models.BooleanField(
        default=True,
        verbose_name=_("receive emails about own actions"),
        help_text=_("Indicates whether the user wishes to receive emails "
                    "about their own activity."))
    collapsed_diffs = models.BooleanField(
        default=True,
        verbose_name=_("collapsed diffs"),
        help_text=_("Indicates whether diffs should be shown in their "
                    "collapsed state by default."))
    wordwrapped_diffs = models.BooleanField(
        default=True,
        help_text=_("This field is unused and will be removed in a future "
                    "version."))
    syntax_highlighting = models.BooleanField(
        default=True,
        verbose_name=_("syntax highlighting"),
        help_text=_("Indicates whether the user wishes to see "
                    "syntax highlighting in the diffs."))
    is_private = models.BooleanField(
        default=False,
        verbose_name=_("profile private"),
        help_text=_("Indicates whether the user wishes to keep his/her "
                    "profile private."))
    open_an_issue = models.BooleanField(
        default=True,
        verbose_name=_("opens an issue"),
        help_text=_("Indicates whether the user wishes to default "
                    "to opening an issue or not."))
    default_use_rich_text = models.NullBooleanField(
        default=None,
        verbose_name=_('enable Markdown by default'),
        help_text=_('Indicates whether new posts or comments should default '
                    'to being in Markdown format.'))
    # Indicate whether closed review requests should appear in the
    # review request lists (excluding the dashboard).
    show_closed = models.BooleanField(default=True)
    # Serialized per-user sorting and column configuration for the various
    # datagrid pages (review requests, dashboard, submitters, groups).
    sort_review_request_columns = models.CharField(max_length=256, blank=True)
    sort_dashboard_columns = models.CharField(max_length=256, blank=True)
    sort_submitter_columns = models.CharField(max_length=256, blank=True)
    sort_group_columns = models.CharField(max_length=256, blank=True)
    review_request_columns = models.CharField(max_length=256, blank=True)
    dashboard_columns = models.CharField(max_length=256, blank=True)
    submitter_columns = models.CharField(max_length=256, blank=True)
    group_columns = models.CharField(max_length=256, blank=True)
    # A list of starred review requests. This allows users to monitor a
    # review request and receive e-mails on updates without actually being
    # on the reviewer list or commenting on the review. This is similar to
    # adding yourself to a CC list.
    starred_review_requests = models.ManyToManyField(ReviewRequest, blank=True,
                                                     related_name="starred_by")
    # A list of watched groups. This is so that users can monitor groups
    # without actually joining them, preventing e-mails being sent to the
    # user and review requests from entering the Incoming Reviews list.
    starred_groups = models.ManyToManyField(Group, blank=True,
                                            related_name="starred_by")
    # Allows per-user timezone settings
    timezone = models.CharField(choices=TIMEZONE_CHOICES, default='UTC',
                                max_length=30)
    # Miscellaneous user settings, stored as JSON.
    settings = JSONField(null=True, default=dict)
    # Extra data stored as JSON. Old profiles may have stored None here;
    # User.get_profile() normalizes that back to an empty dict.
    extra_data = JSONField(null=True, default=dict)
    objects = ProfileManager()
    @property
    def should_use_rich_text(self):
        """Get whether rich text should be used by default for this user.
        If the user has chosen whether or not to use rich text explicitly,
        then that choice will be respected. Otherwise, the system default is
        used.
        """
        if self.default_use_rich_text is None:
            siteconfig = SiteConfiguration.objects.get_current()
            return siteconfig.get('default_use_rich_text')
        else:
            return self.default_use_rich_text
    @property
    def should_enable_desktop_notifications(self):
        """Return whether desktop notifications should be used for this user.
        If the user has chosen whether or not to use desktop notifications
        explicitly, then that choice will be respected. Otherwise, we
        enable desktop notifications by default.
        Returns:
            bool:
            If the user has set whether they wish to receive desktop
            notifications, then use their preference. Otherwise, we return
            ``True``.
        """
        return (not self.settings or
                self.settings.get('enable_desktop_notifications', True))
    def star_review_request(self, review_request):
        """Mark a review request as starred.
        This will mark a review request as starred for this user and
        immediately save to the database.
        """
        self.starred_review_requests.add(review_request)
        # Keep the starred-public-request counter in sync for review requests
        # that are public and pending or submitted.
        if (review_request.public and
            review_request.status in (ReviewRequest.PENDING_REVIEW,
                                      ReviewRequest.SUBMITTED)):
            site_profile = \
                self.user.get_site_profile(review_request.local_site)
            site_profile.increment_starred_public_request_count()
    def unstar_review_request(self, review_request):
        """Mark a review request as unstarred.
        This will mark a review request as unstarred for this user and
        immediately save to the database.
        """
        self.starred_review_requests.remove(review_request)
        # Mirror of star_review_request(): decrement the same counter.
        if (review_request.public and
            review_request.status in (ReviewRequest.PENDING_REVIEW,
                                      ReviewRequest.SUBMITTED)):
            site_profile = \
                self.user.get_site_profile(review_request.local_site)
            site_profile.decrement_starred_public_request_count()
    def star_review_group(self, review_group):
        """Mark a review group as starred.
        This will mark a review group as starred for this user and
        immediately save to the database.
        """
        self.starred_groups.add(review_group)
    def unstar_review_group(self, review_group):
        """Mark a review group as unstarred.
        This will mark a review group as unstarred for this user and
        immediately save to the database.
        """
        self.starred_groups.remove(review_group)
    def __str__(self):
        """Return a string used for the admin site listing."""
        return self.user.username
    @property
    def avatar_service(self):
        """The avatar service the user has selected.
        Returns:
            djblets.avatars.services.base.AvatarService:
            The avatar service.
        """
        service_id = self.settings.get('avatars', {}).get('avatar_service_id')
        return avatar_services.get_or_default(service_id)
    @avatar_service.setter
    def avatar_service(self, service):
        """Set the avatar service.
        Args:
            service (djblets.avatars.services.base.AvatarService):
                The avatar service.
        """
        self.settings.setdefault('avatars', {})['avatar_service_id'] = \
            service.avatar_service_id
    def get_display_name(self, viewing_user):
        """Return the name to display to the given user.
        If any of the following is True and the user this profile belongs to
        has a full name set, the display name will be the user's full name:
        * The viewing user is authenticated and this profile is public.
        * The viewing user is the user this profile belongs to.
        * The viewing user is an administrator.
        * The viewing user is a LocalSite administrator on any LocalSite for
          which the user whose this profile belongs to is a user.
        Otherwise the display name will be the user's username.
        Args:
            viewing_user (django.contrib.auth.models.User):
                The user who is viewing the profile.
        Returns:
            unicode:
            The name to display.
        """
        if (viewing_user is not None and
            viewing_user.is_authenticated and
            (not self.is_private or
             viewing_user.pk == self.user_id or
             viewing_user.is_admin_for_user(self.user))):
            return self.user.get_full_name() or self.user.username
        else:
            return self.user.username
    def save(self, *args, **kwargs):
        """Save the profile to the database.
        The profile will only be saved if the user is not affected by read-only
        mode.
        Args:
            *args (tuple):
                Positional arguments to pass through to the superclass.
            **kwargs (dict):
                Keyword arguments to pass through to the superclass.
        """
        # Silently skip the save in read-only mode rather than erroring out.
        if not is_site_read_only_for(self.user):
            super(Profile, self).save(*args, **kwargs)
    class Meta:
        db_table = 'accounts_profile'
        verbose_name = _('Profile')
        verbose_name_plural = _('Profiles')
@python_2_unicode_compatible
class LocalSiteProfile(models.Model):
    """User profile information specific to a LocalSite."""
    # The user and global profile this site-specific profile belongs to.
    user = models.ForeignKey(User, related_name='site_profiles')
    profile = models.ForeignKey(Profile, related_name='site_profiles')
    # The LocalSite this profile is scoped to (None means the global site).
    local_site = models.ForeignKey(LocalSite, null=True, blank=True,
                                   related_name='site_profiles')
    # A dictionary of permission that the user has granted. Any permission
    # missing is considered to be False.
    permissions = JSONField(null=True)
    # Counts for quickly knowing how many review requests are incoming
    # (both directly and total), outgoing (pending and total ever made),
    # and starred (public). Each initializer recomputes the count from the
    # database the first time the counter is accessed.
    direct_incoming_request_count = CounterField(
        _('direct incoming review request count'),
        initializer=lambda p: (
            ReviewRequest.objects.to_user_directly(
                p.user, local_site=p.local_site).count()
            if p.user_id else 0))
    total_incoming_request_count = CounterField(
        _('total incoming review request count'),
        initializer=lambda p: (
            ReviewRequest.objects.to_user(
                p.user, local_site=p.local_site).count()
            if p.user_id else 0))
    pending_outgoing_request_count = CounterField(
        _('pending outgoing review request count'),
        initializer=lambda p: (
            ReviewRequest.objects.from_user(
                p.user, p.user, local_site=p.local_site).count()
            if p.user_id else 0))
    total_outgoing_request_count = CounterField(
        _('total outgoing review request count'),
        initializer=lambda p: (
            ReviewRequest.objects.from_user(
                p.user, p.user, None, local_site=p.local_site).count()
            if p.user_id else 0))
    starred_public_request_count = CounterField(
        _('starred public review request count'),
        initializer=lambda p: (
            p.profile.starred_review_requests.public(
                user=None, local_site=p.local_site).count()
            if p.pk else 0))
    def __str__(self):
        """Return a string used for the admin site listing."""
        return '%s (%s)' % (self.user.username, self.local_site)
    class Meta:
        db_table = 'accounts_localsiteprofile'
        unique_together = (('user', 'local_site'),
                           ('profile', 'local_site'))
        verbose_name = _('Local Site Profile')
        verbose_name_plural = _('Local Site Profiles')
class Trophy(models.Model):
    """A trophy represents an achievement given to the user.
    It is associated with a ReviewRequest and a User and can be associated
    with a LocalSite.
    """
    # Registry key identifying the kind of trophy (looked up via
    # trophies_registry in the trophy_type property below).
    category = models.CharField(max_length=100)
    # When the trophy was awarded.
    received_date = models.DateTimeField(default=timezone.now)
    review_request = models.ForeignKey(ReviewRequest, related_name="trophies")
    local_site = models.ForeignKey(LocalSite, null=True,
                                   related_name="trophies")
    user = models.ForeignKey(User, related_name="trophies")
    objects = TrophyManager()
    @cached_property
    def trophy_type(self):
        """The TrophyType instance for this trophy."""
        return trophies_registry.get_for_category(self.category)
    def get_display_text(self):
        """Get the display text for this trophy."""
        return self.trophy_type.get_display_text(self)
    class Meta:
        db_table = 'accounts_trophy'
        verbose_name = _('Trophy')
        verbose_name_plural = _('Trophies')
#
# The following functions are patched onto the User model.
#
def _is_user_profile_visible(self, user=None):
"""Return whether or not the given user can view this user's profile.
Profiles are hidden from unauthenticated users. For authenticated users, a
profile is visible if one of the following is true:
* The profile is not marked as private.
* The viewing user owns the profile.
* The viewing user is a staff member.
* The viewing user is an administrator on a Local Site which the viewed
user is a member.
Args:
user (django.contrib.auth.models.User, optional):
The user for which visibility to the profile is to be determined.
Returns:
bool:
Whether or not the given user can view the profile.
"""
if user is None or user.is_anonymous:
return False
if hasattr(self, 'is_private'):
# This is an optimization used by the web API. It will set
# is_private on this User instance through a query, saving a
# lookup for each instance.
#
# This must be done because select_related() and
# prefetch_related() won't cache reverse foreign key relations.
is_private = self.is_private
else:
is_private = self.get_profile().is_private
return (not is_private or
user == self or
user.is_admin_for_user(self))
def _should_send_email(self):
"""Get whether a user wants to receive emails.
This is patched into the user object to make it easier to deal with missing
Profile objects.
"""
return self.get_profile().should_send_email
def _should_send_own_updates(self):
"""Get whether a user wants to receive emails about their activity.
This is patched into the user object to make it easier to deal with missing
Profile objects.
"""
return self.get_profile().should_send_own_updates
def _get_profile(self, cached_only=False, create_if_missing=True,
                 return_is_new=False):
    """Return the profile for the User.
    The profile will be cached, preventing queries for future lookups.
    If a profile doesn't exist in the database, and a cached-only copy
    isn't being returned, then a profile will be created in the database.
    Version Changed:
        3.0.12:
        Added support for ``create_if_missing`` and ``return_is_new``
        arguments.
    Args:
        cached_only (bool, optional):
            Whether we should only return the profile cached for the user.
            If True, this function will not retrieve an uncached profile or
            create one that doesn't exist. Instead, it will return ``None``.
        create_if_missing (bool, optional):
            Whether to create a site profile if one doesn't already exist.
        return_is_new (bool, optional):
            If ``True``, the result of the call will be a tuple containing
            the profile and a boolean indicating if the profile was
            newly-created.
    Returns:
        Profile or tuple:
        The user's profile.
        If ``return_is_new`` is ``True``, then this will instead return
        ``(Profile, is_new)``.
    Raises:
        Profile.DoesNotExist:
            The profile did not exist. This can only be raised if passing
            ``create_if_missing=False``.
    """
    # Note that we use the same cache variable that a select_related() call
    # would use, ensuring that we benefit from Django's caching when possible.
    profile = getattr(self, '_profile_set_cache', None)
    is_new = False
    if profile is None and not cached_only:
        if create_if_missing:
            profile, is_new = Profile.objects.get_or_create(user=self)
        else:
            # This may raise Profile.DoesNotExist.
            profile = Profile.objects.get(user=self)
        # Point the profile back at this User instance to avoid a further
        # lookup, and cache the profile for future calls.
        profile.user = self
        self._profile_set_cache = profile
    # While modern versions of Review Board set this to an empty dictionary,
    # old versions would initialize this to None. Since we don't want to litter
    # our code with extra None checks everywhere we use it, normalize it here.
    if profile is not None and profile.extra_data is None:
        profile.extra_data = {}
    if return_is_new:
        return profile, is_new
    return profile
def _get_site_profile(self, local_site, cached_only=False,
                      create_if_missing=True, return_is_new=False):
    """Return the LocalSiteProfile for a given LocalSite for the User.
    The site profile will be cached, preventing queries for future lookups.
    If a site profile doesn't exist in the database, and a cached-only copy
    isn't being returned, then a profile will be created in the database,
    unless passing ``create_if_missing=False``.
    Version Changed:
        3.0.12:
        * In previous versions, this would not create a site profile if one
          didn't already exist. Now it does, unless passing
          ``create_if_missing=False``. This change was made to standardize
          behavior between this and :py:meth:`User.get_profile`.
        * Added support for ``cached_only``, ``create_if_missing`` and
          ``return_is_new`` arguments.
    Args:
        local_site (reviewboard.site.models.LocalSite):
            The LocalSite to return a profile for. This is allowed to be
            ``None``, which means the profile applies to their global site
            account.
        cached_only (bool, optional):
            Whether we should only return the profile cached for the user.
            If True, this function will not retrieve an uncached profile or
            create one that doesn't exist. Instead, it will return ``None``.
        create_if_missing (bool, optional):
            Whether to create a site profile if one doesn't already exist.
        return_is_new (bool, optional):
            If ``True``, the result of the call will be a tuple containing
            the profile and a boolean indicating if the profile was
            newly-created.
    Returns:
        LocalSiteProfile or tuple:
        The user's LocalSite profile.
        If ``return_is_new`` is ``True``, then this will instead return
        ``(LocalSiteProfile, is_new)``.
    Raises:
        LocalSiteProfile.DoesNotExist:
            The profile did not exist. This can only be raised if passing
            ``create_if_missing=False``.
    """
    # Per-User cache of site profiles, keyed by LocalSite ID (None for the
    # global site).
    if not hasattr(self, '_site_profiles'):
        self._site_profiles = {}
    if local_site is None:
        local_site_id = None
    else:
        local_site_id = local_site.pk
    is_new = False
    site_profile = self._site_profiles.get(local_site_id)
    if site_profile is None and not cached_only:
        profile = self.get_profile()
        if create_if_missing:
            site_profile, is_new = LocalSiteProfile.objects.get_or_create(
                user=self,
                profile=profile,
                local_site=local_site)
        else:
            # This may raise LocalSiteProfile.DoesNotExist.
            site_profile = LocalSiteProfile.objects.get(
                user=self,
                profile=profile,
                local_site=local_site)
        # Set these directly in order to avoid further lookups.
        site_profile.user = self
        site_profile.profile = profile
        site_profile.local_site = local_site
        self._site_profiles[local_site_id] = site_profile
    if return_is_new:
        return site_profile, is_new
    return site_profile
def _is_admin_for_user(self, user):
"""Return whether or not this user is an administrator for the given user.
Results will be cached for this user so that at most one query is done.
Args:
user (django.contrib.auth.models.User):
The user to check.
Returns:
bool:
Whether or not this user is an administrator for the given user.
"""
if self.is_staff:
return True
if not user or user.is_anonymous:
return False
if not hasattr(self, '_cached_admin_for_users'):
self._cached_admin_for_users = cache_memoize(
'%s-admin-for-users' % self.pk,
lambda: tuple(
User.objects
.filter(local_site__admins=self)
.values_list('pk', flat=True)
))
return user.pk in self._cached_admin_for_users
# Patch the helper functions above onto the stock Django User model, so that
# callers can invoke them as regular methods (e.g. ``user.get_profile()``).
User.is_profile_visible = _is_user_profile_visible
User.get_profile = _get_profile
User.get_site_profile = _get_site_profile
User.should_send_email = _should_send_email
User.should_send_own_updates = _should_send_own_updates
User.is_admin_for_user = _is_admin_for_user
# Order User querysets by username by default.
User._meta.ordering = ('username',)
@receiver(review_request_published)
def _call_compute_trophies(sender, review_request, **kwargs):
    """Compute any trophies awarded when a review request is published.
    Trophies are only computed on the initial publish, i.e. when the review
    request has no change descriptions yet.
    """
    if review_request.public and not review_request.changedescs.exists():
        Trophy.objects.compute_trophies(review_request)
@receiver(review_request_published)
def _call_unarchive_all_for_review_request(sender, review_request, **kwargs):
    """Unarchive all users' visits to a review request when it is published."""
    ReviewRequestVisit.objects.unarchive_all(review_request)
@receiver(review_published)
def _call_unarchive_all_for_review(sender, review, **kwargs):
    """Unarchive all visits to the review's review request on publish."""
    ReviewRequestVisit.objects.unarchive_all(review.review_request_id)
@receiver(reply_published)
def _call_unarchive_all_for_reply(sender, reply, **kwargs):
    """Unarchive all visits to the reply's review request on publish."""
    ReviewRequestVisit.objects.unarchive_all(reply.review_request_id)
@receiver(user_registered)
@receiver(local_site_user_added)
def _add_default_groups(sender, user, local_site=None, **kwargs):
    """Add a user to the appropriate default review groups.
    On registration the user joins the global default groups; when added to
    a LocalSite, they join that LocalSite's default groups instead.
    """
    if local_site:
        candidates = local_site.groups.filter(is_default_group=True)
    else:
        candidates = Group.objects.filter(is_default_group=True,
                                          local_site=None)
    for group in candidates:
        group.users.add(user)
@receiver(m2m_changed, sender=Group.users.through)
def _on_group_user_membership_changed(instance, action, pk_set, reverse,
                                      **kwargs):
    """Handler for when a review group's membership has changed.
    Whenever users are added to or removed from a review group, their
    :py:attr:`~LocalSiteProfile.total_incoming_request_count` counters are
    cleared so they're recomputed on next access, keeping incoming counts
    correct as group memberships change.
    Args:
        instance (django.db.models.Model):
            The instance that was updated. If ``reverse`` is ``True``, then
            this will be a :py:class:`~django.contrib.auth.models.User`.
            Otherwise, it will be ignored.
        action (unicode):
            The membership change action. The incoming count is only cleared
            for ``post_add``, ``post_remove``, or ``pre_clear``.
        pk_set (set of int):
            The user IDs added to the group. If ``reverse`` is ``True``,
            then this is ignored in favor of ``instance``.
        reverse (bool):
            Whether this signal is emitted when adding through the forward
            relation (``True`` -- :py:attr:`Group.users
            <reviewboard.reviews.models.group.Group.users>`) or the reverse
            relation (``False`` -- ``User.review_groups``).
        **kwargs (dict):
            Additional keyword arguments passed to the signal.
    """
    if action not in ('post_add', 'post_remove', 'pre_clear'):
        return
    if reverse:
        q = Q(user=instance) if instance is not None else None
    else:
        q = Q(user__in=pk_set) if pk_set else None
    if q is not None:
        LocalSiteProfile.objects.filter(q).update(
            total_incoming_request_count=None)
|
reviewboard/reviewboard
|
reviewboard/accounts/models.py
|
Python
|
mit
| 28,424
|
[
"VisIt"
] |
cce083090650d56970a60ee79f95fab8281a18a6c58ce5bc0f11cb2562789d9c
|
########################################################################
# File : AgentReactor.py
# Author : Adria Casajus
########################################################################
"""
DIRAC class to execute Agents
Agents are the active part of any DIRAC system; they execute in a cyclic
manner, looking at the state of the system and reacting to it by taking
appropriate actions
All DIRAC Agents must inherit from the basic class AgentModule
In the most common case, DIRAC Agents are executed using the dirac-agent command.
dirac-agent accepts a list of positional arguments. These arguments have the form:
[DIRAC System Name]/[DIRAC Agent Name]
dirac-agent then:
- produces an instance of AgentReactor
- loads the required modules using the AgentReactor.loadAgentModules method
- starts the execution loop using the AgentReactor.go method
Agent modules must be placed under the Agent directory of a DIRAC System.
DIRAC Systems are called XXXSystem where XXX is the [DIRAC System Name], and
must inherit from the base class AgentModule
"""
import time
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader
from DIRAC.Core.Utilities import ThreadScheduler
from DIRAC.Core.Base.AgentModule import AgentModule
class AgentReactor:
    """
    Main interface to DIRAC Agents. It allows one to:
    - define the Agent modules to be executed
    - define the number of cycles to execute
    - steer the execution
    Agents are declared via:
    - loadAgentModule(): for a single Agent
    - loadAgentModules(): for a list of Agents
    The number of cycles to execute for a defined Agent can be set via:
    - setAgentModuleCyclesToExecute()
    The execution of the Agents is done with:
    - runNumCycles(): to execute an additional number of cycles
    - go(): to run the main scheduling loop until all Agents are done
    During the execution of the cycles, each of the Agents can be signaled to stop
    by creating a file named "stop_agent" in its Control Directory.
    """
    def __init__(self, baseAgentName):
        # Mapping of agent name -> agent data dict
        # (classObj, loadName, instanceObj, taskId, running, ...).
        self.__agentModules = {}
        self.__loader = ModuleLoader("Agent", PathFinder.getAgentSection, AgentModule)
        # Mapping of scheduler task id -> agent name.
        self.__tasks = {}
        self.__baseAgentName = baseAgentName
        # The scheduler is driven manually from go(); it has no reactor
        # thread of its own.
        self.__scheduler = ThreadScheduler.ThreadScheduler(enableReactorThread=False, minPeriod=30)
        self.__alive = True
        self.__running = False
    def loadAgentModules(self, modulesList, hideExceptions=False):
        """
        Load all modules required in moduleList, initialize each agent and
        schedule its periodic execution.
        :param modulesList: list of agent module names ("System/AgentName")
        :param hideExceptions: whether to suppress exception logging while loading
        :return: S_OK() on success, S_ERROR() otherwise
        """
        result = self.__loader.loadModules(modulesList, hideExceptions=hideExceptions)
        if not result["OK"]:
            return result
        self.__agentModules = self.__loader.getModules()
        for agentName in self.__agentModules:
            agentData = self.__agentModules[agentName]
            agentData["running"] = False
            try:
                instanceObj = agentData["classObj"](agentName, agentData["loadName"], self.__baseAgentName)
                result = instanceObj.am_initialize()
                if not result["OK"]:
                    return S_ERROR("Error while calling initialize method of %s: %s" % (agentName, result["Message"]))
                agentData["instanceObj"] = instanceObj
            except Exception as excp:
                if not hideExceptions:
                    gLogger.exception("Can't load agent %s" % agentName, lException=excp)
                return S_ERROR("Can't load agent %s: \n %s" % (agentName, excp))
            # Schedule am_go to run every polling period, at most
            # am_getMaxCycles() times. NOTE(review): elapsedTime=agentPeriod
            # presumably makes the first execution due immediately — confirm
            # against ThreadScheduler.addPeriodicTask.
            agentPeriod = instanceObj.am_getPollingTime()
            result = self.__scheduler.addPeriodicTask(
                agentPeriod, instanceObj.am_go, executions=instanceObj.am_getMaxCycles(), elapsedTime=agentPeriod
            )
            if not result["OK"]:
                return result
            taskId = result["Value"]
            self.__tasks[result["Value"]] = agentName
            agentData["taskId"] = taskId
            agentData["running"] = True
        if not self.__agentModules:
            return S_ERROR("No agent module loaded")
        return S_OK()
    def runNumCycles(self, agentName=None, numCycles=1):
        """
        Run all defined agents a given number of cycles.
        :param agentName: optional agent module to load before running
        :param numCycles: number of additional cycles to execute per agent
        :return: S_OK() on success, S_ERROR() otherwise
        """
        if agentName:
            self.loadAgentModules([agentName])
        error = ""
        for aName in self.__agentModules:
            result = self.setAgentModuleCyclesToExecute(aName, numCycles)
            if not result["OK"]:
                error = "Failed to set cycles to execute"
                gLogger.error("%s:" % error, aName)
                break
        if error:
            return S_ERROR(error)
        self.go()
        return S_OK()
    def __finalize(self):
        """
        Execute the finalize method of all Agents; failures are logged but do
        not stop the finalization of the remaining agents.
        """
        for agentName in self.__agentModules:
            try:
                self.__agentModules[agentName]["instanceObj"].finalize()
            except Exception as excp:
                gLogger.exception("Failed to execute finalize for Agent: %s" % agentName, lException=excp)
    def go(self):
        """
        Main method to control the execution of all configured Agents.
        Loops, executing the next due scheduled task, until the scheduler has
        no tasks left; then finalizes all agents.
        """
        # Re-entrancy guard: ignore nested calls while already running.
        if self.__running:
            return
        self.__running = True
        try:
            while self.__alive:
                self.__checkControlDir()
                timeToNext = self.__scheduler.executeNextTask()
                if timeToNext is None:
                    gLogger.info("No more agent modules to execute. Exiting")
                    break
                # Sleep until the next task is due, clamped to [0.5, 5] seconds
                # so stop_agent files are still checked regularly.
                time.sleep(min(max(timeToNext, 0.5), 5))
        finally:
            self.__running = False
            self.__finalize()
    def setAgentModuleCyclesToExecute(self, agentName, maxCycles=1):
        """
        Set number of cycles to execute for a given agent (previously defined).
        :param agentName: name of a loaded agent module
        :param maxCycles: number of extra cycles, added to the cycles already done
        :return: S_OK() on success, S_ERROR() otherwise
        """
        if agentName not in self.__agentModules:
            return S_ERROR("%s has not been loaded" % agentName)
        if maxCycles:
            try:
                # maxCycles is relative: add it to the cycles already executed.
                maxCycles += self.__agentModules[agentName]["instanceObj"].am_getCyclesDone()
            except Exception as excp:
                error = "Can not determine number of cycles to execute"
                gLogger.exception("%s: '%s'" % (error, maxCycles), lException=excp)
                return S_ERROR(error)
        self.__agentModules[agentName]["instanceObj"].am_setOption("MaxCycles", maxCycles)
        self.__scheduler.setNumExecutionsForTask(self.__agentModules[agentName]["taskId"], maxCycles)
        return S_OK()
    def __checkControlDir(self):
        """
        Check for the presence of stop_agent file to stop execution of the
        corresponding Agent; stopped agents are removed from the scheduler.
        """
        for agentName in self.__agentModules:
            if not self.__agentModules[agentName]["running"]:
                continue
            agent = self.__agentModules[agentName]["instanceObj"]
            alive = agent.am_getModuleParam("alive")
            if alive:
                if agent.am_checkStopAgentFile():
                    gLogger.info("Found StopAgent file for agent %s" % agentName)
                    alive = False
            if not alive:
                # Unschedule the agent and clean up the stop_agent marker.
                gLogger.info("Stopping agent module %s" % (agentName))
                self.__scheduler.removeTask(self.__agentModules[agentName]["taskId"])
                del self.__tasks[self.__agentModules[agentName]["taskId"]]
                self.__agentModules[agentName]["running"] = False
                agent.am_removeStopAgentFile()
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Base/AgentReactor.py
|
Python
|
gpl-3.0
| 7,604
|
[
"DIRAC"
] |
153579693bd3d56aed7a1be44ea02f51db76e9b29f4dfcec351d58fd6fe96814
|
#
# Author: Jorg Bornschein <bornschein@fias.uni-frankfurt.de>
# License: Academic Free License (AFL) v3.0
#
"""
The autotable module provides a simple interface to store data from simulation
runs into efficient HDF5 files onto the filesystem.
These files can later be opened
* in Python: PyTables (http://www.pytables.org/)
* in Matlab: hdf5read( <filename>, <tablename> )
* in pure C or C++: libhdf5 (http://www.hdfgroup.org/HDF5/)
Basic example::
import numpy as np
from pulp.utils import autotable
tbl = autotable.AutoTable('~/testhdf.h5')
for t in range(10):
tbl.append('t', t)
tbl.append('D', np.random.randn(20))
This code creates the file :file:`~/testhdf.h5` with two tables, each having 10
rows: The table *t* will store a single integer within each row, where the
table *D* will store a 20 element vector with gaussian distributed random
numbers in each row.
"""
import numpy as np
import tables
class AutoTable:
"""Store data into HDF5 files"""
    def __init__(self, fname=None, compression_level=1):
        """
        Create a new autotable object which will write data into a file called
        fname.
        If fname is not specified (or is None), fname will be derived from
        sys.argv[0] by stripping the extension and adding ".h5". As a result, the
        file will be named like the creating program.
        Compression specifies the compression_level that should be applied when storing data.
        .. note:: If a file named fname existed previously, its content will be deleted!
        """
        # When True, assign() prints a warning before overwriting a table.
        self.warnings = True
        if fname is None:
            fname = self._guess_fname()
        # NOTE(review): tables.openFile is the PyTables 2.x API; newer PyTables
        # renamed it to open_file — confirm the pinned PyTables version.
        self.h5 = tables.openFile(fname, "w")
        self.compression_level = compression_level
        # Mapping of table name -> PyTables table/array object.
        self.tables = {}
        # Per-table type bookkeeping.
        self.types = {}
def close(self):
"""
Close the HDF file behind this AutoTable instance.
"""
self.h5.close()
def append(self, name, value):
"""
Append the dataset *values* into a table called *name*. If a specified
table name does not exist, a new table will be created.
Example:.
tbl.append("T", temp)
tbl.append("image", np.zeros((256,256)) )
"""
if type(value)==np.ma.core.MaskedArray:
value = value.data
if type(value)==str:
self._appendstr(name, value)
return
if np.isscalar(value):
value = np.asarray(value)
if not isinstance(value, np.ndarray):
raise TypeError("Don't know how to handle values of type '%s'", type(value))
# Check if we need to create a new table
if not self.tables.has_key(name):
self._create_table(name, value)
value = value.reshape( (1,)+value.shape )
try:
self.tables[name].append(value)
except ValueError:
raise TypeError('Wrong datatype "%s" for "%s" field' % (value.dtype, name))
self.tables[name].flush()
def assign(self, name, value):
"""
Assigns the dataset *values* into a table called *name*. If the
specified table exist the old data will be overwritten.
Example:.
tbl.assign("T", temp)
tbl.assign("image", np.zeros((256,256)) )
"""
if type(value)==str:
self._appendstr(name, value)
return
if np.isscalar(value):
value = np.asarray(value)
value = value.reshape((1, )+value.shape)
if not isinstance(value, np.ndarray):
raise TypeError("Don't know how to handle values of type '%s'", type(value))
if not self.tables.has_key(name):
pass
else:
if self.warnings:
print "Warning! The previous data with key %s is being overwritten" %name
self._delete_table(name)
try:
for ii in range(value.shape[0]):
self.append(name, value[ii])
except ValueError:
raise TypeError('Wrong datatype for "%s" field'%name)
self.tables[name].flush()
def append_all(self, valdict):
"""
Append the given data to the table.
*valdict* must be a dictionary containig key value pairs, where key
is a string and specifies the table name. The corresponding value must be
an arbitrary numpy compatible value. If a specified table name does not
exist, a a new table will be created.
Example::
tbl.append( { 't':0.23 , 'D':np.zeros((10,10)) )
"""
for name, value in valdict.items():
self.append(name, value)
def appendList(self, name, value):
"""
Append the dataset *values* into a table called *name*. If a specified
table name does not exist, a a new table will be created.
Different from "append", it will add a list of entry into the table. (value, instead
of containing one entry, contains a list of entries.)
Example:.
tbl.append("T", temp)
tbl.append("image", np.zeros((256,256)) )
"""
if type(value)==list and type(value[0])==str:
self._appendstrList(name, value)
return
if np.isscalar(value):
value = np.asarray(value)
if not isinstance(value, np.ndarray):
raise TypeError("Don't know how to handle values of type '%s'", type(value))
# Check if we need to create a new table
if not self.tables.has_key(name):
self._create_table_list(name, value)
value = value.reshape(value.shape )
try:
self.tables[name].append(value)
except ValueError:
raise TypeError('Wrong datatype for "%s" field'%name)
self.tables[name].flush()
def _delete_table(self, name):
"""
Delete a node from the h5-table together with all dictionary entries
that has been created with the node.
"""
self.h5.removeNode('/', name)
del self.tables[name]
del self.types[name]
def _create_table(self, name, example):
"""
Create a new table within the HDF file, where the tables shape and its
datatype are determined by *example*.
"""
type_map = {
np.dtype(np.float64) : tables.Float64Atom(),
np.dtype(np.float32) : tables.Float32Atom(),
np.dtype(np.int) : tables.Int64Atom(),
np.dtype(np.int8) : tables.Int8Atom(),
np.dtype(np.uint8) : tables.UInt8Atom(),
np.dtype(np.int16) : tables.Int16Atom(),
np.dtype(np.uint16) : tables.UInt16Atom(),
np.dtype(np.int32) : tables.Int32Atom(),
np.dtype(np.uint32) : tables.UInt32Atom(),
}
try:
if type(example)==np.ndarray:
h5type = type_map[example.dtype]
elif type(example)==str:
h5type = tables.VLStringAtom()
except KeyError:
raise TypeError("Joerg smells, but not as much as this message. Could not create table %s because of unknown dtype '%s'" % (name, example.dtype) )#+ ", of name: " % example.shape)
if type(example)==np.ndarray:
h5dim = (0,) + example.shape
h5 = self.h5
filters = tables.Filters(complevel=self.compression_level, complib='zlib', shuffle=True)
self.tables[name] = h5.createEArray( h5.root, name, h5type, h5dim, filters=filters )
elif type(example)==str:
h5 = self.h5
filters = tables.Filters(complevel=self.compression_level, complib='zlib', shuffle=True)
self.tables[name] = h5.createVLArray( h5.root, name, h5type, filters=filters )
self.types[name] = type(example)
def _create_table_list(self, name, example):
"""
Create a new table within the HDF file, where the tables shape and its
datatype are determined by *example*.
The modified version for creating table with appendList
"""
type_map = {
np.dtype(np.float64) : tables.Float64Atom(),
np.dtype(np.float32) : tables.Float32Atom(),
np.dtype(np.int) : tables.Int64Atom(),
np.dtype(np.int8) : tables.Int8Atom(),
np.dtype(np.uint8) : tables.UInt8Atom(),
np.dtype(np.int16) : tables.Int16Atom(),
np.dtype(np.uint16) : tables.UInt16Atom(),
np.dtype(np.int32) : tables.Int32Atom(),
np.dtype(np.uint32) : tables.UInt32Atom(),
}
try:
if type(example)==np.ndarray:
h5type = type_map[example.dtype]
elif type(example)==list and type(example[0])==str:
h5type = tables.VLStringAtom()
except KeyError:
raise TypeError("Don't know how to handle dtype '%s'" % example.dtype)
if type(example)==np.ndarray:
h5dim = (0,)+example.shape[1:]
h5 = self.h5
filters = tables.Filters(complevel=self.compression_level, complib='zlib', shuffle=True)
self.tables[name] = h5.createEArray( h5.root, name, h5type, h5dim, filters=filters )
elif type(example)==list and type(example[0])==str:
h5 = self.h5
filters = tables.Filters(complevel=self.compression_level, complib='zlib', shuffle=True)
self.tables[name] = h5.createVLArray( h5.root, name, h5type, filters=filters )
self.types[name] = type(example)
def _guess_fname(self):
"""
Derive an fname from sys.argv[0] by striping the extension and adding ".h5".
As a result, the table will be named just like the executing programm.
"""
import sys
import os.path as path
base, _ = path.splitext(sys.argv[0])
return base+".h5"
def _appendstr(self, name, value):
"""
Removing the numpy specific operation in appending
"""
# Check if we need to create a new table
if not self.tables.has_key(name):
self._create_table(name, value)
try:
self.tables[name].append(value)
except ValueError:
raise TypeError("Could not create table %s because of unknown dtype '%s'" % (name, example.dtype) )
#raise TypeError('Wrong datatype for "%s" field'%name)
self.tables[name].flush()
def _appendstrList(self, name, value):
"""
Removing the numpy specific operation in appending
"""
# Check if we need to create a new table
if not self.tables.has_key(name):
self._create_table_list(name, value)
try:
map(self.tables[name].append,value)
except ValueError:
raise TypeError('Wrong datatype for "%s" field'%name)
self.tables[name].flush()
|
jbornschein/mca-genmodel
|
pulp/utils/autotable.py
|
Python
|
agpl-3.0
| 10,974
|
[
"Gaussian"
] |
54efbfe3e946aec7d2166e3b9e5d6ef56fffea5b0e20adafce1a0381f1c17a63
|
#!/usr/bin/python
from multiprocessing import Pool
import time
import os
import sys
import argparse
from homolog4 import *
from collections import defaultdict
# Copyright(C) 2015 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
    """Build the command-line parser for the BLAST-parse step and parse sys.argv."""
    cpu_default = os.sysconf("SC_NPROCESSORS_CONF")
    parser = argparse.ArgumentParser(description="Parse the results of a BLAST -m8 search and organize the results by specific gene blocks. The program will save the results in a directory designated by the user, or the default './blast_parse/'.")
    parser.add_argument("-i", "--infolder", dest="infolder", default='./blast_result/', metavar="DIRECTORY",
                        help="A file that contains the path to every organism database that you are interested in.")
    parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./blast_parse/',
                        help="Folder where the BLAST results will be stored. Default is the folder './blast_result/'.")
    # "-b" points at the gene-block definition file, one line per block:
    # gene_block_name, then its genes, all fields tab delimited.
    parser.add_argument("-b", "--gene_block_query", dest="gene_block_query", default='./regulonDB/gene_block_names_and_genes.txt', metavar="FILE",
                        help="A file that contains the names and genes comprising the gene blocks that are under investigation.")
    parser.add_argument("-f", "--filter", dest="filter", default='', metavar="FILE",
                        help="A file that contains the accession numbers of the organisms that are under investigation.")
    parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default=cpu_default, type=int,
                        help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
    parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", default=False,
                        help="Suppresses most program text outputs.")
    return parser.parse_args()
def check_options(parsed_args):
    """Validate and normalize the parsed command-line arguments.

    Returns (infolder, outfolder, gene_block_query, filter_file, num_proc, quiet).
    Exits the program when a required path does not exist; creates the output
    folder when missing and guarantees it ends with '/'.  num_proc is clamped
    to the range [1, number of configured CPUs].
    """
    if os.path.isdir(parsed_args.infolder):
        infolder = parsed_args.infolder
    else:
        # Parenthesized single-argument print works on Python 2 and 3
        # (the originals were Python-2-only print statements).
        print("The infolder directory %s does not exist." % parsed_args.infolder)
        sys.exit()

    # if the directory that the user specifies does not exist, then the program makes it for them.
    if not os.path.isdir(parsed_args.outfolder):
        os.makedirs(parsed_args.outfolder)
    outfolder = parsed_args.outfolder
    if outfolder[-1] != '/':
        outfolder = outfolder + '/'

    if os.path.exists(parsed_args.gene_block_query):
        gene_block_query = parsed_args.gene_block_query
    else:
        print("The gene block query file %s does not exist." % parsed_args.gene_block_query)
        sys.exit()

    # '' means "no filter"; any other value must name an existing file.
    if os.path.exists(parsed_args.filter):
        filter_file = parsed_args.filter
    elif parsed_args.filter == '':
        filter_file = parsed_args.filter
    else:
        print("The filter file %s does not exist." % parsed_args.filter)
        sys.exit()

    # section of code that deals determining the number of CPU cores that will be used by the program
    if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
        num_proc = os.sysconf("SC_NPROCESSORS_CONF")
    elif parsed_args.num_proc < 1:
        num_proc = 1
    else:
        num_proc = int(parsed_args.num_proc)

    quiet = parsed_args.quiet

    return infolder, outfolder, gene_block_query, filter_file, num_proc, quiet
#this function will return all of the files that are in a directory. os.walk is recursive traversal.
def returnRecursiveDirFiles(root_dir):
    """Return the paths of all regular files beneath *root_dir* (recursive walk)."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for entry in filenames:
            full_path = os.path.join(dirpath, entry)
            if os.path.isfile(full_path):
                found.append(full_path)
    return found
# The filter file here i think should be either a vacant value (such as '') or a user defined
# value. I do not think by default it should be given, like I have made the default behavior.
def parallel_blast_parse_dict(in_folder, out_folder, num_proc, filter_file, gene_block_dict):
    """Bucket BLAST -m8 hits by gene block and organism, one output file per block.

    in_folder: directory tree of raw BLAST result files (one hit per line).
    out_folder: destination directory; created if missing.
    num_proc: unused here — NOTE(review): despite the name, this
        implementation is serial.
    filter_file: optional file of accession numbers; '' disables filtering.
    gene_block_dict: mapping of predicted gene name -> gene block name.

    Writes out_folder/<gene_block>.txt files where each organism's hits are
    grouped together, one serialized Homolog (to_file()) per line.
    """
    result = {}
    gene_block_out_folder = out_folder
    if not os.path.isdir(gene_block_out_folder):
        os.makedirs(gene_block_out_folder)
    if filter_file != '':
        # Keep only files whose basename (before the first '.') is listed
        # in the filter file — presumably accession numbers; verify against caller.
        tmp = returnRecursiveDirFiles(in_folder)
        nc_list = [i.strip() for i in open(filter_file).readlines()]
        fname_list = [i for i in tmp if i.split('/')[-1].split('.')[0] in nc_list]
    else:
        fname_list = returnRecursiveDirFiles(in_folder)
    # this is working correctly...
    #print "blast_parse fname_list", fname_list
    for fname in fname_list:
        for line in [i.strip() for i in open(fname).readlines()]:
            try:
                hlog = Homolog.from_blast(line)
            except:
                # NOTE(review): on failure hlog keeps its previous value and
                # the loop continues — errors are only reported, not skipped.
                print "Error in function parallel_blast_parse_dict from script blast_parse.py, conversion from result to Homolog class failed.", line
            #hlog.Print()
            # this might have to be changed....
            #accession = hlog.accession()
            try:
                accession = hlog.accession()
            except:
                print "There was an error in the Homolog class, found in line function parallel_blast_parse_dict from script blast_parse.py"
            #predicted_gene = hlog.blast_annatation()
            predicted_gene = hlog.blast_annotation()
            '''
            try: # faster implementation than "if predicted_gene in gene_block_dict.keys():"
                gene_block = gene_block_dict[predicted_gene]
                # Debugging the missing gene block casABCDE12... no idea right now.
                if gene_block == 'casABCDE12':
                    print 'AFDFAFDSF'
                if gene_block in result.keys(): # check if the gene block is in the result dict already, if not make a new entry in the else clause
                    if accession in result[gene_block].keys(): # Check if the organism has been added to the gene block
                        result[gene_block][accession].append(hlog.ret_str())
                    else: # if the organim has not been added to the rest yet, add it
                        result[gene_block].update({accession:[hlog.ret_str()]})
                else: # add the gene block to the result
                    result.update({gene_block: {accession: [hlog.ret_str()]}})
            except:
                pass
            '''
            try: # faster implementation than "if predicted_gene in gene_block_dict.keys():"
                gene_block = gene_block_dict[predicted_gene]
                # Debugging the missing gene block casABCDE12... no idea right now.
                # ok, it's there, so omitting this, leaving the comment in for the time being though
                #if gene_block == 'casABCDE12':
                #    print 'AFDFAFDSF'
                if gene_block in result.keys(): # check if the gene block is in the result dict already, if not make a new entry in the else clause
                    if accession in result[gene_block].keys(): # Check if the organism has been added to the gene block
                        result[gene_block][accession].append(hlog.to_file())
                    else: # if the organims is not part of the gene block, add it
                        result[gene_block].update({accession:[hlog.to_file()]})
                else: # add the gene_block to the result
                    result.update({gene_block: {accession: [hlog.to_file()]}})
            except:
                pass
    #print sorted(result.keys()), len(result)
    '''
    # For the time being, i am going to cause each intermediate step in this pipeline to save in a folder called intermediate_for_debug
    intermediate_folder = './intermediate_for_debug/'
    if not os.path.isdir(intermediate_folder):
        os.makedirs(intermediate_folder)
    # For this step i will save the result in 'unfiltered_gene_block/'
    unfilter_folder = 'unfiltered_gene_block/'
    if not os.path.isdir(intermediate_folder + unfilter_folder):
        os.makedirs(intermediate_folder + unfilter_folder)
    '''
    for gene_block in result.keys():
        # this code is omitted because it used to debugging purposes, and is currently unneeded
        '''
        outfile = intermediate_folder + unfilter_folder + gene_block + '.txt'
        #print "outfile", outfile
        handle = open(outfile, 'w')
        for accession in result[gene_block].keys():
            handle.write('\n'.join(result[gene_block][accession]) + '\n')
        handle.close()
        '''
        # save results where i actually want them to go:
        #print "plast_parse.py outfile", out_folder + gene_block + '.txt'
        handle = open(out_folder + gene_block + '.txt', 'w')
        for accession in result[gene_block].keys():
            handle.write('\n'.join(result[gene_block][accession]) + '\n')
        handle.close()
# I have to figure out a better name for this function. The jist of what I am doing here is as follows:
# First, I will provide a file name that contains all of the hits for every organism that we are interested in.
# Then it sorts this homolog list first by organism, then by locus. (By the required input, the files already have
# been screened for both eval cutoff and gene block membership. The function will then return a dictionary for that
# gene block. The intention is that this data structure will then be used to find the best hit for the locus out of the
# many redundant hits, however this functionality will be handled another function that i have yet to write/test.
def return_gene_block_list(fname):
    """Group the Homolog records of one gene-block file by organism, then locus.

    *fname* is a per-gene-block file (as written by parallel_blast_parse_dict)
    whose lines have already been screened for e-value cutoff and gene-block
    membership.  Returns {accession: {locus: [Homolog, ...]}} so a later step
    can pick the best hit among the redundant ones at each locus.
    """
    # The original computed the gene-block name from fname here but never
    # used it; that dead local has been removed.
    hlog_list = [Homolog.from_file(i.strip()) for i in open(fname).readlines()]
    result_dict = {}
    for hlog in hlog_list:
        accession = hlog.accession()
        locus = hlog.locus()
        # setdefault replaces the O(n) "in result_dict.keys()" membership
        # checks of the original (quadratic on Python 2 lists).
        result_dict.setdefault(accession, {}).setdefault(locus, []).append(hlog)
    return result_dict
# currently not used
'''
# might not use this: will see
def parallel_return_gene_block_list(infolder, outfolder, num_proc):
pool = Pool(processes = num_proc)
organism_dict_for_recovery = dict(pool.map(parallel_gene_block_fasta, genome_of_interest_list))
'''
# This function will take the organism-locus dict (per gene block file) and determine the best homolog.
def best_homolog_list(gene_block_dict, outfile):
    """For every organism/locus pair keep the hit with the lowest e-value and
    write all winners to *outfile* (one ret_str() per line)."""
    winners = []
    for org in sorted(gene_block_dict.keys()):
        org_loci = gene_block_dict[org]
        for locus in sorted(org_loci.keys()):
            candidates = org_loci[locus]
            best_hit = candidates[0]
            # Count how many hits predicted each gene at this locus
            # (diagnostic only; the tally is not used further).
            gene_count = defaultdict(int)
            gene_count[best_hit.predicted_gene()] += 1
            for contender in candidates[1:]:
                gene_count[contender.predicted_gene()] += 1
                if best_hit.e_val() > contender.e_val():
                    best_hit = contender
            #print gene_count.keys()
            winners.append(best_hit)
    handle = open(outfile, 'w')
    handle.write('\n'.join([i.ret_str() for i in winners]))
    handle.close()
def return_gene_to_gene_block_dict(fname):
    """Read a tab-delimited file of 'block<TAB>gene1<TAB>gene2...' lines and
    return a dict mapping each gene name to its gene-block name."""
    gene_block_dict = {}
    for raw_line in open(fname).readlines():
        fields = raw_line.strip().split('\t')
        block_name = fields[0]
        for gene_name in fields[1:]:
            gene_block_dict[gene_name] = block_name
    return gene_block_dict
def main():
    """Entry point: parse CLI args, build the gene -> gene-block map, and
    bucket all BLAST hits into per-gene-block files."""
    start = time.time()
    parsed_args = parser_code()
    infolder, outfolder, gene_block_query, filter_file, num_proc, quiet = check_options(parsed_args)
    if not quiet:
        print infolder, outfolder, gene_block_query, filter_file, num_proc, quiet
    # This code makes a dictionary mapping gene annotation to the gene block that it belong to
    gene_block_dict = return_gene_to_gene_block_dict(gene_block_query)
    #print "gene_block_dict", gene_block_dict
    #parallel_blast_parse_dict('./blast_parse/organism_raw_info/', './blast_parse/filtered_homologs/', num_proc, './genbank_pathway_lists/nc_filter_file.txt', gene_block_dict)
    parallel_blast_parse_dict(infolder, outfolder, num_proc, filter_file, gene_block_dict)
    #gene_block_dict = return_gene_block_list('./blast_parse/filtered_homologs/atpIBEFHAGDC.txt')
    #best_homolog_list(gene_block_dict, './blast_parse/processed_gene_block_files/atpIBEFHAGDC.txt')
    if not quiet:
        # wall-clock runtime in seconds
        print time.time() - start

# ./blast_parse.py -f phylo_order.txt
if __name__ == '__main__':
    main()
|
ashishjain1988/gbeer_standalone
|
blast_parse.py
|
Python
|
gpl-3.0
| 13,419
|
[
"BLAST"
] |
092d3de4cae42204f39e5b2592e4f6ef97408fec0fd65a5f5977346fc1ff7ce1
|
from nose.tools import assert_equal, assert_almost_equal
import numpy as np
from probfit import (describe, rename, Convolve, Normalized,
Extended, AddPdf, AddPdfNorm, BlindFunc)
from probfit.pdf import gaussian, ugaussian
from probfit._libstat import integrate1d
from probfit.decorator import extended, normalized
def test_describe_normal_function():
    """describe() on a plain function lists its positional argument names."""
    def sample(x, y, z):
        return x + y + z
    arg_names = describe(sample)
    assert_equal(list(arg_names), ['x', 'y', 'z'])
def test_Normalized():
    """Normalized divides the wrapped pdf by its numerical integral over the range."""
    f = ugaussian
    g = Normalized(f, (-1, 1))
    norm = integrate1d(f, (-1., 1.), 1000, (0., 1.))
    assert_almost_equal(g(1., 0., 1.), f(1., 0., 1.) / norm)
def test_normalized_decorator():
    """The @normalized(range) decorator behaves like wrapping with Normalized."""
    @normalized((-1, 1))
    def f(x, mean, sigma):
        return ugaussian(x, mean, sigma)
    g = Normalized(ugaussian, (-1, 1))
    assert_equal(describe(f), ['x', 'mean', 'sigma'])
    assert_almost_equal(g(1, 0, 1), f(1, 0, 1))
def test_Normalized_cache_hit():
    """Normalized caches its normalization integral: the hit counter grows only
    when a call repeats the previously-seen normalization parameters.
    The exact call order below is what is being tested."""
    def f(x, y, z) : return 1.*(x + y + z)
    def g(x, y, z) : return 1.*(x + y + 2 * z)
    nf = Normalized(f, (-10., 10.))
    ng = Normalized(g, (-10., 10.))
    assert_equal(nf.hit, 0)
    nf(1., 2., 3.)
    ng(1., 2., 3.)
    assert_equal(nf.hit, 0)
    nf(3., 2., 3.)
    assert_equal(nf.hit, 1)
    ng(1., 2., 3.)
    assert_equal(ng.hit, 1)
def test_add_pdf():
    """AddPdf merges argument lists (shared names deduplicated, first arg is x)
    and sums the component pdfs; analytical .integrate attributes attached to
    the components are summed by integrate1d as well."""
    def f(x, y, z): return x + y + z
    def g(x, a, b): return 2 * (x + a + b)
    def h(x, c, a): return 3 * (x + c + a)
    A = AddPdf(f, g, h)
    assert_equal(tuple(describe(A)), ('x', 'y', 'z', 'a', 'b', 'c'))
    ret = A(1, 2, 3, 4, 5, 6, 7)
    expected = f(1, 2, 3) + g(1, 4, 5) + h(1, 6, 4)
    assert_almost_equal(ret, expected)
    # wrong integral on purpose
    f.integrate = lambda bound, nint, y, z : 1. # unbound method works too
    g.integrate = lambda bound, nint, a, b : 2.
    h.integrate = lambda bound, nint, c, a : 3.
    assert_equal(integrate1d(A, (-10., 10.), 100, (1., 2., 3., 4., 5.)), 6.)
def test_add_pdf_factor():
    """AddPdf with per-component prefixes and factor functions: the sum is
    k1(...) * f(...) + k2(...) * g(...), and eval_parts exposes each term."""
    def f(x, y, z): return x + y + z
    def g(x, a, b): return 2 * (x + a + b)
    def k1(n1, n2): return 3 * (n1 + n2)
    def k2(n1, y): return 4 * (n1 + y)
    A = AddPdf(f, g, prefix=['f', 'g'], factors=[k1, k2])
    assert_equal(tuple(describe(A)), ('x', 'fy', 'fz', 'ga', 'gb', 'fn1', 'fn2', 'gn1', 'gy'))
    ret = A(1, 2, 3, 4, 5, 6, 7, 8, 9)
    expected = k1(6, 7) * f(1, 2, 3) + k2(8, 9) * g(1, 4, 5)
    assert_almost_equal(ret, expected)
    parts = A.eval_parts(1, 2, 3, 4, 5, 6, 7, 8, 9)
    assert_almost_equal(parts[0], k1(6, 7) * f(1, 2, 3))
    assert_almost_equal(parts[1], k2(8, 9) * g(1, 4, 5))
def test_add_pdf_cache():
    """AddPdf caches per-component results: when only some arguments change
    between calls, unchanged components are served from cache (hit counter).
    The exact call order below is what is being tested."""
    def f(x, y, z): return x + y + z
    def g(x, a, b): return 2 * (x + a + b)
    def h(x, c, a): return 3 * (x + c + a)
    A = AddPdf(f, g, h)
    assert_equal(tuple(describe(A)), ('x', 'y', 'z', 'a', 'b', 'c'))
    ret = A(1, 2, 3, 4, 5, 6, 7)
    assert_equal(A.hit, 0)
    expected = f(1, 2, 3) + g(1, 4, 5) + h(1, 6, 4)
    assert_almost_equal(ret, expected)
    ret = A(1, 2, 3, 6, 7, 8, 9)
    assert_equal(A.hit, 1)
    expected = f(1, 2, 3) + g(1, 6, 7) + h(1, 8, 6)
    assert_almost_equal(ret, expected)
def test_extended():
    """Extended appends an 'N' scale argument; its integrate() forwards to an
    analytical .integrate on the wrapped pdf when present, otherwise it falls
    back to numerical integration."""
    def f(x, y, z): return x + 2 * y + 3 * z
    g = Extended(f)
    assert_equal(tuple(describe(g)), ('x', 'y', 'z', 'N'))
    assert_equal(g(1, 2, 3, 4), 4 * (f(1, 2, 3)))
    # extended should use analytical when available
    def ana_int(x, y): return y * x ** 2
    ana_int_int = lambda b, n, y: 999. # wrong on purpose
    ana_int.integrate = ana_int_int
    g = Extended(ana_int)
    assert_almost_equal(g.integrate((0, 1), 100, 5., 2.), 999.*2.)
    # and not fail when it's not available
    def no_ana_int(x, y): return y * x ** 2
    g = Extended(no_ana_int)
    assert_almost_equal(g.integrate((0, 1), 100, 5., 2.), (1.**3) / 3.*5.*2.)
def test_extended_decorator():
    """The @extended() decorator appends an 'N' multiplier argument."""
    def plain(x, y, z):
        return x + 2 * y + 3 * z
    @extended()
    def scaled(x, y, z):
        return x + 2 * y + 3 * z
    assert_equal(tuple(describe(scaled)), ('x', 'y', 'z', 'N'))
    assert_equal(scaled(1, 2, 3, 4), 4 * (plain(1, 2, 3)))
def test_addpdfnorm():
    """AddPdfNorm mixes pdfs with fractions f_0, f_1, ...; the last fraction
    is implied as 1 minus the sum of the others."""
    def f(x, y, z): return x + 2 * y + 3 * z
    # NOTE(review): '6 * z' (rather than 6 * p) looks like a typo, but the
    # expected values below are computed with the same expression, so the
    # test is self-consistent.
    def g(x, z, p): return 4 * x + 5 * z + 6 * z
    def p(x, y, q): return 7 * x + 8 * y + 9 * q
    h = AddPdfNorm(f, g)
    assert_equal(describe(h), ['x', 'y', 'z', 'p', 'f_0'])
    q = AddPdfNorm(f, g, p)
    assert_equal(describe(q), ['x', 'y', 'z', 'p', 'q', 'f_0', 'f_1'])
    assert_almost_equal(h(1, 2, 3, 4, 0.1),
                        0.1 * f(1, 2, 3) + 0.9 * g(1, 3, 4))
    assert_almost_equal(q(1, 2, 3, 4, 5, 0.1, 0.2),
                        0.1 * f(1, 2, 3) + 0.2 * g(1, 3, 4) + 0.7 * p(1, 2, 5))
def test_addpdfnorm_analytical_integrate():
    """integrate1d of an AddPdfNorm combines the components' analytical
    .integrate results weighted by the mixture fractions."""
    def f(x, y, z): return x + 2 * y + 3 * z
    def g(x, z, p): return 4 * x + 5 * z + 6 * z
    def p(x, y, q): return 7 * x + 8 * y + 9 * q
    f.integrate = lambda bound, nint, y, z: 1.
    g.integrate = lambda bound, nint, z, p: 2.
    p.integrate = lambda bound, nint, y, q: 3.
    q = AddPdfNorm(f, g, p)
    assert_equal(describe(q), ['x', 'y', 'z', 'p', 'q', 'f_0', 'f_1'])
    integral = integrate1d(q, (-10., 10.), 100, (1., 2., 3., 4., 0.1, 0.2))
    assert_almost_equal(integral, 0.1 * 1. + 0.2 * 2. + 0.7 * 3.)
def test_convolution():
    """Numerical convolution of two Gaussians; the expected values are
    regression constants from a previously-validated run."""
    f = gaussian
    g = lambda x, mu1, sigma1 : gaussian(x, mu1, sigma1)
    h = Convolve(f, g, (-10, 10), nbins=10000)
    assert_equal(describe(h), ['x', 'mean', 'sigma', 'mu1', 'sigma1'])
    assert_almost_equal(h(1, 0, 1, 1, 2), 0.17839457037411527) # center
    assert_almost_equal(h(-1, 0, 1, 1, 2), 0.119581456625684) # left
    assert_almost_equal(h(0, 0, 1, 1, 2), 0.1614180824489487) # left
    assert_almost_equal(h(2, 0, 1, 1, 2), 0.1614180824489487) # right
    assert_almost_equal(h(3, 0, 1, 1, 2), 0.119581456625684) # right
def test_rename():
    """rename() relabels a function's described argument names."""
    def original(x, y, z):
        return None
    assert_equal(describe(original), ['x', 'y', 'z'])
    relabeled = rename(original, ['x', 'a', 'b'])
    assert_equal(describe(relabeled), ['x', 'a', 'b'])
def test_blindfunc():
    """BlindFunc shifts the named parameter by an offset derived from the
    blinding string: the same string yields the same shift regardless of
    numpy's global random seed (checked with two different seeds)."""
    np.random.seed(0)
    f = BlindFunc(gaussian, 'mean', 'abcd', width=1.5, signflip=True)
    arg = f.__shift_arg__((1, 1, 1))
    totest = [1., -1.1665264284482637, 1.]
    assert_almost_equal(arg[0], totest[0])
    assert_almost_equal(arg[1], totest[1])
    assert_almost_equal(arg[2], totest[2])
    assert_almost_equal(f.__call__(0.5, 1., 1.), 0.0995003913596)
    np.random.seed(575345)
    f = BlindFunc(gaussian, 'mean', 'abcd', width=1.5, signflip=True)
    arg = f.__shift_arg__((1, 1, 1))
    assert_almost_equal(arg[0], totest[0])
    assert_almost_equal(arg[1], totest[1])
    assert_almost_equal(arg[2], totest[2])
    assert_almost_equal(f.__call__(0.5, 1., 1.), 0.0995003913596)
|
mtresch/probfit
|
test/testfunctor.py
|
Python
|
mit
| 6,732
|
[
"Gaussian"
] |
54f0914105094cf971d2848c696d95fd7234324379589f3461a2451c57adc5df
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to read data in the graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
# Default name for key in the feature dict.
KEY_FEATURE_NAME = '__key__'
def read_batch_examples(file_pattern,
                        batch_size,
                        reader,
                        randomize_input=True,
                        num_epochs=None,
                        queue_capacity=10000,
                        num_threads=1,
                        read_batch_size=1,
                        parse_fn=None,
                        name=None,
                        seed=None):
  """Adds operations to read, queue, batch `Example` protos.

  Thin wrapper around `read_keyed_batch_examples` that discards the string
  keys: sets up a file-name queue for `file_pattern`, reads `Example` protos
  with `reader`, and groups them into batches of `batch_size`.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.
  Use `parse_fn` if you need to do parsing / processing on single examples.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so
      call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  keyed_batch = read_keyed_batch_examples(
      file_pattern=file_pattern,
      batch_size=batch_size,
      reader=reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      name=name,
      seed=seed)
  # Callers of this variant only want the example batch, not the keys.
  return keyed_batch[1]
def read_keyed_batch_examples(file_pattern,
                              batch_size,
                              reader,
                              randomize_input=True,
                              num_epochs=None,
                              queue_capacity=10000,
                              num_threads=1,
                              read_batch_size=1,
                              parse_fn=None,
                              name=None,
                              seed=None):
  """Adds operations to read, queue, batch `Example` protos.

  Sets up a file-name queue for `file_pattern`, reads `Example` protos with
  `reader`, and groups them (together with their string keys) into batches of
  `batch_size`.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.
  Use `parse_fn` if you need to do parsing / processing on single examples.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so
      call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  helper_kwargs = dict(
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      # Non-shared variant: each worker owns a private file-name queue.
      setup_shared_queue=False,
      name=name,
      seed=seed)
  return _read_keyed_batch_examples_helper(file_pattern, batch_size, reader,
                                           **helper_kwargs)
def _read_keyed_batch_examples_shared_queue(file_pattern,
                                            batch_size,
                                            reader,
                                            randomize_input=True,
                                            num_epochs=None,
                                            queue_capacity=10000,
                                            num_threads=1,
                                            read_batch_size=1,
                                            parse_fn=None,
                                            name=None,
                                            seed=None):
  """Reads and batches `Example` protos via a shared file-name queue.

  Behaves like `read_keyed_batch_examples`, except that file names are served
  from a queue shared across workers, giving at-most-once visit semantics.
  Note this guarantee only holds while the parameter servers are not
  pre-empted or restarted and the session is not restored from a checkpoint:
  queue state is not checkpointed, so a restart begins again from the entire
  list of files.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`. All ops are added to the default graph.
  Use `parse_fn` if you need to do parsing / processing on single examples.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
        `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If `None`, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized, so
        call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
        records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  # All behavior lives in the shared helper; this wrapper only fixes
  # setup_shared_queue=True.
  shared_queue_kwargs = dict(
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      name=name,
      seed=seed)
  return _read_keyed_batch_examples_helper(
      file_pattern,
      batch_size,
      reader,
      setup_shared_queue=True,
      **shared_queue_kwargs)
def _get_file_names(file_pattern, randomize_input):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of strings.
randomize_input: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
file_names = file_pattern
if not file_names:
raise ValueError('No files given to dequeue_examples.')
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError('No files match %s.' % file_pattern)
# Sort files so it will be deterministic for unit tests. They'll be shuffled
# in `string_input_producer` if `randomize_input` is enabled.
if not randomize_input:
file_names = sorted(file_names)
return file_names
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
                  filter_fn, parse_fn):
  """Builds one read/filter/parse pipeline per reader thread.

  Args:
    file_name_queue: Queue of file names for `reader` instances to consume.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (key tensor, example tensor).
    num_threads: Number of independent reader pipelines to create.
    read_batch_size: An int or scalar `Tensor`; when greater than 1,
        `read_up_to` is used to fetch several records per call.
    filter_fn: Optional function mapping (keys, examples) to a boolean mask
        used to drop records. If `None`, no filtering is done.
    parse_fn: Optional function mapping the example proto tensor to a parsed
        representation. If `None`, no parsing is done.

  Returns:
    A list with one entry per thread; each entry is either a
    (keys, examples) tuple, or — when `parse_fn` returns a dict — a feature
    dict carrying the keys under `KEY_FEATURE_NAME`.
  """
  with ops.name_scope('read'):
    per_thread_examples = []
    for _ in range(num_threads):
      if read_batch_size > 1:
        keys, examples_proto = reader().read_up_to(file_name_queue,
                                                   read_batch_size)
      else:
        keys, examples_proto = reader().read(file_name_queue)
      if filter_fn:
        keep_mask = filter_fn(keys, examples_proto)
        keys = array_ops.boolean_mask(keys, keep_mask)
        examples_proto = array_ops.boolean_mask(examples_proto, keep_mask)
      if not parse_fn:
        per_thread_examples.append((keys, examples_proto))
        continue
      parsed = parse_fn(examples_proto)
      if isinstance(parsed, dict):
        # batch_join does not accept a (Tensor, dict) pair, so tuck the keys
        # into the feature dict under a reserved name.
        parsed[KEY_FEATURE_NAME] = keys
        per_thread_examples.append(parsed)
      else:
        per_thread_examples.append((keys, parsed))
  return per_thread_examples
def _read_keyed_batch_examples_helper(file_pattern,
                                      batch_size,
                                      reader,
                                      randomize_input=True,
                                      num_epochs=None,
                                      queue_capacity=10000,
                                      num_threads=1,
                                      read_batch_size=1,
                                      filter_fn=None,
                                      parse_fn=None,
                                      setup_shared_queue=False,
                                      name=None,
                                      seed=None):
  """Adds operations to read, queue, batch `Example` protos.

  Shared implementation behind `read_keyed_batch_examples` and
  `_read_keyed_batch_examples_shared_queue`; the only difference between the
  two callers is the value of `setup_shared_queue`.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
        `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If `None`, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized, so
        call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
        records to read at once.
    filter_fn: Filtering function, takes both keys as well `Example` Tensors
        and returns a boolean mask of the same shape as the input Tensors to
        be applied for filtering. If `None`, no filtering is done.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    setup_shared_queue: Whether to set up a shared queue for file names.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  # Retrieve files to read.
  file_names = _get_file_names(file_pattern, randomize_input)
  # Check input parameters are given and reasonable. Tensor-valued sizes are
  # accepted as-is because their values are unknown at graph-construction time.
  if (not queue_capacity) or (queue_capacity <= 0):
    raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
  if (batch_size is None) or ((not isinstance(batch_size, ops.Tensor)) and
                              (batch_size <= 0 or batch_size > queue_capacity)):
    raise ValueError('Invalid batch_size %s, with queue_capacity %s.' %
                     (batch_size, queue_capacity))
  if (read_batch_size is None) or (
      (not isinstance(read_batch_size, ops.Tensor)) and (read_batch_size <= 0)):
    raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
  if (not num_threads) or (num_threads <= 0):
    raise ValueError('Invalid num_threads %s.' % num_threads)
  if (num_epochs is not None) and (num_epochs <= 0):
    raise ValueError('Invalid num_epochs %s.' % num_epochs)
  with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
    with ops.name_scope('file_name_queue') as file_name_queue_scope:
      if setup_shared_queue:
        # Shared-queue mode: a capacity-1 FIFO queue is fed by `seek_next`,
        # which serves each file name at most once across workers.
        file_name_queue = data_flow_ops.FIFOQueue(
            capacity=1, dtypes=[dtypes.string], shapes=[[]])
        enqueue_op = file_name_queue.enqueue(
            input_pipeline_ops.seek_next(
                file_names, shuffle=randomize_input, num_epochs=num_epochs,
                seed=seed))
        queue_runner.add_queue_runner(
            queue_runner.QueueRunner(file_name_queue, [enqueue_op]))
      else:
        # Local mode: a standard string_input_producer over the file list.
        file_name_queue = input_ops.string_input_producer(
            constant_op.constant(
                file_names, name='input'),
            shuffle=randomize_input,
            num_epochs=num_epochs,
            name=file_name_queue_scope,
            seed=seed)
    example_list = _get_examples(file_name_queue, reader, num_threads,
                                 read_batch_size, filter_fn, parse_fn)
    # read_up_to (read_batch_size > 1) yields vectors, so the batching queue
    # must treat each enqueued tensor as many examples.
    enqueue_many = read_batch_size > 1
    # Only a bounded read (finite num_epochs) can produce a short final batch.
    if num_epochs is None:
      allow_smaller_final_batch = False
    else:
      allow_smaller_final_batch = True
    # Setup batching queue given list of read example tensors.
    if randomize_input:
      if isinstance(batch_size, ops.Tensor):
        # Batch size unknown at graph time: reserve 40% of capacity as the
        # shuffle buffer.
        min_after_dequeue = int(queue_capacity * 0.4)
      else:
        min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
      queued_examples_with_keys = input_ops.shuffle_batch_join(
          example_list,
          batch_size,
          capacity=queue_capacity,
          min_after_dequeue=min_after_dequeue,
          enqueue_many=enqueue_many,
          name=scope,
          allow_smaller_final_batch=allow_smaller_final_batch,
          seed=seed)
    else:
      queued_examples_with_keys = input_ops.batch_join(
          example_list,
          batch_size,
          capacity=queue_capacity,
          enqueue_many=enqueue_many,
          name=scope,
          allow_smaller_final_batch=allow_smaller_final_batch)
    # When parse_fn produced a dict, _get_examples stored the keys inside it
    # under KEY_FEATURE_NAME; split them back out here.
    if parse_fn and isinstance(queued_examples_with_keys, dict):
      queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME)
      return queued_keys, queued_examples_with_keys
    return queued_examples_with_keys
def read_keyed_batch_features(file_pattern,
                              batch_size,
                              features,
                              reader,
                              randomize_input=True,
                              num_epochs=None,
                              queue_capacity=10000,
                              reader_num_threads=1,
                              feature_queue_capacity=100,
                              num_enqueue_threads=2,
                              parse_fn=None,
                              name=None):
  """Reads, batches and parses `Example` protos, returning keys and features.

  Pipeline: a file-name queue is built from `file_pattern`, `reader` pulls
  serialized `Example` protos from it, batches of size `batch_size` are
  formed, each batch is parsed against `features`, and the parsed tensors are
  pushed through an asynchronous feature queue (`queue_parsed_features`).

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`. All ops are added to the default graph.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    reader: A function or class that returns an object with
        `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_enqueue_threads: Number of threads to enqueue the parsed example
        queue. Multiple threads help keep the queue full when downstream
        computation is overall cheaper than parsing.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
    example_keys, serialized_examples = read_keyed_batch_examples(
        file_pattern,
        batch_size,
        reader,
        randomize_input=randomize_input,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=reader_num_threads,
        read_batch_size=batch_size,
        parse_fn=parse_fn,
        name=scope)
    # Parse the whole batch at once, then hand the parsed tensors off to the
    # asynchronous feature queue.
    parsed_feature_map = parsing_ops.parse_example(serialized_examples,
                                                   features)
    return queue_parsed_features(
        parsed_feature_map,
        keys=example_keys,
        feature_queue_capacity=feature_queue_capacity,
        num_enqueue_threads=num_enqueue_threads,
        name=scope)
def _read_keyed_batch_features_shared_queue(file_pattern,
                                            batch_size,
                                            features,
                                            reader,
                                            randomize_input=True,
                                            num_epochs=None,
                                            queue_capacity=10000,
                                            reader_num_threads=1,
                                            feature_queue_capacity=100,
                                            num_queue_runners=2,
                                            parse_fn=None,
                                            name=None):
  """Reads, batches and parses `Example` protos via a shared file-name queue.

  Same pipeline as `read_keyed_batch_features`, except the file names are
  served from a queue shared across workers; see
  `_read_keyed_batch_examples_shared_queue` for the at-most-once caveats.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`. All ops are added to the default graph.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    reader: A function or class that returns an object with
        `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_queue_runners: Number of threads to enqueue the parsed example
        queue. Multiple threads help keep the queue full when downstream
        computation is overall cheaper than parsing.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
    example_keys, serialized_examples = _read_keyed_batch_examples_shared_queue(
        file_pattern,
        batch_size,
        reader,
        randomize_input=randomize_input,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=reader_num_threads,
        read_batch_size=batch_size,
        parse_fn=parse_fn,
        name=scope)
    # Parse the batch, then queue the parsed tensors asynchronously.
    parsed_feature_map = parsing_ops.parse_example(serialized_examples,
                                                   features)
    return queue_parsed_features(
        parsed_feature_map,
        keys=example_keys,
        feature_queue_capacity=feature_queue_capacity,
        num_enqueue_threads=num_queue_runners,
        name=scope)
def queue_parsed_features(parsed_features,
                          keys=None,
                          feature_queue_capacity=100,
                          num_enqueue_threads=2,
                          name=None):
  """Speeds up parsing by using queues to do it asynchronously.

  This function adds the tensors in `parsed_features` to a queue, which allows
  the parsing (or any other expensive op before this) to be asynchronous wrt
  the rest of the training graph. This greatly improves read latency and
  speeds up training since the data will already be parsed and ready when
  each step of training needs it.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.

  All ops are added to the default graph.

  Args:
    parsed_features: A dict of string key to `Tensor` or `SparseTensor`
        objects.
    keys: `Tensor` of string keys.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_enqueue_threads: Number of threads to enqueue the parsed example
        queue. Using multiple threads to enqueue the parsed example queue
        helps maintain a full queue when the subsequent computations overall
        are cheaper than parsing.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` corresponding to `keys` if provided, otherwise `None`.
    - A dict of string key to `Tensor` or `SparseTensor` objects corresponding
      to `parsed_features`.

  Raises:
    ValueError: for invalid inputs.
  """
  args = list(parsed_features.values())
  if keys is not None:
    args += [keys]
  with ops.name_scope(name, 'queue_parsed_features', args):
    # Lets also add preprocessed tensors into the queue types for each item of
    # the queue.
    tensors_to_enqueue = []
    # Each entry contains the key, and a boolean which indicates whether the
    # tensor was a sparse tensor.
    tensors_mapping = []
    # TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
    # tensors into a queue. This could be taken care in somewhere else so others
    # can reuse it. Also, QueueBase maybe extended to handle sparse tensors
    # directly.
    # Iterate in sorted key order so that the enqueue and dequeue sides agree
    # on the position of every tensor in the flat list.
    for key in sorted(parsed_features.keys()):
      tensor = parsed_features[key]
      if isinstance(tensor, sparse_tensor.SparseTensor):
        # A SparseTensor is flattened into its three component tensors and
        # rebuilt after dequeue (see tensors_mapping bookkeeping below).
        tensors_mapping.append((key, True))
        tensors_to_enqueue.extend(
            [tensor.indices, tensor.values, tensor.dense_shape])
      else:
        tensors_mapping.append((key, False))
        tensors_to_enqueue.append(tensor)
    # Keys, when present, always travel in the last queue slot.
    if keys is not None:
      tensors_to_enqueue.append(keys)
    queue_dtypes = [x.dtype for x in tensors_to_enqueue]
    input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
    # Add a summary op to debug if our feature queue is full or not.
    summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
                   (input_queue.name, feature_queue_capacity),
                   math_ops.cast(input_queue.size(), dtypes.float32) *
                   (1. / feature_queue_capacity))
    # Use a single QueueRunner with multiple threads to enqueue so the queue is
    # always full. The threads are coordinated so the last batch will not be
    # lost.
    enqueue_ops = [
        input_queue.enqueue(tensors_to_enqueue)
        for _ in range(num_enqueue_threads)
    ]
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            input_queue,
            enqueue_ops,
            queue_closed_exception_types=(errors.OutOfRangeError,
                                          errors.CancelledError)))
    dequeued_tensors = input_queue.dequeue()
    # Reset shapes on dequeued tensors (the queue itself carries no static
    # shape information).
    for i in range(len(tensors_to_enqueue)):
      dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape())
    # Recreate feature mapping according to the original dictionary.
    dequeued_parsed_features = {}
    index = 0
    for key, is_sparse_tensor in tensors_mapping:
      if is_sparse_tensor:
        # Three tensors are (indices, values, shape).
        dequeued_parsed_features[key] = sparse_tensor.SparseTensor(
            dequeued_tensors[index], dequeued_tensors[index + 1],
            dequeued_tensors[index + 2])
        index += 3
      else:
        dequeued_parsed_features[key] = dequeued_tensors[index]
        index += 1
    dequeued_keys = None
    if keys is not None:
      dequeued_keys = dequeued_tensors[-1]
    return dequeued_keys, dequeued_parsed_features
def read_batch_features(file_pattern,
                        batch_size,
                        features,
                        reader,
                        randomize_input=True,
                        num_epochs=None,
                        queue_capacity=10000,
                        feature_queue_capacity=100,
                        reader_num_threads=1,
                        parse_fn=None,
                        name=None):
  """Reads, batches and parses `Example` protos, returning only features.

  Thin wrapper over `read_keyed_batch_features` that discards the string
  keys. All queue runners are added to the queue runners collection, and may
  be started via `start_queue_runners`. All ops are added to the default
  graph.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    reader: A function or class that returns an object with
        `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    feature_queue_capacity: Capacity of the parsed features queue. Set this
        value to a small number, for example 5 if the parsed features are
        large.
    reader_num_threads: The number of threads to read examples.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  keys_and_features = read_keyed_batch_features(
      file_pattern,
      batch_size,
      features,
      reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      feature_queue_capacity=feature_queue_capacity,
      reader_num_threads=reader_num_threads,
      parse_fn=parse_fn,
      name=name)
  # Drop the keys tensor; callers of this wrapper only want the features.
  return keys_and_features[1]
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  TFRecord-specific convenience wrapper: identical to `read_batch_features`
  with the reader fixed to `TFRecordReader`. See more detailed description
  in `read_examples`.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  record_reader = io_ops.TFRecordReader
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=record_reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name)
|
AsimmHirani/ISpyPi
|
tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
|
Python
|
apache-2.0
| 33,458
|
[
"VisIt"
] |
020d35911da7d1c8b0427f3f2381ae29aa64031754da9fd610f8a12142f4203c
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sfc_models documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 23 16:05:06 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Every entry below is a stock Sphinx extension; no third-party extension
# packages are required to build these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
# 'index' means index.rst is the entry point of the documentation tree.
master_doc = 'index'
# General information about the project.
project = 'sfc_models'
copyright = '2017, Brian Romanchuk'
author = 'Brian Romanchuk'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version/release presumably mirror the package version —
# confirm they are kept in sync with setup.py when releasing.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): recent Sphinx releases warn on `language = None` and expect
# 'en' — confirm the Sphinx version in use before changing this.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# Enabled so sphinx.ext.todo renders `.. todo::` items in the built docs.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# Alabaster ships with Sphinx itself; no extra theme package is needed.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'sfc_models v0.2.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# (The _static directory must exist next to conf.py or Sphinx will warn.)
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Only used when building with the htmlhelp builder.
htmlhelp_basename = 'sfc_modelsdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX options are left at their defaults; uncomment entries to override.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# The underscore in the title is escaped for LaTeX ('sfc\_models').
latex_documents = [
    (master_doc, 'sfc_models.tex', 'sfc\\_models Documentation',
     'Brian Romanchuk', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Used by the `man` builder; section 1 = user commands.
man_pages = [
    (master_doc, 'sfc_models', 'sfc_models Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# Used by the `texinfo` builder only.
texinfo_documents = [
    (master_doc, 'sfc_models', 'sfc_models Documentation',
     author, 'sfc_models', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
brianr747/SFC_models
|
docs/conf.py
|
Python
|
apache-2.0
| 9,981
|
[
"Brian"
] |
94c86084c83f6dc40c4742db82b9caac1c0220c3b368084c29aba74c809e082c
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, dec, TestCase, run_module_suite, assert_allclose,
assert_raises, assert_array_almost_equal_nulp)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk, zeta
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
from scipy._lib._version import NumpyVersion
import math
class TestCephes(TestCase):
    """Smoke and spot-value tests for the low-level Cephes ufuncs in
    scipy.special._ufuncs.

    Most methods either check a single known value or merely verify that
    the call completes without crashing.  Methods whose names start with
    ``_check_`` or ``__check_`` are deliberately disabled (not collected
    by the test runner).
    """
    def test_airy(self):
        cephes.airy(0)
    def test_airye(self):
        cephes.airye(0)
    def test_binom(self):
        # Known values computed independently; then exercise internal
        # branches with randomized (n, k) pairs.
        n = np.array([0.264, 4, 5.2, 17])
        k = np.array([2, 0.4, 7, 3.3])
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
            -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
            [10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
            [136, 3.5252179590758828, 19448, 1024.5526916174495]])
        assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
        # Test branches in implementation
        np.random.seed(1234)
        n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
        k = np.arange(0, 102)
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        assert_func_equal(cephes.binom,
                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
                          nk,
                          atol=1e-10, rtol=1e-10)
    def test_binom_2(self):
        # Test branches in implementation
        np.random.seed(1234)
        n = np.r_[np.logspace(1, 300, 20)]
        k = np.arange(0, 102)
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        assert_func_equal(cephes.binom,
                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
                          nk,
                          atol=1e-10, rtol=1e-10)
    def test_binom_exact(self):
        # Compare against exact integer arithmetic for integer n >= k.
        @np.vectorize
        def binom_int(n, k):
            n = int(n)
            k = int(k)
            num = int(1)
            den = int(1)
            for i in range(1, k+1):
                num *= i + n - k
                den *= i
            return float(num/den)
        np.random.seed(1234)
        n = np.arange(1, 15)
        k = np.arange(0, 15)
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        nk = nk[nk[:,0] >= nk[:,1]]
        assert_func_equal(cephes.binom,
                          binom_int(nk[:,0], nk[:,1]),
                          nk,
                          atol=0, rtol=0)
    def test_bdtr(self):
        assert_equal(cephes.bdtr(1,1,0.5),1.0)
    def test_bdtri(self):
        assert_equal(cephes.bdtri(1,3,0.5),0.5)
    def test_bdtrc(self):
        assert_equal(cephes.bdtrc(1,3,0.5),0.5)
    def test_bdtrin(self):
        assert_equal(cephes.bdtrin(1,0,1),5.0)
    def test_bdtrik(self):
        cephes.bdtrik(1,3,0.5)
    def test_bei(self):
        assert_equal(cephes.bei(0),0.0)
    def test_beip(self):
        assert_equal(cephes.beip(0),0.0)
    def test_ber(self):
        assert_equal(cephes.ber(0),1.0)
    def test_berp(self):
        assert_equal(cephes.berp(0),0.0)
    def test_besselpoly(self):
        assert_equal(cephes.besselpoly(0,0,0),1.0)
    def test_beta(self):
        assert_equal(cephes.beta(1,1),1.0)
        assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
        assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
                        rtol=1e-13, atol=0)
    def test_betainc(self):
        assert_equal(cephes.betainc(1,1,1),1.0)
        assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
    def test_betaln(self):
        assert_equal(cephes.betaln(1,1),0.0)
        assert_allclose(cephes.betaln(-100.3, 1e-200), cephes._gammaln(1e-200))
        assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
                        rtol=1e-14, atol=0)
    def test_betaincinv(self):
        assert_equal(cephes.betaincinv(1,1,1),1.0)
        assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
                        8.4231316935498957e-21, rtol=3e-12, atol=0)
    def test_beta_inf(self):
        assert_(np.isinf(special.beta(-1, 2)))
    def test_btdtr(self):
        assert_equal(cephes.btdtr(1,1,1),1.0)
    def test_btdtri(self):
        assert_equal(cephes.btdtri(1,1,1),1.0)
    def test_btdtria(self):
        assert_equal(cephes.btdtria(1,1,1),5.0)
    def test_btdtrib(self):
        assert_equal(cephes.btdtrib(1,1,1),5.0)
    def test_cbrt(self):
        assert_approx_equal(cephes.cbrt(1),1.0)
    def test_chdtr(self):
        assert_equal(cephes.chdtr(1,0),0.0)
    def test_chdtrc(self):
        assert_equal(cephes.chdtrc(1,0),1.0)
    def test_chdtri(self):
        assert_equal(cephes.chdtri(1,1),0.0)
    def test_chdtriv(self):
        assert_equal(cephes.chdtriv(0,0),5.0)
    def test_chndtr(self):
        assert_equal(cephes.chndtr(0,1,0),0.0)
        # Values from R dchisq / pchisq-style references.
        p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
        assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
                            1.33520017e-08, 2.74909967e-08],
                        rtol=1e-6, atol=0)
        assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
        assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
        assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
        assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
        assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
    def test_chndtridf(self):
        assert_equal(cephes.chndtridf(0,0,1),5.0)
    def test_chndtrinc(self):
        assert_equal(cephes.chndtrinc(0,1,0),5.0)
    def test_chndtrix(self):
        assert_equal(cephes.chndtrix(0,1,0),0.0)
    def test_cosdg(self):
        assert_equal(cephes.cosdg(0),1.0)
    def test_cosm1(self):
        assert_equal(cephes.cosm1(0),0.0)
    def test_cotdg(self):
        assert_almost_equal(cephes.cotdg(45),1.0)
    def test_dawsn(self):
        assert_equal(cephes.dawsn(0),0.0)
        assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
    def test_diric(self):
        # Test behavior near multiples of 2pi. Regression test for issue
        # described in gh-4001.
        n_odd = [1, 5, 25]
        x = np.array(2*np.pi + 5e-5).astype(np.float32)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
        x = np.array(2*np.pi + 1e-9).astype(np.float64)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
        x = np.array(2*np.pi + 1e-15).astype(np.float64)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
        if hasattr(np, 'float128'):
            # No float128 available in 32-bit numpy
            x = np.array(2*np.pi + 1e-12).astype(np.float128)
            assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
        n_even = [2, 4, 24]
        x = np.array(2*np.pi + 1e-9).astype(np.float64)
        assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
        # Test at some values not near a multiple of pi
        x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
        octave_result = [0.872677996249965, 0.539344662916632,
                         0.127322003750035, -0.206011329583298]
        assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
    def test_diric_broadcasting(self):
        x = np.arange(5)
        n = np.array([1, 3, 7])
        assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
    def test_ellipe(self):
        assert_equal(cephes.ellipe(1),1.0)
    def test_ellipeinc(self):
        assert_equal(cephes.ellipeinc(0,1),0.0)
    def test_ellipj(self):
        cephes.ellipj(0,1)
    def test_ellipk(self):
        assert_allclose(ellipk(0), pi/2)
    def test_ellipkinc(self):
        assert_equal(cephes.ellipkinc(0,0),0.0)
    def test_erf(self):
        assert_equal(cephes.erf(0),0.0)
    def test_erfc(self):
        assert_equal(cephes.erfc(0),1.0)
    def test_exp1(self):
        cephes.exp1(1)
    def test_expi(self):
        cephes.expi(1)
    def test_expn(self):
        cephes.expn(1,1)
    def test_exp1_reg(self):
        # Regression for #834
        a = cephes.exp1(-complex(19.9999990))
        b = cephes.exp1(-complex(19.9999991))
        assert_array_almost_equal(a.imag, b.imag)
    def test_exp10(self):
        assert_approx_equal(cephes.exp10(2),100.0)
    def test_exp2(self):
        assert_equal(cephes.exp2(2),4.0)
    def test_expm1(self):
        assert_equal(cephes.expm1(0),0.0)
        assert_equal(cephes.expm1(np.inf), np.inf)
        assert_equal(cephes.expm1(-np.inf), -1)
        assert_equal(cephes.expm1(np.nan), np.nan)
    # Earlier numpy version don't guarantee that npy_cexp conforms to C99.
    @dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
    def test_expm1_complex(self):
        # C99 Annex G special values for cexp, shifted by -1.
        expm1 = cephes.expm1
        assert_equal(expm1(0 + 0j), 0 + 0j)
        assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
        assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
        assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
        assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
        assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
        assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
        assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
        assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
        assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
        assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
    @dec.knownfailureif(True, 'The real part of expm1(z) bad at these points')
    def test_expm1_complex_hard(self):
        # The real part of this function is difficult to evaluate when
        # z.real = -log(cos(z.imag)).
        y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
        x = -np.log(np.cos(y))
        z = x + 1j*y
        # evaluate using mpmath.expm1 with dps=1000
        expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
                             2.4289354732893695e-18+0.20271003550867248j,
                             4.5235500262585768e-17+0.30933624960962319j,
                             7.8234305217489006e-17-3.3805150062465863j,
                             -1.3685191953697676e-16-225.95084645419513j,
                             8.7175620481291045e-17+2.2371609442247422j])
        found = cephes.expm1(z)
        # this passes.
        assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
        # this fails.
        assert_array_almost_equal_nulp(found.real, expected.real, 20)
    def test_fdtr(self):
        assert_equal(cephes.fdtr(1,1,0),0.0)
    def test_fdtrc(self):
        assert_equal(cephes.fdtrc(1,1,0),1.0)
    def test_fdtri(self):
        # cephes.fdtri(1,1,0.5)  #BUG: gives NaN, should be 1
        assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
                        array([0.9937365, 1.00630298]), rtol=1e-6)
    def test_fdtridfd(self):
        assert_equal(cephes.fdtridfd(1,0,0),5.0)
    def test_fresnel(self):
        assert_equal(cephes.fresnel(0),(0.0,0.0))
    def test_gamma(self):
        assert_equal(cephes.gamma(5),24.0)
    def test_gammainc(self):
        assert_equal(cephes.gammainc(5,0),0.0)
    def test_gammaincc(self):
        assert_equal(cephes.gammaincc(5,0),1.0)
    def test_gammainccinv(self):
        assert_equal(cephes.gammainccinv(5,1),0.0)
    def test_gammaln(self):
        cephes._gammaln(10)
    def test_gammasgn(self):
        vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
        assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
    def test_gdtr(self):
        assert_equal(cephes.gdtr(1,1,0),0.0)
    def test_gdtr_inf(self):
        assert_equal(cephes.gdtr(1,1,np.inf),1.0)
    def test_gdtrc(self):
        assert_equal(cephes.gdtrc(1,1,0),1.0)
    def test_gdtria(self):
        assert_equal(cephes.gdtria(0,1,1),0.0)
    def test_gdtrib(self):
        cephes.gdtrib(1,0,1)
        # assert_equal(cephes.gdtrib(1,0,1),5.0)
    def test_gdtrix(self):
        cephes.gdtrix(1,1,.1)
    def test_hankel1(self):
        cephes.hankel1(1,1)
    def test_hankel1e(self):
        cephes.hankel1e(1,1)
    def test_hankel2(self):
        cephes.hankel2(1,1)
    def test_hankel2e(self):
        cephes.hankel2e(1,1)
    def test_hyp1f1(self):
        assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
        assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
        cephes.hyp1f1(1,1,1)
    def test_hyp1f2(self):
        cephes.hyp1f2(1,1,1,1)
    def test_hyp2f0(self):
        cephes.hyp2f0(1,1,1,1)
    def test_hyp2f1(self):
        assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
    def test_hyp3f0(self):
        assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
    def test_hyperu(self):
        assert_equal(cephes.hyperu(0,1,1),1.0)
    def test_i0(self):
        assert_equal(cephes.i0(0),1.0)
    def test_i0e(self):
        assert_equal(cephes.i0e(0),1.0)
    def test_i1(self):
        assert_equal(cephes.i1(0),0.0)
    def test_i1e(self):
        assert_equal(cephes.i1e(0),0.0)
    def test_it2i0k0(self):
        cephes.it2i0k0(1)
    def test_it2j0y0(self):
        cephes.it2j0y0(1)
    def test_it2struve0(self):
        cephes.it2struve0(1)
    def test_itairy(self):
        cephes.itairy(1)
    def test_iti0k0(self):
        assert_equal(cephes.iti0k0(0),(0.0,0.0))
    def test_itj0y0(self):
        assert_equal(cephes.itj0y0(0),(0.0,0.0))
    def test_itmodstruve0(self):
        assert_equal(cephes.itmodstruve0(0),0.0)
    def test_itstruve0(self):
        assert_equal(cephes.itstruve0(0),0.0)
    def test_iv(self):
        assert_equal(cephes.iv(1,0),0.0)
    def _check_ive(self):
        assert_equal(cephes.ive(1,0),0.0)
    def test_j0(self):
        assert_equal(cephes.j0(0),1.0)
    def test_j1(self):
        assert_equal(cephes.j1(0),0.0)
    def test_jn(self):
        assert_equal(cephes.jn(0,0),1.0)
    def test_jv(self):
        assert_equal(cephes.jv(0,0),1.0)
    def _check_jve(self):
        assert_equal(cephes.jve(0,0),1.0)
    def test_k0(self):
        cephes.k0(2)
    def test_k0e(self):
        cephes.k0e(2)
    def test_k1(self):
        cephes.k1(2)
    def test_k1e(self):
        cephes.k1e(2)
    def test_kei(self):
        cephes.kei(2)
    def test_keip(self):
        assert_equal(cephes.keip(0),0.0)
    def test_ker(self):
        cephes.ker(2)
    def test_kerp(self):
        cephes.kerp(2)
    def _check_kelvin(self):
        cephes.kelvin(2)
    def test_kn(self):
        cephes.kn(1,1)
    def test_kolmogi(self):
        assert_equal(cephes.kolmogi(1),0.0)
        assert_(np.isnan(cephes.kolmogi(np.nan)))
    def test_kolmogorov(self):
        assert_equal(cephes.kolmogorov(0),1.0)
    def _check_kv(self):
        cephes.kv(1,1)
    def _check_kve(self):
        cephes.kve(1,1)
    def test_log1p(self):
        log1p = cephes.log1p
        assert_equal(log1p(0), 0.0)
        assert_equal(log1p(-1), -np.inf)
        assert_equal(log1p(-2), np.nan)
        assert_equal(log1p(np.inf), np.inf)
    # earlier numpy version don't guarantee that npy_clog conforms to C99
    @dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
    def test_log1p_complex(self):
        # C99 Annex G special values for clog, with argument shifted by +1.
        log1p = cephes.log1p
        c = complex
        assert_equal(log1p(0 + 0j), 0 + 0j)
        assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
        assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
        assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
        assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
        assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
        assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
        assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
        assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
        assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
        assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
        assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
        assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
    def test_lpmv(self):
        assert_equal(cephes.lpmv(0,0,1),1.0)
    def test_mathieu_a(self):
        assert_equal(cephes.mathieu_a(1,0),1.0)
    def test_mathieu_b(self):
        assert_equal(cephes.mathieu_b(1,0),1.0)
    def test_mathieu_cem(self):
        assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
        # Test AMS 20.2.27
        @np.vectorize
        def ce_smallq(m, q, z):
            z *= np.pi/180
            if m == 0:
                return 2**(-0.5) * (1 - .5*q*cos(2*z))  # + O(q^2)
            elif m == 1:
                return cos(z) - q/8 * cos(3*z)  # + O(q^2)
            elif m == 2:
                return cos(2*z) - q*(cos(4*z)/12 - 1/4)  # + O(q^2)
            else:
                return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1)))  # + O(q^2)
        m = np.arange(0, 100)
        q = np.r_[0, np.logspace(-30, -9, 10)]
        assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
                        ce_smallq(m[:,None], q[None,:], 0.123),
                        rtol=1e-14, atol=0)
    def test_mathieu_sem(self):
        assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
        # Test AMS 20.2.27
        @np.vectorize
        def se_smallq(m, q, z):
            z *= np.pi/180
            if m == 1:
                return sin(z) - q/8 * sin(3*z)  # + O(q^2)
            elif m == 2:
                return sin(2*z) - q*sin(4*z)/12  # + O(q^2)
            else:
                return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1)))  # + O(q^2)
        m = np.arange(1, 100)
        q = np.r_[0, np.logspace(-30, -9, 10)]
        assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
                        se_smallq(m[:,None], q[None,:], 0.123),
                        rtol=1e-14, atol=0)
    def test_mathieu_modcem1(self):
        assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
    def test_mathieu_modcem2(self):
        cephes.mathieu_modcem2(1,1,1)
        # Test reflection relation AMS 20.6.19
        m = np.arange(0, 4)[:,None,None]
        q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
        z = np.linspace(0, 1, 7)[None,None,:]
        y1 = cephes.mathieu_modcem2(m, q, -z)[0]
        fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
        y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
        assert_allclose(y1, y2, rtol=1e-10)
    def test_mathieu_modsem1(self):
        assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
    def test_mathieu_modsem2(self):
        cephes.mathieu_modsem2(1,1,1)
        # Test reflection relation AMS 20.6.20
        m = np.arange(1, 4)[:,None,None]
        q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
        z = np.linspace(0, 1, 7)[None,None,:]
        y1 = cephes.mathieu_modsem2(m, q, -z)[0]
        fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
        y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
        assert_allclose(y1, y2, rtol=1e-10)
    def test_mathieu_overflow(self):
        # Check that these return NaNs instead of causing a SEGV
        assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
    def test_mathieu_ticket_1847(self):
        # Regression test --- this call had some out-of-bounds access
        # and could return nan occasionally
        for k in range(60):
            v = cephes.mathieu_modsem2(2, 100, -1)
            # Values from ACM TOMS 804 (derivate by numerical differentiation)
            assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
            assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
    def test_modfresnelm(self):
        cephes.modfresnelm(0)
    def test_modfresnelp(self):
        cephes.modfresnelp(0)
    def _check_modstruve(self):
        assert_equal(cephes.modstruve(1,0),0.0)
    def test_nbdtr(self):
        assert_equal(cephes.nbdtr(1,1,1),1.0)
    def test_nbdtrc(self):
        assert_equal(cephes.nbdtrc(1,1,1),0.0)
    def test_nbdtri(self):
        assert_equal(cephes.nbdtri(1,1,1),1.0)
    def __check_nbdtrik(self):
        cephes.nbdtrik(1,.4,.5)
    def test_nbdtrin(self):
        assert_equal(cephes.nbdtrin(1,0,0),5.0)
    def test_ncfdtr(self):
        assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
    def test_ncfdtri(self):
        assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
    def test_ncfdtridfd(self):
        cephes.ncfdtridfd(1,0.5,0,1)
    def __check_ncfdtridfn(self):
        cephes.ncfdtridfn(1,0.5,0,1)
    def __check_ncfdtrinc(self):
        cephes.ncfdtrinc(1,0.5,0,1)
    def test_nctdtr(self):
        assert_equal(cephes.nctdtr(1,0,0),0.5)
        assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
        assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
        assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
        assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
        assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
        assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
        assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
    def __check_nctdtridf(self):
        cephes.nctdtridf(1,0.5,0)
    def test_nctdtrinc(self):
        cephes.nctdtrinc(1,0,0)
    def test_nctdtrit(self):
        cephes.nctdtrit(.1,0.2,.5)
    def test_ndtr(self):
        assert_equal(cephes.ndtr(0), 0.5)
        assert_almost_equal(cephes.ndtr(1), 0.84134474606)
    def test_ndtri(self):
        assert_equal(cephes.ndtri(0.5),0.0)
    def test_nrdtrimn(self):
        assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
    def test_nrdtrisd(self):
        assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
                         atol=0, rtol=0)
    def test_obl_ang1(self):
        cephes.obl_ang1(1,1,1,0)
    def test_obl_ang1_cv(self):
        result = cephes.obl_ang1_cv(1,1,1,1,0)
        assert_almost_equal(result[0],1.0)
        assert_almost_equal(result[1],0.0)
    def _check_obl_cv(self):
        assert_equal(cephes.obl_cv(1,1,0),2.0)
    def test_obl_rad1(self):
        cephes.obl_rad1(1,1,1,0)
    def test_obl_rad1_cv(self):
        cephes.obl_rad1_cv(1,1,1,1,0)
    def test_obl_rad2(self):
        cephes.obl_rad2(1,1,1,0)
    def test_obl_rad2_cv(self):
        cephes.obl_rad2_cv(1,1,1,1,0)
    def test_pbdv(self):
        assert_equal(cephes.pbdv(1,0),(0.0,1.0))
    def test_pbvv(self):
        cephes.pbvv(1,0)
    def test_pbwa(self):
        cephes.pbwa(1,0)
    def test_pdtr(self):
        val = cephes.pdtr(0, 1)
        assert_almost_equal(val, np.exp(-1))
        # Edge case: m = 0.
        val = cephes.pdtr([0, 1, 2], 0.0)
        assert_array_equal(val, [1, 1, 1])
    def test_pdtrc(self):
        val = cephes.pdtrc(0, 1)
        assert_almost_equal(val, 1 - np.exp(-1))
        # Edge case: m = 0.
        val = cephes.pdtrc([0, 1, 2], 0.0)
        assert_array_equal(val, [0, 0, 0])
    def test_pdtri(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            cephes.pdtri(0.5,0.5)
    def test_pdtrik(self):
        k = cephes.pdtrik(0.5, 1)
        assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
        # Edge case: m = 0 or very small.
        k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
        assert_array_equal(k, np.zeros((3, 3)))
    def test_pro_ang1(self):
        cephes.pro_ang1(1,1,1,0)
    def test_pro_ang1_cv(self):
        assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
                                  array((1.0,0.0)))
    def _check_pro_cv(self):
        assert_equal(cephes.pro_cv(1,1,0),2.0)
    def test_pro_rad1(self):
        cephes.pro_rad1(1,1,1,0.1)
    def test_pro_rad1_cv(self):
        cephes.pro_rad1_cv(1,1,1,1,0)
    def test_pro_rad2(self):
        cephes.pro_rad2(1,1,1,0)
    def test_pro_rad2_cv(self):
        cephes.pro_rad2_cv(1,1,1,1,0)
    def test_psi(self):
        cephes.psi(1)
    def test_radian(self):
        assert_equal(cephes.radian(0,0,0),0)
    def test_rgamma(self):
        assert_equal(cephes.rgamma(1),1.0)
    def test_round(self):
        # cephes.round rounds halves away from zero.
        assert_equal(cephes.round(3.4),3.0)
        assert_equal(cephes.round(-3.4),-3.0)
        assert_equal(cephes.round(3.6),4.0)
        assert_equal(cephes.round(-3.6),-4.0)
        assert_equal(cephes.round(3.5),4.0)
        assert_equal(cephes.round(-3.5),-4.0)
    def test_shichi(self):
        cephes.shichi(1)
    def test_sici(self):
        cephes.sici(1)
        s, c = cephes.sici(np.inf)
        assert_almost_equal(s, np.pi * 0.5)
        assert_almost_equal(c, 0)
        s, c = cephes.sici(-np.inf)
        assert_almost_equal(s, -np.pi * 0.5)
        assert_(np.isnan(c), "cosine integral(-inf) is not nan")
    def test_sindg(self):
        assert_equal(cephes.sindg(90),1.0)
    def test_smirnov(self):
        assert_equal(cephes.smirnov(1,.1),0.9)
        assert_(np.isnan(cephes.smirnov(1,np.nan)))
    def test_smirnovi(self):
        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
        assert_(np.isnan(cephes.smirnovi(1,np.nan)))
    def test_spence(self):
        assert_equal(cephes.spence(1),0.0)
    def test_stdtr(self):
        assert_equal(cephes.stdtr(1,0),0.5)
        assert_almost_equal(cephes.stdtr(1,1), 0.75)
        assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
    def test_stdtridf(self):
        cephes.stdtridf(0.7,1)
    def test_stdtrit(self):
        cephes.stdtrit(1,0.7)
    def test_struve(self):
        assert_equal(cephes.struve(0,0),0.0)
    def test_tandg(self):
        assert_equal(cephes.tandg(45),1.0)
    def test_tklmbda(self):
        assert_almost_equal(cephes.tklmbda(1,1),1.0)
    def test_y0(self):
        cephes.y0(1)
    def test_y1(self):
        cephes.y1(1)
    def test_yn(self):
        cephes.yn(1,1)
    def test_yv(self):
        cephes.yv(1,1)
    def _check_yve(self):
        cephes.yve(1,1)
    def test_zeta(self):
        assert_allclose(zeta(2,2), pi**2/6 - 1, rtol=1e-12)
    def test_zetac(self):
        assert_equal(cephes.zetac(0),-1.5)
    def test_zeta_1arg(self):
        assert_allclose(zeta(2), pi**2/6, rtol=1e-12)
        assert_allclose(zeta(4), pi**4/90, rtol=1e-12)
    def test_wofz(self):
        # Reference values computed with mpmath (Faddeeva function).
        z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
             complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
             complex(-0.0000000234545,1.1234), complex(-3.,5.1),
             complex(-53,30.1), complex(0.0,0.12345),
             complex(11,1), complex(-22,-2), complex(9,-28),
             complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
             ]
        w = [
            complex(-3.78270245518980507452677445620103199303131110e-7,
                    0.000903861276433172057331093754199933411710053155),
            complex(0.1764906227004816847297495349730234591778719532788,
                    -0.02146550539468457616788719893991501311573031095617),
            complex(0.2410250715772692146133539023007113781272362309451,
                    0.06087579663428089745895459735240964093522265589350),
            complex(0.30474420525691259245713884106959496013413834051768,
                    -0.20821893820283162728743734725471561394145872072738),
            complex(7.317131068972378096865595229600561710140617977e34,
                    8.321873499714402777186848353320412813066170427e34),
            complex(0.0615698507236323685519612934241429530190806818395,
                    -0.00676005783716575013073036218018565206070072304635),
            complex(0.3960793007699874918961319170187598400134746631,
                    -5.593152259116644920546186222529802777409274656e-9),
            complex(0.08217199226739447943295069917990417630675021771804,
                    -0.04701291087643609891018366143118110965272615832184),
            complex(0.00457246000350281640952328010227885008541748668738,
                    -0.00804900791411691821818731763401840373998654987934),
            complex(0.8746342859608052666092782112565360755791467973338452,
                    0.),
            complex(0.00468190164965444174367477874864366058339647648741,
                    0.0510735563901306197993676329845149741675029197050),
            complex(-0.0023193175200187620902125853834909543869428763219,
                    -0.025460054739731556004902057663500272721780776336),
            complex(9.11463368405637174660562096516414499772662584e304,
                    3.97101807145263333769664875189354358563218932e305),
            complex(-4.4927207857715598976165541011143706155432296e281,
                    -2.8019591213423077494444700357168707775769028e281),
            complex(2.820947917809305132678577516325951485807107151e-6,
                    2.820947917668257736791638444590253942253354058e-6),
            complex(2.82094791773878143474039725787438662716372268e-15,
                    2.82094791773878143474039725773333923127678361e-15)
        ]
        assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
    """Tests for the Airy functions Ai/Bi, their scaled variants, and the
    zero-finding routines ai_zeros/bi_zeros."""
    def test_airy(self):
        # This tests the airy function to ensure 8 place accuracy in computation
        x = special.airy(.99)
        assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
        x = special.airy(.41)
        assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
        x = special.airy(-.36)
        assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
    def test_airye(self):
        # airye must equal airy times the appropriate exponential scaling.
        a = special.airye(0.01)
        b = special.airy(0.01)
        b1 = [None]*4
        for n in range(2):
            b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
        for n in range(2,4):
            b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
        assert_array_almost_equal(a,b1,6)
    def test_bi_zeros(self):
        bi = special.bi_zeros(2)
        bia = (array([-1.17371322, -3.2710930]),
               array([-2.29443968, -4.07315509]),
               array([-0.45494438, 0.39652284]),
               array([0.60195789, -0.76031014]))
        assert_array_almost_equal(bi,bia,4)
        bi = special.bi_zeros(5)
        assert_array_almost_equal(bi[0],array([-1.173713222709127,
                                               -3.271093302836352,
                                               -4.830737841662016,
                                               -6.169852128310251,
                                               -7.376762079367764]),11)
        assert_array_almost_equal(bi[1],array([-2.294439682614122,
                                               -4.073155089071828,
                                               -5.512395729663599,
                                               -6.781294445990305,
                                               -7.940178689168587]),10)
        assert_array_almost_equal(bi[2],array([-0.454944383639657,
                                               0.396522836094465,
                                               -0.367969161486959,
                                               0.349499116831805,
                                               -0.336026240133662]),11)
        assert_array_almost_equal(bi[3],array([0.601957887976239,
                                               -0.760310141492801,
                                               0.836991012619261,
                                               -0.88947990142654,
                                               0.929983638568022]),10)
    def test_ai_zeros(self):
        ai = special.ai_zeros(1)
        assert_array_almost_equal(ai,(array([-2.33810741]),
                                      array([-1.01879297]),
                                      array([0.5357]),
                                      array([0.7012])),4)
    def test_ai_zeros_big(self):
        # Many zeros: check consistency with direct airy() evaluation and
        # the tabulated leading zeros.
        z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
        ai_z, aip_z, _, _ = special.airy(z)
        ai_zp, aip_zp, _, _ = special.airy(zp)
        ai_envelope = 1/abs(z)**(1./4)
        aip_envelope = abs(zp)**(1./4)
        # Check values
        assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
        assert_allclose(aip_zx, aip_z, rtol=1e-10)
        # Check they are zeros
        assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
        assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
        # Check first zeros, DLMF 9.9.1
        assert_allclose(z[:6],
                        [-2.3381074105, -4.0879494441, -5.5205598281,
                         -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
        assert_allclose(zp[:6],
                        [-1.0187929716, -3.2481975822, -4.8200992112,
                         -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
    def test_bi_zeros_big(self):
        z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
        _, _, bi_z, bip_z = special.airy(z)
        _, _, bi_zp, bip_zp = special.airy(zp)
        bi_envelope = 1/abs(z)**(1./4)
        bip_envelope = abs(zp)**(1./4)
        # Check values
        assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
        assert_allclose(bip_zx, bip_z, rtol=1e-10)
        # Check they are zeros
        assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
        assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
        # Check first zeros, DLMF 9.9.2
        assert_allclose(z[:6],
                        [-1.1737132227, -3.2710933028, -4.8307378417,
                         -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
        assert_allclose(zp[:6],
                        [-2.2944396826, -4.0731550891, -5.5123957297,
                         -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
    """Check assoc_laguerre against the equivalent genlaguerre polynomial."""
    def test_assoc_laguerre(self):
        # assoc_laguerre(x, 11, 1) must agree with evaluating the
        # generalized Laguerre polynomial object L_11^(1) at x.
        poly = special.genlaguerre(11, 1)
        for point in (.2, 1):
            direct = special.assoc_laguerre(point, 11, 1)
            assert_array_almost_equal(direct, poly(point), 8)
class TestBesselpoly(TestCase):
    """Placeholder suite for special.besselpoly."""
    def test_besselpoly(self):
        # No standalone checks implemented yet (marked '###' as not done in
        # the to-do legend at the top of this file).
        pass
class TestKelvin(TestCase):
    """Tests for the Kelvin functions (ber, bei, ker, kei, their derivatives)
    and their zero-finding routines.

    Reference values for the zeros come from Abramowitz & Stegun,
    Table 9.12 (section 9.9, p. 381).
    """
    def test_bei(self):
        mbei = special.bei(2)
        assert_almost_equal(mbei, 0.9722916273066613,5)  # this may not be exact
    def test_beip(self):
        mbeip = special.beip(2)
        assert_almost_equal(mbeip,0.91701361338403631,5)  # this may not be exact
    def test_ber(self):
        mber = special.ber(2)
        assert_almost_equal(mber,0.75173418271380821,5)  # this may not be exact
    def test_berp(self):
        mberp = special.berp(2)
        assert_almost_equal(mberp,-0.49306712470943909,5)  # this may not be exact
    def test_bei_zeros(self):
        # Abramowitz & Stegun, Table 9.12
        bi = special.bei_zeros(5)
        assert_array_almost_equal(bi,array([5.02622,
                                            9.45541,
                                            13.89349,
                                            18.33398,
                                            22.77544]),4)
    def test_beip_zeros(self):
        bip = special.beip_zeros(5)
        assert_array_almost_equal(bip,array([3.772673304934953,
                                             8.280987849760042,
                                             12.742147523633703,
                                             17.193431752512542,
                                             21.641143941167325]),8)
    def test_ber_zeros(self):
        ber = special.ber_zeros(5)
        assert_array_almost_equal(ber,array([2.84892,
                                             7.23883,
                                             11.67396,
                                             16.11356,
                                             20.55463]),4)
    def test_berp_zeros(self):
        brp = special.berp_zeros(5)
        assert_array_almost_equal(brp,array([6.03871,
                                             10.51364,
                                             14.96844,
                                             19.41758,
                                             23.86430]),4)
    def test_kelvin(self):
        # kelvin() bundles all eight functions as four complex values.
        mkelv = special.kelvin(2)
        assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
                                         special.ker(2) + special.kei(2)*1j,
                                         special.berp(2) + special.beip(2)*1j,
                                         special.kerp(2) + special.keip(2)*1j),8)
    def test_kei(self):
        mkei = special.kei(2)
        assert_almost_equal(mkei,-0.20240006776470432,5)
    def test_keip(self):
        mkeip = special.keip(2)
        assert_almost_equal(mkeip,0.21980790991960536,5)
    def test_ker(self):
        mker = special.ker(2)
        assert_almost_equal(mker,-0.041664513991509472,5)
    def test_kerp(self):
        mkerp = special.kerp(2)
        assert_almost_equal(mkerp,-0.10660096588105264,5)
    def test_kei_zeros(self):
        kei = special.kei_zeros(5)
        assert_array_almost_equal(kei,array([3.91467,
                                             8.34422,
                                             12.78256,
                                             17.22314,
                                             21.66464]),4)
    def test_keip_zeros(self):
        keip = special.keip_zeros(5)
        assert_array_almost_equal(keip,array([4.93181,
                                              9.40405,
                                              13.85827,
                                              18.30717,
                                              22.75379]),4)
    # numbers come from 9.9 of A&S pg. 381
    def test_kelvin_zeros(self):
        # kelvin_zeros returns the zeros of all eight Kelvin functions.
        tmp = special.kelvin_zeros(5)
        berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
        assert_array_almost_equal(berz,array([2.84892,
                                              7.23883,
                                              11.67396,
                                              16.11356,
                                              20.55463]),4)
        assert_array_almost_equal(beiz,array([5.02622,
                                              9.45541,
                                              13.89349,
                                              18.33398,
                                              22.77544]),4)
        assert_array_almost_equal(kerz,array([1.71854,
                                              6.12728,
                                              10.56294,
                                              15.00269,
                                              19.44382]),4)
        assert_array_almost_equal(keiz,array([3.91467,
                                              8.34422,
                                              12.78256,
                                              17.22314,
                                              21.66464]),4)
        assert_array_almost_equal(berpz,array([6.03871,
                                               10.51364,
                                               14.96844,
                                               19.41758,
                                               23.86430]),4)
        assert_array_almost_equal(beipz,array([3.77267,
                 # table from 1927 had 3.77320
                 # but this is more accurate
                                               8.28099,
                                               12.74215,
                                               17.19343,
                                               21.64114]),4)
        assert_array_almost_equal(kerpz,array([2.66584,
                                               7.17212,
                                               11.63218,
                                               16.08312,
                                               20.53068]),4)
        assert_array_almost_equal(keipz,array([4.93181,
                                               9.40405,
                                               13.85827,
                                               18.30717,
                                               22.75379]),4)
    def test_ker_zeros(self):
        ker = special.ker_zeros(5)
        assert_array_almost_equal(ker,array([1.71854,
                                             6.12728,
                                             10.56294,
                                             15.00269,
                                             19.44381]),4)
    def test_kerp_zeros(self):
        kerp = special.kerp_zeros(5)
        assert_array_almost_equal(kerp,array([2.66584,
                                              7.17212,
                                              11.63218,
                                              16.08312,
                                              20.53068]),4)
class TestBernoulli(TestCase):
    """Check the leading Bernoulli numbers returned by special.bernoulli."""
    def test_bernoulli(self):
        # B_0..B_5 = 1, -1/2, 1/6, 0, -1/30, 0 (to four decimal places).
        expected = array([1.0000, -0.5000, 0.1667, 0.0000, -0.0333, 0.0000])
        assert_array_almost_equal(special.bernoulli(5), expected, 4)
class TestBeta(TestCase):
    """Consistency checks for beta, betaln, betainc and betaincinv."""
    def test_beta(self):
        # beta(a, b) == gamma(a) * gamma(b) / gamma(a + b)
        via_gamma = (special.gamma(2)*special.gamma(4))/special.gamma(6)
        assert_almost_equal(special.beta(2,4), via_gamma, 8)
    def test_betaln(self):
        # betaln is the logarithm of |beta|.
        expected = log(abs(special.beta(2,4)))
        assert_almost_equal(special.betaln(2,4), expected, 8)
    def test_betainc(self):
        # The regularized incomplete beta I_x(1, 1) is the identity in x.
        assert_almost_equal(special.betainc(1,1,.2), 0.2, 8)
    def test_betaincinv(self):
        # betaincinv must be the functional inverse of betainc.
        inverse = special.betaincinv(2,4,.5)
        assert_almost_equal(special.betainc(2,4,inverse), .5, 5)
class TestCombinatorics(TestCase):
    """Tests for the combinatorial functions special.comb and special.perm."""

    def test_comb(self):
        # Vectorized float path.
        assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
        assert_almost_equal(special.comb(10, 3), 120.)
        # Exact integer arithmetic, with and without repetition.
        assert_equal(special.comb(10, 3, exact=True), 120)
        assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
        # The exact path must agree with the float path across a whole row.
        assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
                        special.comb(20, list(range(21))), atol=1e-15)
        # Arguments past the native integer range (forces the bignum path).
        ii = np.iinfo(int).max + 1
        assert_equal(special.comb(ii, ii-1, exact=True), ii)
        # A value too large for a float mantissa: exact big-integer result.
        expected = 100891344545564193334812497256
        assert_equal(special.comb(100, 50, exact=True), expected)

    def test_comb_with_np_int64(self):
        # np.int64 inputs must behave identically to Python ints.
        n = 70
        k = 30
        np_n = np.int64(n)
        np_k = np.int64(k)
        assert_equal(special.comb(np_n, np_k, exact=True),
                     special.comb(n, k, exact=True))

    def test_comb_zeros(self):
        # Out-of-domain arguments return 0 in both exact and float modes.
        assert_equal(special.comb(2, 3, exact=True), 0)
        assert_equal(special.comb(-1, 3, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=False), 0)
        assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 120.])

    def test_perm(self):
        # Permutations: vectorized float path and exact integer path.
        assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
        assert_almost_equal(special.perm(10, 3), 720.)
        assert_equal(special.perm(10, 3, exact=True), 720)

    def test_perm_zeros(self):
        # Out-of-domain arguments return 0 in both exact and float modes.
        assert_equal(special.perm(2, 3, exact=True), 0)
        assert_equal(special.perm(-1, 3, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=False), 0)
        assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 720.])
class TestTrigonometric(TestCase):
    """Degree-based and auxiliary trig helpers: cbrt, cosdg, cosm1,
    cotdg, sinc and sindg."""

    def test_cbrt(self):
        # Cube root of a perfect cube.
        assert_approx_equal(special.cbrt(27), 27 ** (1.0 / 3.0))

    def test_cbrtmore(self):
        # Non-integer argument.
        assert_almost_equal(special.cbrt(27.9), 27.9 ** (1.0 / 3.0), 8)

    def test_cosdg(self):
        # cosdg takes degrees: cosdg(90) == cos(pi/2).
        assert_almost_equal(special.cosdg(90), cos(pi / 2.0), 8)

    def test_cosdgmore(self):
        assert_almost_equal(special.cosdg(30), cos(pi / 6.0), 8)

    def test_cosm1(self):
        # cosm1(x) = cos(x) - 1, computed without cancellation.
        points = (0, .3, pi / 10)
        computed = tuple(special.cosm1(pt) for pt in points)
        reference = tuple(cos(pt) - 1 for pt in points)
        assert_array_almost_equal(computed, reference, 8)

    def test_cotdg(self):
        assert_almost_equal(special.cotdg(30), tan(pi / 6.0) ** (-1), 8)

    def test_cotdgmore(self):
        assert_almost_equal(special.cotdg(45), tan(pi / 4.0) ** (-1), 8)

    def test_specialpoints(self):
        # cotdg should be essentially exact at multiples of 45 degrees.
        cases = [(45, 1.0), (-45, -1.0), (90, 0.0), (-90, 0.0),
                 (135, -1.0), (-135, 1.0), (225, 1.0), (-225, -1.0),
                 (270, 0.0), (-270, 0.0), (315, -1.0), (-315, 1.0),
                 (765, 1.0)]
        for angle, expected in cases:
            assert_almost_equal(special.cotdg(angle), expected, 14)

    def test_sinc(self):
        # The sinc implementation and more extensive sinc tests are in numpy.
        assert_array_equal(special.sinc([0]), 1)
        assert_equal(special.sinc(0.0), 1.0)

    def test_sindg(self):
        assert_equal(special.sindg(90), 1.0)

    def test_sindgmore(self):
        assert_almost_equal(special.sindg(30), sin(pi / 6.0), 8)
        assert_almost_equal(special.sindg(45), sin(pi / 4.0), 8)
class TestTandg(TestCase):
    """Tests for tandg, the tangent of an angle given in degrees."""

    def test_tandg(self):
        assert_almost_equal(special.tandg(30), tan(pi / 6.0), 8)

    def test_tandgmore(self):
        assert_almost_equal(special.tandg(45), tan(pi / 4.0), 8)
        assert_almost_equal(special.tandg(60), tan(pi / 3.0), 8)

    def test_specialpoints(self):
        # tandg should be essentially exact at multiples of 45 degrees.
        cases = [(0, 0.0), (45, 1.0), (-45, -1.0), (135, -1.0),
                 (-135, 1.0), (180, 0.0), (-180, 0.0), (225, 1.0),
                 (-225, -1.0), (315, -1.0), (-315, 1.0)]
        for angle, expected in cases:
            assert_almost_equal(special.tandg(angle), expected, 14)
class TestEllip(TestCase):
    """Tests for the elliptic integrals and Jacobi elliptic functions.

    BUG FIX: the singular-case checks in ``test_ellipkinc_singular``
    previously used ``rtol=1e14`` (a typo for ``rtol=1e-14``), which made
    every ``assert_allclose`` there vacuously true.
    """

    def test_ellipj_nan(self):
        """Regression test for #912."""
        special.ellipj(0.5, np.nan)

    def test_ellipj(self):
        # At m = 0 the Jacobi functions reduce to sn=sin, cn=cos, dn=1,
        # and the amplitude ph equals u.
        el = special.ellipj(0.2,0)
        rel = [sin(0.2),cos(0.2),1.0,0.20]
        assert_array_almost_equal(el,rel,13)

    def test_ellipk(self):
        elk = special.ellipk(.2)
        assert_almost_equal(elk,1.659623598610528,11)
        # Special values of ellipkm1 (argument is 1 - m).
        assert_equal(special.ellipkm1(0.0), np.inf)
        assert_equal(special.ellipkm1(1.0), pi/2)
        assert_equal(special.ellipkm1(np.inf), 0.0)
        assert_equal(special.ellipkm1(np.nan), np.nan)
        assert_equal(special.ellipkm1(-1), np.nan)
        assert_allclose(special.ellipk(-10), 0.7908718902387385)

    def test_ellipkinc(self):
        # ellipkinc(pi/2, m) must equal the complete integral ellipk(m).
        elkinc = special.ellipkinc(pi/2,.2)
        elk = special.ellipk(0.2)
        assert_almost_equal(elkinc,elk,15)
        alpha = 20*pi/180
        phi = 45*pi/180
        m = sin(alpha)**2
        elkinc = special.ellipkinc(phi,m)
        assert_almost_equal(elkinc,0.79398143,8)
        # From pg. 614 of A & S
        # Special values and edge cases of the two arguments.
        assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
        assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
        assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipkinc(pi/2, 2), np.nan)
        assert_equal(special.ellipkinc(0, 0.5), 0.0)
        assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
        assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)

    def test_ellipkinc_2(self):
        # Regression test for gh-3550
        # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipkinc(phi, mvals)
        assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipkinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)

    def test_ellipkinc_singular(self):
        # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
        # NOTE: rtol was 1e14 (typo); corrected to 1e-14 so these
        # closed-form comparisons actually constrain the result.
        xlog = np.logspace(-300, -17, 25)
        xlin = np.linspace(1e-17, 0.1, 25)
        xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)

    def test_ellipe(self):
        ele = special.ellipe(.2)
        assert_almost_equal(ele,1.4890350580958529,8)
        # Special values of the complete integral of the second kind.
        assert_equal(special.ellipe(0.0), pi/2)
        assert_equal(special.ellipe(1.0), 1.0)
        assert_equal(special.ellipe(-np.inf), np.inf)
        assert_equal(special.ellipe(np.nan), np.nan)
        assert_equal(special.ellipe(2), np.nan)
        assert_allclose(special.ellipe(-10), 3.6391380384177689)

    def test_ellipeinc(self):
        # ellipeinc(pi/2, m) must equal the complete integral ellipe(m).
        eleinc = special.ellipeinc(pi/2,.2)
        ele = special.ellipe(0.2)
        assert_almost_equal(eleinc,ele,14)
        # pg 617 of A & S
        alpha, phi = 52*pi/180,35*pi/180
        m = sin(alpha)**2
        eleinc = special.ellipeinc(phi,m)
        assert_almost_equal(eleinc, 0.58823065, 8)
        # Special values and edge cases of the two arguments.
        assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
        assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
        assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipeinc(pi/2, 2), np.nan)
        assert_equal(special.ellipeinc(0, 0.5), 0.0)
        assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
        assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
        assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)

    def test_ellipeinc_2(self):
        # Regression test for gh-3550
        # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipeinc(phi, mvals)
        assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipeinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
    """Tests for erf and its relatives (erfc, erfcx, erfi, dawsn, wofz)."""

    def test_erf(self):
        er = special.erf(.25)
        assert_almost_equal(er,0.2763263902,8)

    def test_erf_zeros(self):
        # First five complex zeros of erf in the first quadrant.
        erz = special.erf_zeros(5)
        erzr = array([1.45061616+1.88094300j,
                      2.24465928+2.61657514j,
                      2.83974105+3.17562810j,
                      3.33546074+3.64617438j,
                      3.76900557+4.06069723j])
        assert_array_almost_equal(erz,erzr,4)

    def _check_variant_func(self, func, other_func, rtol, atol=0):
        # Compare `func` against the mathematically equivalent
        # `other_func` on heavy-tailed random points, both on the real
        # line and in the complex plane.  Seeded for reproducibility.
        np.random.seed(1234)
        n = 10000
        x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        z = x + 1j*y
        old_errors = np.seterr(all='ignore')
        try:
            w = other_func(z)
            w_real = other_func(x).real
            # Drop points where the reference overflowed to inf/nan;
            # note x/z are masked by *different* masks, matching w_real/w.
            mask = np.isfinite(w)
            w = w[mask]
            z = z[mask]
            mask = np.isfinite(w_real)
            w_real = w_real[mask]
            x = x[mask]
            # test both real and complex variants
            assert_func_equal(func, w, z, rtol=rtol, atol=atol)
            assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
        finally:
            np.seterr(**old_errors)

    def test_erfc_consistent(self):
        self._check_variant_func(
            cephes.erfc,
            lambda z: 1 - cephes.erf(z),
            rtol=1e-12,
            atol=1e-14  # <- the test function loses precision
        )

    def test_erfcx_consistent(self):
        # erfcx(z) = exp(z^2) * erfc(z).
        self._check_variant_func(
            cephes.erfcx,
            lambda z: np.exp(z*z) * cephes.erfc(z),
            rtol=1e-12
        )

    def test_erfi_consistent(self):
        # erfi(z) = -i * erf(i*z).
        self._check_variant_func(
            cephes.erfi,
            lambda z: -1j * cephes.erf(1j*z),
            rtol=1e-12
        )

    def test_dawsn_consistent(self):
        # dawsn(z) = sqrt(pi)/2 * exp(-z^2) * erfi(z).
        self._check_variant_func(
            cephes.dawsn,
            lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
            rtol=1e-12
        )

    def test_erfcinv(self):
        i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparison
        # of -0.0 and 0.0 doesn't fail.
        assert_array_equal(i, 0)

    def test_erfinv(self):
        i = special.erfinv(0)
        assert_equal(i,0)

    def test_errprint(self):
        # NOTE(review): errprint toggles *global* error-reporting state
        # (and was removed in later SciPy releases) — confirm the target
        # SciPy version still provides it.
        a = special.errprint()
        b = 1-a  # a is the state 1-a inverts state
        c = special.errprint(b)  # returns last state 'a'
        assert_equal(a,c)
        d = special.errprint(a)  # returns to original state
        assert_equal(d,b)  # makes sure state was returned
        # assert_equal(d,1-a)

    def test_erf_nan_inf(self):
        # erf limits: erf(+-inf) = +-1; nan propagates.
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -1, 1]
        assert_allclose(special.erf(vals), expected, rtol=1e-15)

    def test_erfc_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, 2, 0]
        assert_allclose(special.erfc(vals), expected, rtol=1e-15)

    def test_erfcx_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, np.inf, 0]
        assert_allclose(special.erfcx(vals), expected, rtol=1e-15)

    def test_erfi_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -np.inf, np.inf]
        assert_allclose(special.erfi(vals), expected, rtol=1e-15)

    def test_dawsn_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -0.0, 0.0]
        assert_allclose(special.dawsn(vals), expected, rtol=1e-15)

    def test_wofz_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
        assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler(TestCase):
    """Tests for scipy.special.euler (Euler numbers)."""

    def test_euler(self):
        # Small orders (also guards against segfaults):
        # E_0 = 1, E_1 = 0, E_2 = -1.
        assert_allclose(special.euler(0), [1], rtol=1e-15)
        assert_allclose(special.euler(1), [1, 0], rtol=1e-15)
        assert_allclose(special.euler(2), [1, 0, -1], rtol=1e-15)
        # Compare euler(24) against the |E_2k| magnitudes from MathWorld;
        # odd-index Euler numbers are all zero.
        eu24 = special.euler(24)
        mathworld = [1,1,5,61,1385,50521,2702765,199360981,
                     19391512145,2404879675441,
                     370371188237525,69348874393137901,
                     15514534163557086905]
        correct = zeros((25,),'d')
        for k, magnitude in enumerate(mathworld):
            # Signs of E_2k alternate: +, -, +, ...
            sign = -1.0 if k % 2 else 1.0
            correct[2*k] = sign * float(magnitude)
        old_settings = np.seterr(all='ignore')
        try:
            # Largest relative error over the nonzero entries.
            relative = nan_to_num((eu24-correct)/correct)
            worst = max(relative)
        finally:
            np.seterr(**old_settings)
        assert_almost_equal(worst, 0.0, 14)
class TestExp(TestCase):
    """Tests for exp2, exp10 and expm1."""

    def test_exp2(self):
        assert_equal(special.exp2(2), 2 ** 2)

    def test_exp2more(self):
        assert_almost_equal(special.exp2(2.5), 2 ** (2.5), 8)

    def test_exp10(self):
        assert_approx_equal(special.exp10(2), 10 ** 2)

    def test_exp10more(self):
        assert_almost_equal(special.exp10(2.5), 10 ** (2.5), 8)

    def test_expm1(self):
        # expm1(x) = exp(x) - 1, avoiding cancellation near zero.
        pts = (2, 3, 4)
        computed = tuple(special.expm1(p) for p in pts)
        reference = tuple(exp(p) - 1 for p in pts)
        assert_array_almost_equal(computed, reference, 8)

    def test_expm1more(self):
        pts = (2, 2.1, 2.2)
        computed = tuple(special.expm1(p) for p in pts)
        reference = tuple(exp(p) - 1 for p in pts)
        assert_array_almost_equal(computed, reference, 8)
class TestFactorialFunctions(TestCase):
    """Tests for factorial, factorial2 and factorialk."""

    def test_factorial(self):
        # Some known values, float math
        assert_array_almost_equal(special.factorial(0), 1)
        assert_array_almost_equal(special.factorial(1), 1)
        assert_array_almost_equal(special.factorial(2), 2)
        assert_array_almost_equal([6., 24., 120.],
                                  special.factorial([3, 4, 5], exact=False))
        assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
                                  [[120, 6], [24, 6]])
        # Some known values, integer math
        assert_equal(special.factorial(0, exact=True), 1)
        assert_equal(special.factorial(1, exact=True), 1)
        assert_equal(special.factorial(2, exact=True), 2)
        assert_equal(special.factorial(5, exact=True), 120)
        assert_equal(special.factorial(15, exact=True), 1307674368000)
        # ndarray shape is maintained
        assert_equal(special.factorial([7, 4, 15, 10], exact=True),
                     [5040, 24, 1307674368000, 3628800])
        assert_equal(special.factorial([[5, 3], [4, 3]], True),
                     [[120, 6], [24, 6]])
        # object arrays
        assert_equal(special.factorial(np.arange(-3, 22), True),
                     special.factorial(np.arange(-3, 22), False))
        # int64 array
        assert_equal(special.factorial(np.arange(-3, 15), True),
                     special.factorial(np.arange(-3, 15), False))
        # int32 array
        assert_equal(special.factorial(np.arange(-3, 5), True),
                     special.factorial(np.arange(-3, 5), False))
        # Consistent output for n < 0
        for exact in (True, False):
            assert_array_equal(0, special.factorial(-3, exact))
            assert_array_equal([1, 2, 0, 0],
                               special.factorial([1, 2, -5, -4], exact))
        for n in range(0, 22):
            # Compare all with math.factorial
            correct = math.factorial(n)
            assert_array_equal(correct, special.factorial(n, True))
            assert_array_equal(correct, special.factorial([n], True)[0])
            assert_allclose(float(correct), special.factorial(n, False))
            assert_allclose(float(correct), special.factorial([n], False)[0])
            # Compare exact=True vs False, scalar vs array
            assert_array_equal(special.factorial(n, True),
                               special.factorial(n, False))
            assert_array_equal(special.factorial([n], True),
                               special.factorial([n], False))

    def test_factorial2(self):
        # Double factorial: 7!! = 105, 8!! = 384, 9!! = 945.
        assert_array_almost_equal([105., 384., 945.],
                                  special.factorial2([7, 8, 9], exact=False))
        assert_equal(special.factorial2(7, exact=True), 105)

    def test_factorialk(self):
        # k-factorial: factorialk(n, 1) is n!, factorialk(5, 3) = 5*2 = 10.
        assert_equal(special.factorialk(5, 1, exact=True), 120)
        assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
    """Tests for the Fresnel integrals and their complex zeros."""

    def test_fresnel(self):
        # fresnel returns (S(x), C(x)).
        frs = array(special.fresnel(.5))
        assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)

    def test_fresnel_inf1(self):
        # Both integrals tend to 1/2 as x -> +inf.
        frs = special.fresnel(np.inf)
        assert_equal(frs, (0.5, 0.5))

    def test_fresnel_inf2(self):
        # Both integrals are odd, so they tend to -1/2 as x -> -inf.
        frs = special.fresnel(-np.inf)
        assert_equal(frs, (-0.5, -0.5))

    # values from pg 329 Table 7.11 of A & S
    # slightly corrected in 4th decimal place
    def test_fresnel_zeros(self):
        # First five complex zeros of S (szo) and C (czo).
        szo, czo = special.fresnel_zeros(5)
        assert_array_almost_equal(szo,
                                  array([2.0093+0.2885j,
                                         2.8335+0.2443j,
                                         3.4675+0.2185j,
                                         4.0026+0.2009j,
                                         4.4742+0.1877j]),3)
        assert_array_almost_equal(czo,
                                  array([1.7437+0.3057j,
                                         2.6515+0.2529j,
                                         3.3204+0.2240j,
                                         3.8757+0.2047j,
                                         4.3611+0.1907j]),3)
        # Sanity check: the integrals actually vanish at the reported zeros.
        vals1 = special.fresnel(szo)[0]
        vals2 = special.fresnel(czo)[1]
        assert_array_almost_equal(vals1,0,14)
        assert_array_almost_equal(vals2,0,14)

    def test_fresnelc_zeros(self):
        # fresnelc_zeros must agree with the C-component of fresnel_zeros.
        szo, czo = special.fresnel_zeros(6)
        frc = special.fresnelc_zeros(6)
        assert_array_almost_equal(frc,czo,12)

    def test_fresnels_zeros(self):
        # fresnels_zeros must agree with the S-component of fresnel_zeros.
        szo, czo = special.fresnel_zeros(5)
        frs = special.fresnels_zeros(5)
        assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
    """Tests for gamma, gammaln, the regularized incomplete gamma
    functions and their inverses."""

    def test_gamma(self):
        # gamma(5) = 4! = 24 exactly.
        assert_equal(special.gamma(5), 24.0)

    def test_gammaln(self):
        # gammaln must agree with log(gamma(...)).
        assert_almost_equal(special.gammaln(3), log(special.gamma(3)), 8)

    def test_gammainc(self):
        assert_almost_equal(special.gammainc(.5, .5), .7, 1)

    def test_gammaincnan(self):
        # Negative shape with a positive limit is undefined -> nan.
        assert_(isnan(special.gammainc(-1, 1)))

    def test_gammainczero(self):
        # bad arg but zero integration limit
        assert_equal(special.gammainc(-1, 0), 0.0)

    def test_gammaincinf(self):
        # Infinite upper limit: the regularized integral is 1.
        assert_equal(special.gammainc(0.5, np.inf), 1.0)

    def test_gammaincc(self):
        # Complement identity: gammaincc = 1 - gammainc.
        reference = 1 - special.gammainc(.5, .5)
        assert_almost_equal(special.gammaincc(.5, .5), reference, 8)

    def test_gammainccnan(self):
        assert_(isnan(special.gammaincc(-1, 1)))

    def test_gammainccinf(self):
        assert_equal(special.gammaincc(0.5, np.inf), 0.0)

    def test_gammainccinv(self):
        # The two inverses must agree at p = q = 0.5.
        assert_almost_equal(special.gammainccinv(.5, .5),
                            special.gammaincinv(.5, .5), 8)

    @with_special_errors
    def test_gammaincinv(self):
        # Round trip through gammainc.
        y = special.gammaincinv(.4, .4)
        assert_almost_equal(special.gammainc(.4, y), 0.4, 1)
        # Tiny-probability branch of the inverse.
        y = special.gammainc(10, 0.05)
        x = special.gammaincinv(10, 2.5715803516000736e-20)
        assert_almost_equal(0.05, x, decimal=10)
        assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
        x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
        assert_almost_equal(11.0, x, decimal=10)

    @with_special_errors
    def test_975(self):
        # Regression test for ticket #975 -- switch point in algorithm.
        # Check behaviour at the point itself, at the immediately
        # neighbouring floats, and a bit further away.
        pts = [0.25,
               np.nextafter(0.25, 0), 0.25 - 1e-12,
               np.nextafter(0.25, 1), 0.25 + 1e-12]
        for xp in pts:
            y = special.gammaincinv(.4, xp)
            x = special.gammainc(0.4, y)
            assert_tol_equal(x, xp, rtol=1e-12)

    def test_rgamma(self):
        # rgamma is the reciprocal gamma function.
        assert_almost_equal(special.rgamma(8), 1 / special.gamma(8), 8)

    def test_infinity(self):
        # gamma has poles at non-positive integers; rgamma is 0 there.
        assert_(np.isinf(special.gamma(-1)))
        assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
    """Tests for the Hankel functions and their exponentially scaled
    variants.

    Scaling conventions:
        hankel1e(v, z) = hankel1(v, z) * exp(-1j*z)
        hankel2e(v, z) = hankel2(v, z) * exp(+1j*z)
    """

    def test_negv1(self):
        # H1_{-n}(z) = (-1)^n * H1_n(z); for n = 3 that's a sign flip.
        assert_almost_equal(special.hankel1(-3, 2), -special.hankel1(3, 2), 14)

    def test_hankel1(self):
        # Defining relation: H1_v = J_v + i*Y_v.
        hank1 = special.hankel1(1, .1)
        hankrl = (special.jv(1, .1) + special.yv(1, .1)*1j)
        assert_almost_equal(hank1, hankrl, 8)

    def test_negv1e(self):
        assert_almost_equal(special.hankel1e(-3, 2), -special.hankel1e(3, 2), 14)

    def test_hankel1e(self):
        # Scaled variant: hankel1e(v, z) = hankel1(v, z) * exp(-1j*z).
        hank1e = special.hankel1e(1, .1)
        hankrle = special.hankel1(1, .1)*exp(-.1j)
        assert_almost_equal(hank1e, hankrle, 8)

    def test_negv2(self):
        assert_almost_equal(special.hankel2(-3, 2), -special.hankel2(3, 2), 14)

    def test_hankel2(self):
        # Defining relation: H2_v = J_v - i*Y_v.
        hank2 = special.hankel2(1, .1)
        hankrl2 = (special.jv(1, .1) - special.yv(1, .1)*1j)
        assert_almost_equal(hank2, hankrl2, 8)

    def test_neg2e(self):
        assert_almost_equal(special.hankel2e(-3, 2), -special.hankel2e(3, 2), 14)

    def test_hankl2e(self):
        # BUG FIX: this previously compared hankel2e(1, .1) with itself,
        # which is vacuously true.  Check the defining relation instead:
        # hankel2e(v, z) = hankel2(v, z) * exp(1j*z).
        hank2e = special.hankel2e(1, .1)
        hankrl2e = special.hankel2(1, .1)*exp(.1j)
        assert_almost_equal(hank2e, hankrl2e, 8)
class TestHyper(TestCase):
    """Tests for the hypergeometric functions (hyp0f1, hyp1f1, hyp2f1,
    hyperu) and the Hankel-function derivatives h1vp/h2vp."""

    def test_h1vp(self):
        # Derivative of H1: h1vp = J'_v + i*Y'_v.
        h1 = special.h1vp(1,.1)
        h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
        assert_almost_equal(h1,h1real,8)

    def test_h2vp(self):
        # Derivative of H2: h2vp = J'_v - i*Y'_v.
        h2 = special.h2vp(1,.1)
        h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
        assert_almost_equal(h2,h2real,8)

    def test_hyp0f1(self):
        # scalar input
        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
        # float input, expected values match mpmath
        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
                             1.37789689539747, 1.60373685288480])
        assert_allclose(x, expected, rtol=1e-12)
        # complex input
        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)
        # test broadcasting
        x1 = [0.5, 1.5, 2.5]
        x2 = [0, 1, 0.5]
        x = special.hyp0f1(x1, x2)
        expected = [1.0, 1.8134302039235093, 1.21482702689997]
        assert_allclose(x, expected, rtol=1e-12)
        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
        # Incompatible shapes must raise.
        assert_raises(ValueError, special.hyp0f1,
                      np.row_stack([x1] * 3), [0, 1])

    def test_hyp0f1_gh5764(self):
        # Just checks the point that failed; there's a more systematic
        # test in test_mpmath
        res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
        # The expected value was generated using mpmath
        assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)

    def test_hyp1f1(self):
        hyp1 = special.hyp1f1(.1,.1,.3)
        assert_almost_equal(hyp1, 1.3498588075760032,7)
        # test contributed by Moritz Deger (2008-05-29)
        # http://projects.scipy.org/scipy/scipy/ticket/659
        # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
        # produced with test_hyp1f1.nb
        ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
                          [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
                          [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
                          [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
                          [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
                          [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
                          [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
                          [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
                          [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
                          [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
                          [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
                          [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
                          [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
                          [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
                          [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
                          [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
                          [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
                          [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
                          [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
                          [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
                          [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
                          [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
                          [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
                          [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
                          [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
                          [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
                          [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
                          [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
                          [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
                          [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
                          [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
                          [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
                          [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
                          [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
                          [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
                          [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
                          [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
                          [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
                          [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
                          [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
                          [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
                          [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
                          [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
                          [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
                          [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
                          [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
                          [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
                          [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
                          [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
                          [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
                          [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
                          [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
                          [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
                          [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
                          [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
                          [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
                          [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
                          [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
                          [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
                          [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
                          [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
                          [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
                          [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
                          [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
                          [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
                          [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
                          [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
                          [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
                          [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
                          [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
                          [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
                          [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
                          [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
                          [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
                          [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
                          [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
                          [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
                          [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
                          [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
                          [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
                          [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
                          [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
                          [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
                          [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
                          [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
                          [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
                          [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
                          [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
                          [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
                          [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
                          [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
                          [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
                          [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
                          [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
                          [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
                          [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
                          [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
                          [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
                          [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
                          [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
        # Check every Mathematica reference row to 4 significant digits.
        for a,b,c,expected in ref_data:
            result = special.hyp1f1(a,b,c)
            assert_(abs(expected - result)/expected < 1e-4)

    def test_hyp1f1_gh2957(self):
        # Values on either side of an internal switch point must agree.
        hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
        hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
        assert_almost_equal(hyp1, hyp2, 12)

    def test_hyp1f1_gh2282(self):
        hyp = special.hyp1f1(0.5, 1.5, -1000)
        assert_almost_equal(hyp, 0.028024956081989643, 12)

    def test_hyp1f2(self):
        # TODO: no tests implemented for hyp1f2.
        pass

    def test_hyp2f0(self):
        # TODO: no tests implemented for hyp2f0.
        pass

    def test_hyp2f1(self):
        # a collection of special cases taken from AMS 55
        values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
                  [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
                  [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
                  [3, 3.5, 1.5, 0.2**2,
                   0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
                  [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
                  [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
                  [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
                   special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
                  [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
                   special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
                  [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
                   special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
                  # and some others
                  # ticket #424
                  [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
                  # negative integer a or b, with c-a-b integer and x > 0.9
                  [-2,3,1,0.95,0.715],
                  [2,-3,1,0.95,-0.007],
                  [-6,3,1,0.95,0.0000810625],
                  [2,-5,1,0.95,-0.000029375],
                  # huge negative integers
                  (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
                  (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
                  ]
        for i, (a, b, c, x, v) in enumerate(values):
            cv = special.hyp2f1(a, b, c, x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_hyp3f0(self):
        # TODO: no tests implemented for hyp3f0.
        pass

    def test_hyperu(self):
        # Scalar reference value.
        val1 = special.hyperu(1,0.1,100)
        assert_almost_equal(val1,0.0098153,7)
        # Vectorized check against the classical expression of U in
        # terms of 1F1 (valid for non-integer b).
        a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
        a,b = asarray(a), asarray(b)
        z = 0.5
        hypu = special.hyperu(a,b,z)
        hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
                               (special.gamma(1+a-b)*special.gamma(b)) -
                               z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
                               / (special.gamma(a)*special.gamma(2-b)))
        assert_array_almost_equal(hypu,hprl,12)

    def test_hyperu_gh2287(self):
        assert_almost_equal(special.hyperu(1, 1.5, 20.2),
                            0.048360918656699191, 12)
class TestBessel(TestCase):
    """Tests for Bessel functions (J, Y, I, K): point values, zeros,
    exponentially scaled variants, derivatives, and cross-checks of the
    Cephes real-argument routines against the AMOS complex routines."""
    def test_itj0y0(self):
        it0 = array(special.itj0y0(.2))
        assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
    def test_it2j0y0(self):
        it2 = array(special.it2j0y0(.2))
        assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
    def test_negv_iv(self):
        # I_v is even in the order for integer v.
        assert_equal(special.iv(3,2), special.iv(-3,2))
    def test_j0(self):
        oz = special.j0(.1)
        ozr = special.jn(0,.1)
        assert_almost_equal(oz,ozr,8)
    def test_j1(self):
        o1 = special.j1(.1)
        o1r = special.jn(1,.1)
        assert_almost_equal(o1,o1r,8)
    def test_jn(self):
        jnnr = special.jn(1,.2)
        assert_almost_equal(jnnr,0.099500832639235995,8)
    def test_negv_jv(self):
        # J_{-n} = (-1)^n J_n for integer orders.
        assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
    def test_jv(self):
        values = [[0, 0.1, 0.99750156206604002],
                  [2./3, 1e-8, 0.3239028506761532e-5],
                  [2./3, 1e-10, 0.1503423854873779e-6],
                  [3.1, 1e-10, 0.1711956265409013e-32],
                  [2./3, 4.0, -0.2325440850267039],
                  ]
        for i, (v, x, y) in enumerate(values):
            yc = special.jv(v, x)
            assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
    def test_negv_jve(self):
        assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
    def test_jve(self):
        # jve(v, z) = jv(v, z) * exp(-|Im(z)|)
        jvexp = special.jve(1,.2)
        assert_almost_equal(jvexp,0.099500832639235995,8)
        jvexp1 = special.jve(1,.2+1j)
        z = .2+1j
        jvexpr = special.jv(1,z)*exp(-abs(z.imag))
        assert_almost_equal(jvexp1,jvexpr,8)
    def test_jn_zeros(self):
        jn0 = special.jn_zeros(0,5)
        jn1 = special.jn_zeros(1,5)
        assert_array_almost_equal(jn0,array([2.4048255577,
                                             5.5200781103,
                                             8.6537279129,
                                             11.7915344391,
                                             14.9309177086]),4)
        assert_array_almost_equal(jn1,array([3.83171,
                                             7.01559,
                                             10.17347,
                                             13.32369,
                                             16.47063]),4)
        jn102 = special.jn_zeros(102,5)
        assert_tol_equal(jn102, array([110.89174935992040343,
                                       117.83464175788308398,
                                       123.70194191713507279,
                                       129.02417238949092824,
                                       134.00114761868422559]), rtol=1e-13)
        jn301 = special.jn_zeros(301,5)
        assert_tol_equal(jn301, array([313.59097866698830153,
                                       323.21549776096288280,
                                       331.22338738656748796,
                                       338.39676338872084500,
                                       345.03284233056064157]), rtol=1e-13)
    def test_jn_zeros_slow(self):
        # High-index zeros of J0 and J10; regression values at 1e-13.
        jn0 = special.jn_zeros(0, 300)
        assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
        assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
        assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
        jn10 = special.jn_zeros(10, 300)
        assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
        assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
        assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
        jn3010 = special.jn_zeros(3010,5)
        assert_tol_equal(jn3010, array([3036.86590780927,
                                        3057.06598526482,
                                        3073.66360690272,
                                        3088.37736494778,
                                        3101.86438139042]), rtol=1e-8)
    def test_jnjnp_zeros(self):
        jn = special.jn
        # Derivative of J_n via the recurrence J_n' = (J_{n-1} - J_{n+1}) / 2.
        def jnp(n, x):
            return (jn(n-1,x) - jn(n+1,x))/2
        for nt in range(1, 30):
            z, n, m, t = special.jnjnp_zeros(nt)
            for zz, nn, tt in zip(z, n, t):
                if tt == 0:
                    assert_allclose(jn(nn, zz), 0, atol=1e-6)
                elif tt == 1:
                    assert_allclose(jnp(nn, zz), 0, atol=1e-6)
                else:
                    raise AssertionError("Invalid t return for nt=%d" % nt)
    def test_jnp_zeros(self):
        jnp = special.jnp_zeros(1,5)
        assert_array_almost_equal(jnp, array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),4)
        jnp = special.jnp_zeros(443,5)
        assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
    def test_jnyn_zeros(self):
        jnz = special.jnyn_zeros(1,5)
        assert_array_almost_equal(jnz,(array([3.83171,
                                              7.01559,
                                              10.17347,
                                              13.32369,
                                              16.47063]),
                                       array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),
                                       array([2.19714,
                                              5.42968,
                                              8.59601,
                                              11.74915,
                                              14.89744]),
                                       array([3.68302,
                                              6.94150,
                                              10.12340,
                                              13.28576,
                                              16.44006])),5)
    def test_jvp(self):
        jvprim = special.jvp(2,2)
        jv0 = (special.jv(1,2)-special.jv(3,2))/2
        assert_almost_equal(jvprim,jv0,10)
    def test_k0(self):
        ozk = special.k0(.1)
        ozkr = special.kv(0,.1)
        assert_almost_equal(ozk,ozkr,8)
    def test_k0e(self):
        ozke = special.k0e(.1)
        ozker = special.kve(0,.1)
        assert_almost_equal(ozke,ozker,8)
    def test_k1(self):
        o1k = special.k1(.1)
        o1kr = special.kv(1,.1)
        assert_almost_equal(o1k,o1kr,8)
    def test_k1e(self):
        o1ke = special.k1e(.1)
        o1ker = special.kve(1,.1)
        assert_almost_equal(o1ke,o1ker,8)
    def test_jacobi(self):
        # Random (a, b) parameters; coefficients checked against the
        # closed forms for P_0 through P_3.
        a = 5*np.random.random() - 1
        b = 5*np.random.random() - 1
        P0 = special.jacobi(0,a,b)
        P1 = special.jacobi(1,a,b)
        P2 = special.jacobi(2,a,b)
        P3 = special.jacobi(3,a,b)
        assert_array_almost_equal(P0.c,[1],13)
        assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
        cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
        p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
        assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
        cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
              12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
        p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
        assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
    def test_kn(self):
        kn1 = special.kn(0,.2)
        assert_almost_equal(kn1,1.7527038555281462,8)
    def test_negv_kv(self):
        # K_v is even in the order.
        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
    def test_kv0(self):
        kv0 = special.kv(0,.2)
        assert_almost_equal(kv0, 1.7527038555281462, 10)
    def test_kv1(self):
        kv1 = special.kv(1,0.2)
        assert_almost_equal(kv1, 4.775972543220472, 10)
    def test_kv2(self):
        kv2 = special.kv(2,0.2)
        assert_almost_equal(kv2, 49.51242928773287, 10)
    def test_kn_largeorder(self):
        assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
    def test_kv_largearg(self):
        # K decays exponentially; underflows to exactly 0 for huge z.
        assert_equal(special.kv(0, 1e19), 0)
    def test_negv_kve(self):
        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
    def test_kve(self):
        # kve(v, z) = kv(v, z) * exp(z)
        kve1 = special.kve(0,.2)
        kv1 = special.kv(0,.2)*exp(.2)
        assert_almost_equal(kve1,kv1,8)
        z = .2+1j
        kve2 = special.kve(0,z)
        kv2 = special.kv(0,z)*exp(z)
        assert_almost_equal(kve2,kv2,8)
    def test_kvp_v0n1(self):
        # K_0'(z) = -K_1(z)
        z = 2.2
        assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
    def test_kvp_n1(self):
        # First derivative via K_v'(z) = -K_{v+1}(z) + (v/z) K_v(z)
        v = 3.
        z = 2.2
        xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
        x = special.kvp(v,z, n=1)
        assert_almost_equal(xc, x, 10)   # this function (kvp) is broken
    def test_kvp_n2(self):
        v = 3.
        z = 2.2
        xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
        x = special.kvp(v, z, n=2)
        assert_almost_equal(xc, x, 10)
    def test_y0(self):
        oz = special.y0(.1)
        ozr = special.yn(0,.1)
        assert_almost_equal(oz,ozr,8)
    def test_y1(self):
        o1 = special.y1(.1)
        o1r = special.yn(1,.1)
        assert_almost_equal(o1,o1r,8)
    def test_y0_zeros(self):
        yo,ypo = special.y0_zeros(2)
        zo,zpo = special.y0_zeros(2,complex=1)
        all = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
        assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
    def test_y1_zeros(self):
        y1 = special.y1_zeros(1)
        assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
    def test_y1p_zeros(self):
        y1p = special.y1p_zeros(1,complex=1)
        assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
    def test_yn_zeros(self):
        an = special.yn_zeros(4,2)
        assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
        an = special.yn_zeros(443,5)
        assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
                              472.80651546418663566, 481.27353184725625838,
                              488.98055964441374646], rtol=1e-15)
    def test_ynp_zeros(self):
        ao = special.ynp_zeros(0,2)
        assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
        ao = special.ynp_zeros(43,5)
        assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
        ao = special.ynp_zeros(443,5)
        assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
    def test_ynp_zeros_large_order(self):
        ao = special.ynp_zeros(443,5)
        assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
    def test_yn(self):
        yn2n = special.yn(1,.2)
        assert_almost_equal(yn2n,-3.3238249881118471,8)
    def test_negv_yv(self):
        assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
    def test_yv(self):
        yv2 = special.yv(1,.2)
        assert_almost_equal(yv2,-3.3238249881118471,8)
    def test_negv_yve(self):
        assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
    def test_yve(self):
        # yve(v, z) = yv(v, z) * exp(-|Im(z)|)
        yve2 = special.yve(1,.2)
        assert_almost_equal(yve2,-3.3238249881118471,8)
        yve2r = special.yv(1,.2+1j)*exp(-1)
        yve22 = special.yve(1,.2+1j)
        assert_almost_equal(yve22,yve2r,8)
    def test_yvp(self):
        yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
        yvp1 = special.yvp(2,.2)
        assert_array_almost_equal(yvp1,yvpr,10)
    def _cephes_vs_amos_points(self):
        """Yield points at which to compare Cephes implementation to AMOS"""
        # check several points, including large-amplitude ones
        for v in [-120, -100.3, -20., -10., -1., -.5,
                  0., 1., 12.49, 120., 301]:
            for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
                      700.6, 1300, 10003]:
                yield v, z
        # check half-integers; these are problematic points at least
        # for cephes/iv
        for v in 0.5 + arange(-60, 60):
            yield v, 3.5
    def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
        # f1: real/complex routine under test; f2: integer-order companion.
        for v, z in self._cephes_vs_amos_points():
            if skip is not None and skip(v, z):
                continue
            # c1: real-argument path, c2: complex (AMOS) path, c3: integer order.
            c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
            if np.isinf(c1):
                assert_(np.abs(c2) >= 1e300, (v, z))
            elif np.isnan(c1):
                assert_(c2.imag != 0, (v, z))
            else:
                assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
                if v == int(v):
                    assert_tol_equal(c3, c2, err_msg=(v, z),
                                     rtol=rtol, atol=atol)
    def test_jv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
    def test_yv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
    def test_yv_cephes_vs_amos_only_small_orders(self):
        skipper = lambda v, z: (abs(v) > 50)
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
    def test_iv_cephes_vs_amos(self):
        olderr = np.seterr(all='ignore')
        try:
            self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
        finally:
            np.seterr(**olderr)
    @dec.slow
    def test_iv_cephes_vs_amos_mass_test(self):
        # Randomized large-scale comparison of the real and complex iv paths.
        N = 1000000
        np.random.seed(1)
        v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
        x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
        imsk = (np.random.randint(8, size=N) == 0)
        v[imsk] = v[imsk].astype(int)
        old_err = np.seterr(all='ignore')
        try:
            c1 = special.iv(v, x)
            c2 = special.iv(v, x+0j)
            # deal with differences in the inf and zero cutoffs
            c1[abs(c1) > 1e300] = np.inf
            c2[abs(c2) > 1e300] = np.inf
            c1[abs(c1) < 1e-300] = 0
            c2[abs(c2) < 1e-300] = 0
            dc = abs(c1/c2 - 1)
            dc[np.isnan(dc)] = 0
        finally:
            np.seterr(**old_err)
        k = np.argmax(dc)
        # Most error apparently comes from AMOS and not our implementation;
        # there are some problems near integer orders there
        assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
    def test_kv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
        self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
    def test_ticket_623(self):
        assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
        assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
        assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
    def test_ticket_853(self):
        """Negative-order Bessels"""
        # cephes
        assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
        assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
        assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
        assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
        assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
        assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
        assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
        assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
        assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
        assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
        assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
        assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
        # amos
        assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
        assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
        assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
        assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
        assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
        assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
        assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
        assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
        assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
        assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
        assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
        assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
        assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
        assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
        assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
        assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
        assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
        assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
        assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
        assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
        assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
        assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
    def test_ticket_854(self):
        """Real-valued Bessel domains"""
        assert_(isnan(special.jv(0.5, -1)))
        assert_(isnan(special.iv(0.5, -1)))
        assert_(isnan(special.yv(0.5, -1)))
        assert_(isnan(special.yv(1, -1)))
        assert_(isnan(special.kv(0.5, -1)))
        assert_(isnan(special.kv(1, -1)))
        assert_(isnan(special.jve(0.5, -1)))
        assert_(isnan(special.ive(0.5, -1)))
        assert_(isnan(special.yve(0.5, -1)))
        assert_(isnan(special.yve(1, -1)))
        assert_(isnan(special.kve(0.5, -1)))
        assert_(isnan(special.kve(1, -1)))
        assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
        assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
    def test_ticket_503(self):
        """Real-valued Bessel I overflow"""
        assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
        assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
    def test_iv_hyperg_poles(self):
        assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
    # Helper: direct power-series evaluation of I_v(z) with an error estimate.
    def iv_series(self, v, z, n=200):
        k = arange(0, n).astype(float_)
        r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
        r[isnan(r)] = inf
        r = exp(r)
        err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
        return r.sum(), err
    def test_i0_series(self):
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(0, z)
            assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
    def test_i1_series(self):
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(1, z)
            assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
    def test_iv_series(self):
        for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
            for z in [1., 10., 200.5, -1+2j]:
                value, err = self.iv_series(v, z)
                assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
    def test_i0(self):
        # Table of exponentially scaled values: i0(x) * exp(-x).
        values = [[0.0, 1.0],
                  [1e-10, 1.0],
                  [0.1, 0.9071009258],
                  [0.5, 0.6450352706],
                  [1.0, 0.4657596077],
                  [2.5, 0.2700464416],
                  [5.0, 0.1835408126],
                  [20.0, 0.0897803119],
                  ]
        for i, (x, v) in enumerate(values):
            cv = special.i0(x) * exp(-x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
    def test_i0e(self):
        oize = special.i0e(.1)
        oizer = special.ive(0,.1)
        assert_almost_equal(oize,oizer,8)
    def test_i1(self):
        values = [[0.0, 0.0],
                  [1e-10, 0.4999999999500000e-10],
                  [0.1, 0.0452984468],
                  [0.5, 0.1564208032],
                  [1.0, 0.2079104154],
                  [5.0, 0.1639722669],
                  [20.0, 0.0875062222],
                  ]
        for i, (x, v) in enumerate(values):
            cv = special.i1(x) * exp(-x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
    def test_i1e(self):
        oi1e = special.i1e(.1)
        oi1er = special.ive(1,.1)
        assert_almost_equal(oi1e,oi1er,8)
    def test_iti0k0(self):
        iti0 = array(special.iti0k0(5))
        assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
    def test_it2i0k0(self):
        it2k = special.it2i0k0(.1)
        assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
    def test_iv(self):
        iv1 = special.iv(0,.1)*exp(-.1)
        assert_almost_equal(iv1,0.90710092578230106,10)
    def test_negv_ive(self):
        assert_equal(special.ive(3,2), special.ive(-3,2))
    def test_ive(self):
        # ive(v, z) = iv(v, z) * exp(-|Re(z)|)
        ive1 = special.ive(0,.1)
        iv1 = special.iv(0,.1)*exp(-.1)
        assert_almost_equal(ive1,iv1,10)
    def test_ivp0(self):
        # I_0'(z) = I_1(z)
        assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
    def test_ivp(self):
        # I_1'(z) = (I_0(z) + I_2(z)) / 2
        y = (special.iv(0,2) + special.iv(2,2))/2
        x = special.ivp(1,2)
        assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
    """Laguerre and generalized Laguerre polynomial coefficients."""

    def test_laguerre(self):
        # Coefficients (highest power first) of L_n for n = 0..5.
        expected = [
            array([1]),
            array([-1, 1]),
            array([1, -4, 2]) / 2.0,
            array([-1, 9, -18, 6]) / 6.0,
            array([1, -16, 72, -96, 24]) / 24.0,
            array([-1, 25, -200, 600, -600, 120]) / 120.0,
        ]
        for degree, coeffs in enumerate(expected):
            assert_array_almost_equal(special.laguerre(degree).c, coeffs, 13)

    def test_genlaguerre(self):
        # Random parameter k > -1; check L^(k)_n for n = 0..3.
        k = 5 * np.random.random() - 0.9
        polys = [special.genlaguerre(degree, k) for degree in range(4)]
        assert_equal(polys[0].c, [1])
        assert_equal(polys[1].c, [-1, k + 1])
        assert_almost_equal(polys[2].c,
                            array([1, -2*(k + 2), (k + 1.)*(k + 2.)]) / 2.0)
        assert_almost_equal(polys[3].c,
                            array([-1, 3*(k + 3), -3*(k + 2)*(k + 3),
                                   (k + 1)*(k + 2)*(k + 3)]) / 6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
    def test_legendre(self):
        """Coefficients (highest power first) of P_n for n = 0..5."""
        legs = [special.legendre(degree) for degree in range(6)]
        assert_equal(legs[0].c, [1])
        assert_equal(legs[1].c, [1, 0])
        assert_almost_equal(legs[2].c, array([3, 0, -1]) / 2.0, decimal=13)
        assert_almost_equal(legs[3].c, array([5, 0, -3, 0]) / 2.0)
        assert_almost_equal(legs[4].c, array([35, 0, -30, 0, 3]) / 8.0)
        assert_almost_equal(legs[5].c, array([63, 0, -70, 0, 15, 0]) / 8.0)
class TestLambda(TestCase):
    def test_lmbda(self):
        """Check lmbda(1, .1) against expressions built from J_0 and J_1."""
        x = .1
        computed = special.lmbda(1, x)
        expected_vals = array([special.jn(0, x), 2*special.jn(1, x)/x])
        expected_derivs = array([special.jvp(0, x),
                                 -2*special.jv(1, x)/.01 + 2*special.jvp(1, x)/x])
        assert_array_almost_equal(computed, (expected_vals, expected_derivs), 8)
class TestLog1p(TestCase):
    """special.log1p versus log(1 + x)."""

    def test_log1p(self):
        computed = (special.log1p(10), special.log1p(11), special.log1p(12))
        reference = (log(11), log(12), log(13))
        assert_array_almost_equal(computed, reference, 8)

    def test_log1pmore(self):
        computed = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
        reference = (log(2), log(2.1), log(2.2))
        assert_array_almost_equal(computed, reference, 8)
class TestLegendreFunctions(TestCase):
    """Tests for (associated) Legendre functions of the first and second
    kind: clpmn (complex), lpmn, lpn, lpmv, lqmn, lqn."""
    def test_clpmn(self):
        z = 0.5+0.3j
        clp = special.clpmn(2, 2, z, 3)
        # Closed forms of P_n^m and dP_n^m/dz for m, n <= 2.
        assert_array_almost_equal(clp,
                   (array([[1.0000, z, 0.5*(3*z*z-1)],
                           [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
                           [0.0000, 0.0000, 3*(z*z-1)]]),
                    array([[0.0000, 1.0000, 3*z],
                           [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
                           [0.0000, 0.0000, 6*z]])),
                    7)
    def test_clpmn_close_to_real_2(self):
        # Type-2 values just above/below the real axis match lpmv on (-1, 1).
        eps = 1e-10
        m = 1
        n = 3
        x = 0.5
        clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
        clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
        assert_array_almost_equal(array([clp_plus, clp_minus]),
                                  array([special.lpmv(m, n, x),
                                         special.lpmv(m, n, x)]),
                                  7)
    def test_clpmn_close_to_real_3(self):
        # Type-3 values pick up a phase exp(-/+ i*pi*m/2) across the cut.
        eps = 1e-10
        m = 1
        n = 3
        x = 0.5
        clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
        clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
        assert_array_almost_equal(array([clp_plus, clp_minus]),
                                  array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
                                         special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
                                  7)
    def test_clpmn_across_unit_circle(self):
        eps = 1e-7
        m = 1
        n = 1
        x = 1j
        for type in [2, 3]:
            assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
                            special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
    def test_inf(self):
        # Derivatives are infinite at the branch points z = +/-1.
        for z in (1, -1):
            for n in range(4):
                for m in range(1, n):
                    lp = special.clpmn(m, n, z)
                    assert_(np.isinf(lp[1][1,1:]).all())
                    lp = special.lpmn(m, n, z)
                    assert_(np.isinf(lp[1][1,1:]).all())
    def test_deriv_clpmn(self):
        # data inside and outside of the unit circle
        zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
                 1+1j, -1+1j, -1-1j, 1-1j]
        m = 2
        n = 3
        for type in [2, 3]:
            for z in zvals:
                for h in [1e-3, 1e-3j]:
                    approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
                                         - special.clpmn(m, n, z-0.5*h, type)[0])/h
                    assert_allclose(special.clpmn(m, n, z, type)[1],
                                    approx_derivative,
                                    rtol=1e-4)
    def test_lpmn(self):
        lp = special.lpmn(0,2,.5)
        assert_array_almost_equal(lp,(array([[1.00000,
                                              0.50000,
                                              -0.12500]]),
                                      array([[0.00000,
                                              1.00000,
                                              1.50000]])),4)
    def test_lpn(self):
        lpnf = special.lpn(2,.5)
        assert_array_almost_equal(lpnf,(array([1.00000,
                                               0.50000,
                                               -0.12500]),
                                        array([0.00000,
                                               1.00000,
                                               1.50000])),4)
    def test_lpmv(self):
        lp = special.lpmv(0,2,.5)
        assert_almost_equal(lp,-0.125,7)
        lp = special.lpmv(0,40,.001)
        assert_almost_equal(lp,0.1252678976534484,7)
        # XXX: this is outside the domain of the current implementation,
        #      so ensure it returns a NaN rather than a wrong answer.
        olderr = np.seterr(all='ignore')
        try:
            lp = special.lpmv(-1,-1,.001)
        finally:
            np.seterr(**olderr)
        assert_(lp != 0 or np.isnan(lp))
    def test_lqmn(self):
        # The m=0 row of lqmn must reproduce lqn.
        lqmnf = special.lqmn(0,2,.5)
        lqf = special.lqn(2,.5)
        assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
        assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
    def test_lqmn_gt1(self):
        """algorithm for real arguments changes at 1.0001
        test against analytical result for m=2, n=1
        """
        x0 = 1.0001
        delta = 0.00002
        for x in (x0-delta, x0+delta):
            lq = special.lqmn(2, 1, x)[0][-1, -1]
            expected = 2/(x*x-1)
            assert_almost_equal(lq, expected)
    def test_lqmn_shape(self):
        # Output shape is (mmax+1, nmax+1), with nmax floored at 1.
        a, b = special.lqmn(4, 4, 1.1)
        assert_equal(a.shape, (5, 5))
        assert_equal(b.shape, (5, 5))
        a, b = special.lqmn(4, 0, 1.1)
        assert_equal(a.shape, (5, 1))
        assert_equal(b.shape, (5, 1))
    def test_lqn(self):
        lqf = special.lqn(2,.5)
        assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
                                       array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
    """Mathieu-function tests; mostly placeholders pending reference data."""

    def test_mathieu_a(self):
        # No reference values wired up yet.
        pass

    def test_mathieu_even_coef(self):
        # Exercise the routine for coverage only: the normalization ("Q") is
        # not well defined / broken and the proper reporting order could not
        # be determined, so no value is asserted.
        special.mathieu_even_coef(2, 5)

    def test_mathieu_odd_coef(self):
        # Same issue as the even-coefficient case above.
        pass
class TestFresnelIntegral(TestCase):
    """Placeholders for the modified Fresnel integrals (no checks yet)."""

    def test_modfresnelp(self):
        pass

    def test_modfresnelm(self):
        pass
class TestOblCvSeq(TestCase):
    def test_obl_cv_seq(self):
        """Oblate spheroidal characteristic values for m=0, n<=3, c=1."""
        computed = special.obl_cv_seq(0, 3, 1)
        reference = array([-0.348602, 1.393206, 5.486800, 11.492120])
        assert_array_almost_equal(computed, reference, 5)
class TestParabolicCylinder(TestCase):
    """Tests for parabolic cylinder functions: pbdn_seq, pbdv, pbvv."""
    def test_pbdn_seq(self):
        pb = special.pbdn_seq(1,.1)
        assert_array_almost_equal(pb,(array([0.9975,
                                             0.0998]),
                                      array([-0.0499,
                                             0.9925])),4)
    def test_pbdv(self):
        # NOTE(review): `derrl` is computed but never compared against
        # pbv[1], so this test currently asserts nothing.  Confirm the
        # intended derivative recurrence before adding an assertion.
        pbv = special.pbdv(1,.2)
        derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
    def test_pbdv_seq(self):
        # pbdv_seq must match the real parts of the pbdn_seq output.
        pbn = special.pbdn_seq(1,.1)
        pbv = special.pbdv_seq(1,.1)
        assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
    def test_pbdv_points(self):
        # simple case
        eta = np.linspace(-10, 10, 5)
        z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
        assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
        # some points
        assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
        assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
    def test_pbdv_gradient(self):
        # Central finite difference of the values vs the returned derivative.
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]
        p = special.pbdv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
        assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
    def test_pbvv_gradient(self):
        # Same finite-difference check for pbvv.
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]
        p = special.pbvv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
        assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
    # Reference values from Table 6.2 (pg. 271) of Abramowitz & Stegun.
    def test_polygamma(self):
        """Check polygamma values, the psi special case, and broadcasting."""
        poly2 = special.polygamma(2, 1)
        poly3 = special.polygamma(3, 1)
        assert_almost_equal(poly2, -2.4041138063, 10)
        assert_almost_equal(poly3, 6.4939394023, 10)
        # polygamma(0, x) must agree with psi(x).
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))
        # Broadcasting of the order array against the argument array.
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        # np.vstack replaces np.row_stack, an alias removed in NumPy 2.0;
        # the stacked result is identical.
        expected = np.vstack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.vstack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.vstack([n]*2), x),
                            expected)
class TestProCvSeq(TestCase):
    def test_pro_cv_seq(self):
        """Prolate spheroidal characteristic values for m=0, n<=3, c=1."""
        computed = special.pro_cv_seq(0, 3, 1)
        reference = array([0.319000, 2.593084, 6.533471, 12.514462])
        assert_array_almost_equal(computed, reference, 5)
class TestPsi(TestCase):
    def test_psi(self):
        """psi(1) equals minus the Euler-Mascheroni constant."""
        assert_almost_equal(special.psi(1), -0.57721566490153287, 8)
class TestRadian(TestCase):
    """Conversion of (degrees, minutes, seconds) to radians."""

    def test_radian(self):
        # 90 degrees is pi/2 radians.
        assert_almost_equal(special.radian(90, 0, 0), pi/2.0, 5)

    def test_radianmore(self):
        # Minutes and seconds contribute their fractional part.
        assert_almost_equal(special.radian(90, 1, 60),
                            pi/2 + 0.0005816135199345904, 5)
class TestRiccati(TestCase):
    """Riccati-Bessel functions checked against x*j_n(x) / x*y_n(x) forms
    built from the deprecated sph_jn/sph_yn helpers (warnings silenced)."""
    def test_riccati_jn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            # (x*j_n(x), j_n(x) + x*j_n'(x)) via the spherical helper.
            jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
            ricjn = special.riccati_jn(1,.2)
            assert_array_almost_equal(ricjn,jnrl,8)
    def test_riccati_yn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            # (x*y_n(x), y_n(x) + x*y_n'(x)) via the spherical helper.
            ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
            ricyn = special.riccati_yn(1,.2)
            assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
    def test_round(self):
        """special.round on fractions .1, .4, .5 and .6 of 10."""
        rounded = [int(special.round(v)) for v in (10.1, 10.4, 10.5, 10.6)]
        # Note: According to the documentation, scipy.special.round is
        # supposed to round to the nearest even number if the fractional
        # part is exactly 0.5.  On some platforms this does not appear to
        # work and thus this test may fail.  However, this unit test is
        # correctly written.
        expected = (10, 10, 10, 11)
        assert_array_equal(rounded, expected)
def test_sph_harm():
    # Expected values derived from tables in
    # http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
    sh = special.sph_harm
    pi = np.pi
    exp = np.exp
    sqrt = np.sqrt
    sin = np.sin
    cos = np.cos
    cases = [
        ((0, 0, 0, 0),
         0.5/sqrt(pi)),
        ((-2, 2, 0., pi/4),
         0.25*sqrt(15./(2.*pi)) * (sin(pi/4))**2.),
        ((-2, 2, 0., pi/2),
         0.25*sqrt(15./(2.*pi))),
        ((2, 2, pi, pi/2),
         0.25*sqrt(15/(2.*pi)) * exp(0+2.*pi*1j)*sin(pi/2.)**2.),
        ((2, 4, pi/4., pi/3.),
         (3./8.)*sqrt(5./(2.*pi)) * exp(0+2.*pi/4.*1j) *
         sin(pi/3.)**2. * (7.*cos(pi/3.)**2.-1)),
        ((4, 4, pi/8., pi/6.),
         (3./16.)*sqrt(35./(2.*pi)) * exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.),
    ]
    # Nose-style generator test: yield one comparison per table entry.
    for args, expected in cases:
        yield assert_array_almost_equal, sh(*args), expected
def test_sph_harm_ufunc_loop_selection():
    # Regression test for https://github.com/scipy/scipy/issues/4895:
    # the complex128 ufunc loop must be selected no matter which of the
    # four arguments is array-valued.
    expected_dtype = np.dtype(np.complex128)
    assert_equal(special.sph_harm(0, 0, 0, 0).dtype, expected_dtype)
    for position in range(4):
        args = [0, 0, 0, 0]
        args[position] = [0]
        assert_equal(special.sph_harm(*args).dtype, expected_dtype)
    assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, expected_dtype)
class TestSpherical(TestCase):
    """Tests for the spherical Bessel helpers sph_in/sph_jn/sph_kn/sph_yn
    and their combined variants; all calls are wrapped to silence the
    DeprecationWarning these helpers emit."""
    def test_sph_harm(self):
        # see test_sph_harm function
        pass
    def test_sph_in(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            i1n = special.sph_in(1,.2)
        # Derivatives via i0' = i1 and i1' = i0 - (2/x) i1.
        inp0 = (i1n[0][1])
        inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
        assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
                                                0.066933714568029540839]),12)
        assert_array_almost_equal(i1n[1],[inp0,inp1],12)
    def test_sph_inkn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
            inkn = r_[special.sph_inkn(1,.2)]
        assert_array_almost_equal(inkn,spikn,10)
    def test_sph_in_kn_order0(self):
        # Closed forms for order 0: i0 = sinh(x)/x, k0 = (pi/2) e^-x / x.
        x = 1.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            sph_i0 = special.sph_in(0, x)
            sph_i0_expected = np.array([np.sinh(x)/x,
                                        np.cosh(x)/x-np.sinh(x)/x**2])
            assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
            sph_k0 = special.sph_kn(0, x)
            sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
                                        -0.5*pi*exp(-x)*(1/x+1/x**2)])
            assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
            sph_i0k0 = special.sph_inkn(0, x)
            assert_array_almost_equal(r_[sph_i0+sph_k0],
                                      r_[sph_i0k0],
                                      10)
    def test_sph_jn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            s1 = special.sph_jn(2,.2)
        # Derivatives via the recurrences j0' = -j1, jn' = j_{n-1} - ((n+1)/x) jn.
        s10 = -s1[0][1]
        s11 = s1[0][0]-2.0/0.2*s1[0][1]
        s12 = s1[0][1]-3.0/0.2*s1[0][2]
        assert_array_almost_equal(s1[0],[0.99334665397530607731,
                                         0.066400380670322230863,
                                         0.0026590560795273856680],12)
        assert_array_almost_equal(s1[1],[s10,s11,s12],12)
    def test_sph_jnyn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)]  # tuple addition
            jnyn1 = r_[special.sph_jnyn(1,.2)]
        assert_array_almost_equal(jnyn1,jnyn,9)
    def test_sph_kn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            kn = special.sph_kn(2,.2)
        # Derivatives via k0' = -k1, kn' = -k_{n-1} - ((n+1)/x) kn.
        kn0 = -kn[0][1]
        kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
        kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
        assert_array_almost_equal(kn[0],[6.4302962978445670140,
                                         38.581777787067402086,
                                         585.15696310385559829],12)
        assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
    def test_sph_yn(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            sy1 = special.sph_yn(2,.2)[0][2]
            sy2 = special.sph_yn(0,.2)[0][0]
            sy3 = special.sph_yn(1,.2)[1][1]
            sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3  # correct derivative value
        assert_almost_equal(sy1,-377.52483,5)  # previous values in the system
        assert_almost_equal(sy2,-4.9003329,5)
        assert_almost_equal(sy3,sphpy,4)  # compare correct derivative val. (correct =-system val).
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
    """Chi-square CDF with fractional (df < 1) degrees of freedom."""
    cdf = special.chdtr(0.6, 3)
    assert_almost_equal(cdf, 0.957890536704110)
def test_ch2_inf():
    """Chi-square CDF evaluates to exactly 1 at x = inf."""
    assert_equal(special.chdtr(0.7, np.inf), 1.0)
def test_chi2c_smalldf():
    """Complemented chi-square CDF with fractional degrees of freedom."""
    survival = special.chdtrc(0.6, 3)
    assert_almost_equal(survival, 1 - 0.957890536704110)
def test_chi2_inv_smalldf():
    """Inverse of the complemented chi-square CDF round-trips chdtrc."""
    x = special.chdtri(0.6, 1 - 0.957890536704110)
    assert_almost_equal(x, 3)
def test_agm_simple():
    """Arithmetic-geometric mean: a small pair and a very lopsided pair."""
    assert_allclose(special.agm(24, 6), 13.4581714817)
    assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
    """Cephes wrappers historically truncate real arguments to integers;
    each call pair below must therefore agree exactly."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        cases = [
            (special.bdtrc, (1, 2, 0.3), (1.8, 2.8, 0.3)),
            (special.bdtr, (1, 2, 0.3), (1.8, 2.8, 0.3)),
            (special.bdtri, (1, 2, 0.3), (1.8, 2.8, 0.3)),
            (special.expn, (1, 0.3), (1.8, 0.3)),
            (special.hyp2f0, (1, 2, 0.3, 1), (1, 2, 0.3, 1.8)),
            (special.nbdtrc, (1, 2, 0.3), (1.8, 2.8, 0.3)),
            (special.nbdtr, (1, 2, 0.3), (1.8, 2.8, 0.3)),
            (special.nbdtri, (1, 2, 0.3), (1.8, 2.8, 0.3)),
            (special.pdtrc, (1, 0.3), (1.8, 0.3)),
            (special.pdtr, (1, 0.3), (1.8, 0.3)),
            (special.pdtri, (1, 0.3), (1.8, 0.3)),
            (special.kn, (1, 0.3), (1.8, 0.3)),
            (special.yn, (1, 0.3), (1.8, 0.3)),
            (special.smirnov, (1, 0.3), (1.8, 0.3)),
            (special.smirnovi, (1, 0.3), (1.8, 0.3)),
        ]
        for func, int_args, float_args in cases:
            assert_equal(func(*int_args), func(*float_args))
@with_special_errors
def test_error_raising():
    # Under with_special_errors, special-function diagnostics become
    # errors; iv(1, 1e99j) overflows and must raise SpecialFunctionWarning.
    assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
    """xlogy(x, y) == x*log(y), with the convention 0*log(y) == 0
    for any non-NaN y."""
    def reference(x, y):
        # Pure-Python mirror of the xlogy contract.
        if x == 0 and not np.isnan(y):
            return x
        return x * np.log(y)
    real_pts = np.asarray([(0, 0), (0, np.nan), (0, np.inf), (1.0, 2.0)],
                          dtype=float)
    complex_pts = np.r_[real_pts, [(0, 1j), (1, 1j)]]
    expected_real = np.vectorize(reference)(real_pts[:, 0], real_pts[:, 1])
    assert_func_equal(special.xlogy, expected_real, real_pts,
                      rtol=1e-13, atol=1e-13)
    expected_cplx = np.vectorize(reference)(complex_pts[:, 0],
                                            complex_pts[:, 1])
    assert_func_equal(special.xlogy, expected_cplx, complex_pts,
                      rtol=1e-13, atol=1e-13)
def test_xlog1py():
    """xlog1py(x, y) == x*log1p(y), with 0*log1p(y) == 0 for non-NaN y."""
    def reference(x, y):
        if x == 0 and not np.isnan(y):
            return x
        return x * np.log1p(y)
    pts = np.asarray([(0, 0), (0, np.nan), (0, np.inf), (1.0, 2.0),
                      (1, 1e-30)], dtype=float)
    expected = np.vectorize(reference)(pts[:, 0], pts[:, 1])
    assert_func_equal(special.xlog1py, expected, pts, rtol=1e-13, atol=1e-13)
def test_entr():
    """entr(x) = -x*log(x) for x >= 0, and -inf for negative x."""
    def reference(x):
        if x < 0:
            return -np.inf
        return -special.xlogy(x, x)
    magnitudes = (0, 0.5, 1.0, np.inf)
    # Signed copies of each magnitude exercise both domain branches.
    points = np.array([sign * v
                       for sign, v in itertools.product([-1, 1], magnitudes)],
                      dtype=float)
    expected = np.vectorize(reference, otypes=[np.float64])(points)
    assert_func_equal(special.entr, expected, points, rtol=1e-13, atol=1e-13)
def test_kl_div():
    """Kullback-Leibler divergence integrand, extended outside the
    natural domain so the function stays convex."""
    def reference(x, y):
        # Branch order matters: invalid-domain checks must run first.
        if x < 0 or y < 0 or (y == 0 and x != 0):
            # extension of natural domain to preserve convexity
            return np.inf
        elif np.isposinf(x) or np.isposinf(y):
            # limits within the natural domain
            return np.inf
        elif x == 0:
            return y
        else:
            return special.xlogy(x, x/y) - x + y
    magnitudes = (0, 0.5, 1.0)
    signs = [-1, 1]
    points = np.array([(sa * va, sb * vb)
                       for sa, va, sb, vb in itertools.product(
                           signs, magnitudes, signs, magnitudes)],
                      dtype=float)
    expected = np.vectorize(reference, otypes=[np.float64])(points[:, 0],
                                                            points[:, 1])
    assert_func_equal(special.kl_div, expected, points, rtol=1e-13, atol=1e-13)
def test_rel_entr():
    """Relative entropy integrand: x*log(x/y) on the positive orthant,
    0 on the x == 0 boundary, inf elsewhere."""
    def reference(x, y):
        if x > 0 and y > 0:
            return special.xlogy(x, x/y)
        elif x == 0 and y >= 0:
            return 0
        else:
            return np.inf
    magnitudes = (0, 0.5, 1.0)
    signs = [-1, 1]
    points = np.array([(sa * va, sb * vb)
                       for sa, va, sb, vb in itertools.product(
                           signs, magnitudes, signs, magnitudes)],
                      dtype=float)
    expected = np.vectorize(reference, otypes=[np.float64])(points[:, 0],
                                                            points[:, 1])
    assert_func_equal(special.rel_entr, expected, points,
                      rtol=1e-13, atol=1e-13)
def test_huber():
    """Huber loss: quadratic for |r| < delta, linear beyond, and inf
    for a negative delta."""
    # Spot checks of the three regimes.
    assert_equal(special.huber(-1, 1.5), np.inf)
    assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
    assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
    def reference(delta, r):
        if delta < 0:
            return np.inf
        if np.abs(r) < delta:
            return 0.5 * np.square(r)
        return delta * (np.abs(r) - 0.5 * delta)
    samples = np.random.randn(10, 2)
    expected = np.vectorize(reference, otypes=[np.float64])(samples[:, 0],
                                                            samples[:, 1])
    assert_func_equal(special.huber, expected, samples, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
    """Pseudo-Huber loss (smooth Huber variant): 0 when either argument
    is 0, inf for negative delta."""
    def reference(delta, r):
        if delta < 0:
            return np.inf
        if (not delta) or (not r):
            return 0
        return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
    # Random samples plus the two zero-argument corner cases.
    samples = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
    expected = np.vectorize(reference, otypes=[np.float64])(samples[:, 0],
                                                            samples[:, 1])
    assert_func_equal(special.pseudo_huber, expected, samples,
                      rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
|
bkendzior/scipy
|
scipy/special/tests/test_basic.py
|
Python
|
bsd-3-clause
| 132,236
|
[
"Elk"
] |
f545f3f85b28c6c999fed8400e43f7026243ec0bb125f23789c31a431a1a3910
|
#!/usr/bin/env python
# This example reads a volume dataset, extracts two isosurfaces that
# represent the skin and bone, creates three orthogonal planes
# (sagittal, axial, coronal), and displays them.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the renderer, the render window, and the interactor. The
# renderer draws into the render window, the interactor enables mouse-
# and keyboard-based interaction with the scene.
aRenderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(aRenderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# The following reader is used to read a series of 2D slices (images)
# that compose the volume. The slice dimensions are set, and the
# pixel spacing. The data Endianness must also be specified. The reader
# uses the FilePrefix in combination with the slice number to construct
# filenames using the format FilePrefix.%d. (In this case the FilePrefix
# is the root name of the file: quarter.)
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
# An isosurface, or contour value of 500 is known to correspond to the
# skin of the patient. Once generated, a vtkPolyDataNormals filter
# is used to create normals for smooth surface shading during rendering.
# The triangle stripper is used to create triangle strips from the
# isosurface; these render much faster on many systems.
skinExtractor = vtk.vtkContourFilter()
skinExtractor.SetInputConnection(v16.GetOutputPort())
skinExtractor.SetValue(0, 500)
skinNormals = vtk.vtkPolyDataNormals()
skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
skinNormals.SetFeatureAngle(60.0)
skinStripper = vtk.vtkStripper()
skinStripper.SetInputConnection(skinNormals.GetOutputPort())
skinMapper = vtk.vtkPolyDataMapper()
skinMapper.SetInputConnection(skinStripper.GetOutputPort())
skinMapper.ScalarVisibilityOff()
skin = vtk.vtkActor()
skin.SetMapper(skinMapper)
skin.GetProperty().SetDiffuseColor(1, .49, .25)
skin.GetProperty().SetSpecular(.3)
skin.GetProperty().SetSpecularPower(20)
# An isosurface, or contour value of 1150 is known to correspond to the
# bone of the patient. Once generated, a vtkPolyDataNormals filter
# is used to create normals for smooth surface shading during rendering.
# The triangle stripper is used to create triangle strips from the
# isosurface; these render much faster on many systems.
boneExtractor = vtk.vtkContourFilter()
boneExtractor.SetInputConnection(v16.GetOutputPort())
boneExtractor.SetValue(0, 1150)
boneNormals = vtk.vtkPolyDataNormals()
boneNormals.SetInputConnection(boneExtractor.GetOutputPort())
boneNormals.SetFeatureAngle(60.0)
boneStripper = vtk.vtkStripper()
boneStripper.SetInputConnection(boneNormals.GetOutputPort())
boneMapper = vtk.vtkPolyDataMapper()
boneMapper.SetInputConnection(boneStripper.GetOutputPort())
boneMapper.ScalarVisibilityOff()
bone = vtk.vtkActor()
bone.SetMapper(boneMapper)
bone.GetProperty().SetDiffuseColor(1, 1, .9412)
# An outline provides context around the data.
outlineData = vtk.vtkOutlineFilter()
outlineData.SetInputConnection(v16.GetOutputPort())
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outlineData.GetOutputPort())
outline = vtk.vtkActor()
outline.SetMapper(mapOutline)
outline.GetProperty().SetColor(0, 0, 0)
# Now we are creating three orthogonal planes passing through the
# volume. Each plane uses a different texture map and therefore has
# different coloration.
# Start by creating a black/white lookup table.
bwLut = vtk.vtkLookupTable()
bwLut.SetTableRange(0, 2000)
bwLut.SetSaturationRange(0, 0)
bwLut.SetHueRange(0, 0)
bwLut.SetValueRange(0, 1)
bwLut.Build()
# Now create a lookup table that consists of the full hue circle (from
# HSV).
hueLut = vtk.vtkLookupTable()
hueLut.SetTableRange(0, 2000)
hueLut.SetHueRange(0, 1)
hueLut.SetSaturationRange(1, 1)
hueLut.SetValueRange(1, 1)
hueLut.Build()
# Finally, create a lookup table with a single hue but having a range
# in the saturation of the hue.
satLut = vtk.vtkLookupTable()
satLut.SetTableRange(0, 2000)
satLut.SetHueRange(.6, .6)
satLut.SetSaturationRange(0, 1)
satLut.SetValueRange(1, 1)
satLut.Build()
# Create the first of the three planes. The filter vtkImageMapToColors
# maps the data through the corresponding lookup table created above.
# The vtkImageActor is a type of vtkProp and conveniently displays an
# image on a single quadrilateral plane. It does this using texture
# mapping and as a result is quite fast. (Note: the input image has to
# be unsigned char values, which the vtkImageMapToColors produces.)
# Note also that by specifying the DisplayExtent, the pipeline
# requests data of this extent and the vtkImageMapToColors only
# processes a slice of data.
sagittalColors = vtk.vtkImageMapToColors()
sagittalColors.SetInputConnection(v16.GetOutputPort())
sagittalColors.SetLookupTable(bwLut)
sagittal = vtk.vtkImageActor()
sagittal.GetMapper().SetInputConnection(sagittalColors.GetOutputPort())
sagittal.SetDisplayExtent(32, 32, 0, 63, 0, 92)
# Create the second (axial) plane of the three planes. We use the same
# approach as before except that the extent differs.
axialColors = vtk.vtkImageMapToColors()
axialColors.SetInputConnection(v16.GetOutputPort())
axialColors.SetLookupTable(hueLut)
axial = vtk.vtkImageActor()
axial.GetMapper().SetInputConnection(axialColors.GetOutputPort())
axial.SetDisplayExtent(0, 63, 0, 63, 46, 46)
# Create the third (coronal) plane of the three planes. We use the same
# approach as before except that the extent differs.
coronalColors = vtk.vtkImageMapToColors()
coronalColors.SetInputConnection(v16.GetOutputPort())
coronalColors.SetLookupTable(satLut)
coronal = vtk.vtkImageActor()
coronal.GetMapper().SetInputConnection(coronalColors.GetOutputPort())
coronal.SetDisplayExtent(0, 63, 32, 32, 0, 92)
# It is convenient to create an initial view of the data. The FocalPoint
# and Position form a vector direction. Later on (ResetCamera() method)
# this vector is used to position the camera to look at the data in
# this direction.
aCamera = vtk.vtkCamera()
aCamera.SetViewUp(0, 0, -1)
aCamera.SetPosition(0, 1, 0)
aCamera.SetFocalPoint(0, 0, 0)
aCamera.ComputeViewPlaneNormal()
# Actors are added to the renderer.
aRenderer.AddActor(outline)
aRenderer.AddActor(sagittal)
aRenderer.AddActor(axial)
aRenderer.AddActor(coronal)
#aRenderer.AddActor(axial)
#aRenderer.AddActor(coronal)
aRenderer.AddActor(skin)
aRenderer.AddActor(bone)
# Turn off bone for this example.
bone.VisibilityOff()
# Set skin to semi-transparent.
skin.GetProperty().SetOpacity(0.5)
# An initial camera view is created. The Dolly() method moves
# the camera towards the FocalPoint, thereby enlarging the image.
aRenderer.SetActiveCamera(aCamera)
aRenderer.ResetCamera()
aCamera.Dolly(1.5)
# Set a background color for the renderer and set the size of the
# render window (expressed in pixels).
aRenderer.SetBackground(1, 1, 1)
renWin.SetSize(640, 480)
# Note that when camera movement occurs (as it does in the Dolly()
# method), the clipping planes often need adjusting. Clipping planes
# consist of two planes: near and far along the view direction. The
# near plane clips out objects in front of the plane; the far plane
# clips out objects behind the plane. This way only what is drawn
# between the planes is actually rendered.
aRenderer.ResetCameraClippingRange()
# Interact with the data.
iren.Initialize()
renWin.Render()
iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Medical/Python/Medical3.py
|
Python
|
gpl-3.0
| 7,624
|
[
"VTK"
] |
1179528a7d47a9b535e8b88a0fb1d7adeed92f3cab85a0091e23657b7ff0463e
|
#coding:utf8
'''
Created on 2013-8-7
@author: lan (www.9miao.com)
'''
from twisted.web import resource
from twisted.internet import reactor
from firefly.server.globalobject import GlobalObject
# Web root shared by all handler classes registered below.
root = GlobalObject().webroot
reactor = reactor
def ErrorBack(reason):
    # Swallow results/failures from child-server deferreds; the master
    # web handlers below are fire-and-forget.
    pass
def masterwebHandle(cls):
    '''Class decorator: instantiate *cls* and register it on the web
    root under its class name, which becomes the URL path segment.
    '''
    root.putChild(cls.__name__, cls())
@masterwebHandle
class stop(resource.Resource):
    '''stop service'''
    def render(self, request):
        '''Broadcast 'serverStop' to every child server, then stop the
        reactor 0.5s later so this HTTP response can still be delivered.

        NOTE(review): ErrorBack is attached with addCallback, not
        addErrback -- looks like an error handler was intended; confirm.
        '''
        for child in GlobalObject().root.childsmanager._childs.values():
            d = child.callbackChild('serverStop')
            d.addCallback(ErrorBack)
        reactor.callLater(0.5,reactor.stop)
        return "stop"
@masterwebHandle
class reloadmodule(resource.Resource):
    '''reload module'''
    def render(self, request):
        '''Ask every child server to hot-reload its modules ('sreload').

        NOTE(review): as in stop.render, ErrorBack is attached with
        addCallback rather than addErrback -- confirm intent upstream.
        '''
        for child in GlobalObject().root.childsmanager._childs.values():
            d = child.callbackChild('sreload')
            d.addCallback(ErrorBack)
        return "reload"
|
yangdw/repo.python
|
src/annotation/Firefly/firefly/master/webapp.py
|
Python
|
mit
| 1,040
|
[
"Firefly"
] |
2c5acdcb594cc2e50f5c6be9b0be623a109508cac418604bd15f3ec7113c7204
|
#!/usr/bin/python
# Copyright 2012 Dorival de Moraes Pedroso. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import subprocess
def Cmd(command, verbose=False, debug=False):
    """Run *command* in a shell and return (stdout, stderr).

    stderr is stripped of surrounding whitespace; stdout is returned
    as-is. With verbose=True both streams are printed after the run;
    with debug=True the command itself is echoed before running.
    """
    if debug:
        print('==================================================')
        # Bug fix: the original printed `cmd`, a NameError -- the
        # parameter is named `command`.
        print(command)
        print('==================================================')
    spr = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    # communicate() drains both pipes concurrently, avoiding the
    # deadlock that sequential stdout.read()/stderr.read() can hit
    # when a child fills one pipe buffer.
    out, err = spr.communicate()
    err = err.strip()
    if verbose:
        print(out)
        print(err)
    return out, err
# Gosl subpackages to document, as (directory name, one-line description).
pkgs = [
    ("chk", "Checking numerical calculations and testing"),
    ("io", "Input and output of files and printing and parsing strings"),
    ("utl", "Utilities such as IntRange and LinSpace and allocators"),
    ("mpi", "A lightweight wrapper to MPI"),
    ("la", "Linear algebra routines"),
    ("plt", "Plotting routines (wrapping matplotlib)"),
    ("fdm", "A simple finite differences solver"),
    ("num", "A few numerical methods"),
    ("fun", "Functions such as y=f(t,x)"),
    ("ode", "Solvers for ordinary differential equations"),
    ("opt", "Optimisation algorithms"),
    ("gm", "Geometry routines"),
    ("rnd", "Random numbers generator"),
    ("tsr", "Tensor algebra and calculus"),
    ("vtk", "3D visualisation with VTK"),
    ("graph", "graph representation using nodes and links"),
]
odir = 'doc/'                # output directory for the generated HTML
idxfn = odir+'index.html'    # index page listing all packages
# Read the license text once for the page footers. The context manager
# closes the handle explicitly; the original open(...).read() leaked it.
with open('LICENSE', 'r') as licfile:
    licen = licfile.read()
def header(title):
    # Opening HTML boilerplate for every generated page. The escaped
    # quotes are deliberate: the result is passed through a shell `echo`
    # (see the Cmd calls below), which consumes one level of escaping
    # before the text reaches the output file.
    return """<html>
<head>
<meta http-equiv=\\"Content-Type\\" content=\\"text/html; charset=utf-8\\">
<title>%s</title>
<link type=\\"text/css\\" rel=\\"stylesheet\\" href=\\"static/style.css\\">
<script type=\\"text/javascript\\" src=\\"static/godocs.js\\"></script>
<style type=\\"text/css\\"></style>
</head>
<body>
<div id=\\"page\\">""" % title
def footer():
    # Closing HTML shared by every generated page; embeds the license
    # text (read at module load) as the copyright notice.
    return """</div><!-- page -->
<div id=\\"footer\\">
<br /><br />
<hr>
<pre class=\\"copyright\\">
%s</pre><!-- copyright -->
</div><!-- footer -->
</body>
</html>""" % licen
def pkgheader(pkg):
    # Page header for one package; pkg is a (name, description) pair.
    return header('Gosl – package '+pkg[0]) + '<h1>Gosl – <b>%s</b> – %s</h1>' % (pkg[0], pkg[1])
def pkgitem(pkg):
    # Index-list entry linking to the per-package page xx<name>.html.
    return '<dd><a href=\\"xx%s.html\\"><b>%s</b>: %s</a></dd>' % (pkg[0], pkg[0], pkg[1])
# Write the index page: header, title, and the opening of the package list.
Cmd('echo "'+header('Gosl – Documentation')+'" > '+idxfn)
Cmd('echo "<h1>Gosl – Documentation</h1>" >> '+idxfn)
Cmd('echo "<h2 id=\\"pkg-index\\">Index</h2>\n<div id=\\"manual-nav\\">\n<dl>" >> '+idxfn)
# For each package: build its HTML page from `godoc -html` output,
# append the footer, and add an entry to the index.
for pkg in pkgs:
    fn = odir+'xx'+pkg[0]+'.html'
    Cmd('echo "'+pkgheader(pkg)+'" > '+fn)
    Cmd('godoc -html github.com/cpmech/gosl/'+pkg[0]+' >> '+fn)
    Cmd('echo "'+footer()+'" >> '+fn)
    Cmd('echo "'+pkgitem(pkg)+'" >> '+idxfn)
    # fix links: godoc emits /src/target paths; rewrite them to GitHub URLs
    Cmd("sed -i -e 's@/src/target@https://github.com/cpmech/gosl/blob/master/"+pkg[0]+"@g' "+fn+"")
# Close the index list and append the shared footer.
Cmd('echo "</dl>\n</div><!-- manual-nav -->" >> '+idxfn)
Cmd('echo "'+footer()+'" >> '+idxfn)
|
PaddySchmidt/gosl
|
xgendoc.py
|
Python
|
bsd-3-clause
| 3,025
|
[
"VTK"
] |
eff20c50c5c304ff840897e5f3c0600e14ebbeddb202e685aea8a76b7cf513e3
|
r"""*Core package definition module for* ``tempvars``.
Context manager for handling temporary variables in
Jupyter Notebook, IPython, etc.
**Author**
Brian Skinn (bskinn@alum.mit.edu)
**File Created**
10 Sep 2017
**Copyright**
\(c) Brian Skinn 2017-2018
**Source Repository**
http://www.github.com/bskinn/tempvars
**Documentation**
http://tempvars.readthedocs.io
**License**
The MIT License; see |license_txt|_ for full license terms
"""
from __future__ import absolute_import
__all__ = ["TempVars"]
from .tempvars import TempVars
__version__ = "1.0.1"
|
bskinn/tempvars
|
tempvars/__init__.py
|
Python
|
mit
| 589
|
[
"Brian"
] |
ccfc9a8a3e37e0c3873474e31ce7aacbc56a076d7e9d4dbab093c1cae62f82c2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import json
from keystoneauth1 import adapter
import mock
import munch
import requests
import six
from openstack import exceptions
from openstack import format
from openstack import resource
from openstack.tests.unit import base
class FakeResponse(object):
    """Minimal stand-in for a requests.Response used by these tests."""
    def __init__(self, response, status_code=200, headers=None):
        self.body = response
        self.status_code = status_code
        # Falsy headers (None or {}) fall back to a JSON content type,
        # matching the original truthiness check.
        headers = headers or {'content-type': 'application/json'}
        self.headers = requests.structures.CaseInsensitiveDict(headers)
    def json(self):
        """Mirror requests.Response.json() by returning the stored body."""
        return self.body
class TestComponent(base.TestCase):
    """Tests for resource._BaseComponent, the descriptor backing
    Body/Header/URI attributes on Resource classes."""
    class ExampleComponent(resource._BaseComponent):
        # Descriptor under test; `key` names the dict attribute on the
        # owning class where values are stored.
        key = "_example"
    # Since we're testing ExampleComponent, which is as isolated as we
    # can test _BaseComponent due to it's needing to be a data member
    # of a class that has an attribute on the parent class named `key`,
    # each test has to implement a class with a name that is the same
    # as ExampleComponent.key, which should be a dict containing the
    # keys and values to test against.
    def test_implementations(self):
        # Each concrete component stores values under a distinct key.
        self.assertEqual("_body", resource.Body.key)
        self.assertEqual("_header", resource.Header.key)
        self.assertEqual("_uri", resource.URI.key)
    def test_creation(self):
        # All constructor arguments are recorded on the descriptor.
        sot = resource._BaseComponent(
            "name", type=int, default=1, alternate_id=True, aka="alias")
        self.assertEqual("name", sot.name)
        self.assertEqual(int, sot.type)
        self.assertEqual(1, sot.default)
        self.assertEqual("alias", sot.aka)
        self.assertTrue(sot.alternate_id)
    def test_get_no_instance(self):
        sot = resource._BaseComponent("test")
        # Test that we short-circuit everything when given no instance.
        result = sot.__get__(None, None)
        self.assertIs(sot, result)
    # NOTE: Some tests will use a default=1 setting when testing result
    # values that should be None because the default-for-default is also None.
    def test_get_name_None(self):
        name = "name"
        class Parent(object):
            _example = {name: None}
        instance = Parent()
        sot = TestComponent.ExampleComponent(name, default=1)
        # Test that we short-circuit any typing of a None value.
        result = sot.__get__(instance, None)
        self.assertIsNone(result)
    def test_get_default(self):
        expected_result = 123
        class Parent(object):
            _example = {}
        instance = Parent()
        # NOTE: type=dict but the default value is an int. If we didn't
        # short-circuit the typing part of __get__ it would fail.
        sot = TestComponent.ExampleComponent("name", type=dict,
                                             default=expected_result)
        # Test that we directly return any default value.
        result = sot.__get__(instance, None)
        self.assertEqual(expected_result, result)
    def test_get_name_untyped(self):
        name = "name"
        expected_result = 123
        class Parent(object):
            _example = {name: expected_result}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name")
        # Test that we return any the value as it is set.
        result = sot.__get__(instance, None)
        self.assertEqual(expected_result, result)
    # The code path for typing after a raw value has been found is the same.
    def test_get_name_typed(self):
        name = "name"
        value = "123"
        class Parent(object):
            _example = {name: value}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name", type=int)
        # Test that we run the underlying value through type conversion.
        result = sot.__get__(instance, None)
        self.assertEqual(int(value), result)
    def test_get_name_formatter(self):
        name = "name"
        value = "123"
        expected_result = "one hundred twenty three"
        class Parent(object):
            _example = {name: value}
        class FakeFormatter(format.Formatter):
            # Formatter types deserialize on read instead of calling type().
            @classmethod
            def deserialize(cls, value):
                return expected_result
        instance = Parent()
        sot = TestComponent.ExampleComponent("name", type=FakeFormatter)
        # Mock out issubclass rather than having an actual format.Formatter
        # This can't be mocked via decorator, isolate it to wrapping the call.
        result = sot.__get__(instance, None)
        self.assertEqual(expected_result, result)
    def test_set_name_untyped(self):
        name = "name"
        expected_value = "123"
        class Parent(object):
            _example = {}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name")
        # Test that we don't run the value through type conversion.
        sot.__set__(instance, expected_value)
        self.assertEqual(expected_value, instance._example[name])
    def test_set_name_typed(self):
        expected_value = "123"
        class Parent(object):
            _example = {}
        instance = Parent()
        # The type we give to ExampleComponent has to be an actual type,
        # not an instance, so we can't get the niceties of a mock.Mock
        # instance that would allow us to call `assert_called_once_with` to
        # ensure that we're sending the value through the type.
        # Instead, we use this tiny version of a similar thing.
        class FakeType(object):
            calls = []
            def __init__(self, arg):
                FakeType.calls.append(arg)
        sot = TestComponent.ExampleComponent("name", type=FakeType)
        # Test that we run the value through type conversion.
        sot.__set__(instance, expected_value)
        self.assertEqual([expected_value], FakeType.calls)
    def test_set_name_formatter(self):
        expected_value = "123"
        class Parent(object):
            _example = {}
        instance = Parent()
        # As with test_set_name_typed, create a pseudo-Mock to track what
        # gets called on the type.
        class FakeFormatter(format.Formatter):
            calls = []
            @classmethod
            def serialize(cls, arg):
                FakeFormatter.calls.append(arg)
            @classmethod
            def deserialize(cls, arg):
                FakeFormatter.calls.append(arg)
        sot = TestComponent.ExampleComponent("name", type=FakeFormatter)
        # Test that we run the value through type conversion.
        sot.__set__(instance, expected_value)
        self.assertEqual([expected_value], FakeFormatter.calls)
    def test_delete_name(self):
        name = "name"
        expected_value = "123"
        class Parent(object):
            _example = {name: expected_value}
        instance = Parent()
        sot = TestComponent.ExampleComponent("name")
        sot.__delete__(instance)
        self.assertNotIn(name, instance._example)
    def test_delete_name_doesnt_exist(self):
        # Deleting an absent key must not raise.
        name = "name"
        expected_value = "123"
        class Parent(object):
            _example = {"what": expected_value}
        instance = Parent()
        sot = TestComponent.ExampleComponent(name)
        sot.__delete__(instance)
        self.assertNotIn(name, instance._example)
class TestComponentManager(base.TestCase):
    """Tests for resource._ComponentManager, the dict-like store that
    tracks which attributes have changed ("dirty") since the last sync."""
    def test_create_basic(self):
        sot = resource._ComponentManager()
        self.assertEqual(dict(), sot.attributes)
        self.assertEqual(set(), sot._dirty)
    def test_create_unsynced(self):
        # synchronized=False: every initial attribute counts as dirty.
        attrs = {"hey": 1, "hi": 2, "hello": 3}
        sync = False
        sot = resource._ComponentManager(attributes=attrs, synchronized=sync)
        self.assertEqual(attrs, sot.attributes)
        self.assertEqual(set(attrs.keys()), sot._dirty)
    def test_create_synced(self):
        # synchronized=True: attributes mirror the server; nothing dirty.
        attrs = {"hey": 1, "hi": 2, "hello": 3}
        sync = True
        sot = resource._ComponentManager(attributes=attrs, synchronized=sync)
        self.assertEqual(attrs, sot.attributes)
        self.assertEqual(set(), sot._dirty)
    def test_getitem(self):
        key = "key"
        value = "value"
        attrs = {key: value}
        sot = resource._ComponentManager(attributes=attrs)
        self.assertEqual(value, sot.__getitem__(key))
    def test_setitem_new(self):
        # Setting a new key stores it and marks it dirty.
        key = "key"
        value = "value"
        sot = resource._ComponentManager()
        sot.__setitem__(key, value)
        self.assertIn(key, sot.attributes)
        self.assertIn(key, sot.dirty)
    def test_setitem_unchanged(self):
        key = "key"
        value = "value"
        attrs = {key: value}
        sot = resource._ComponentManager(attributes=attrs, synchronized=True)
        # This shouldn't end up in the dirty list since we're just re-setting.
        sot.__setitem__(key, value)
        self.assertEqual(value, sot.attributes[key])
        self.assertNotIn(key, sot.dirty)
    def test_delitem(self):
        key = "key"
        value = "value"
        attrs = {key: value}
        sot = resource._ComponentManager(attributes=attrs, synchronized=True)
        sot.__delitem__(key)
        # Deletion is recorded as a dirty entry whose value is None.
        self.assertIsNone(sot.dirty[key])
    def test_iter(self):
        attrs = {"key": "value"}
        sot = resource._ComponentManager(attributes=attrs)
        self.assertItemsEqual(iter(attrs), sot.__iter__())
    def test_len(self):
        attrs = {"key": "value"}
        sot = resource._ComponentManager(attributes=attrs)
        self.assertEqual(len(attrs), sot.__len__())
    def test_dirty(self):
        # dirty accumulates both initial unsynced attrs and new writes.
        key = "key"
        key2 = "key2"
        value = "value"
        attrs = {key: value}
        sot = resource._ComponentManager(attributes=attrs, synchronized=False)
        self.assertEqual({key: value}, sot.dirty)
        sot.__setitem__(key2, value)
        self.assertEqual({key: value, key2: value}, sot.dirty)
    def test_clean(self):
        key = "key"
        value = "value"
        attrs = {key: value}
        sot = resource._ComponentManager(attributes=attrs, synchronized=False)
        self.assertEqual(attrs, sot.dirty)
        sot.clean()
        # clean() marks everything as synchronized again.
        self.assertEqual(dict(), sot.dirty)
class Test_Request(base.TestCase):
    """_Request is a plain value object; confirm it stores its fields."""
    def test_create(self):
        url, body, headers = 1, 2, 3
        sot = resource._Request(url, body, headers)
        # Positional arguments map to url, body, headers in that order.
        self.assertEqual(url, sot.url)
        self.assertEqual(body, sot.body)
        self.assertEqual(headers, sot.headers)
class TestQueryParameters(base.TestCase):
    """Tests for resource.QueryParameters, which maps client-side query
    argument names to server-side names with optional type conversion."""
    def test_create(self):
        location = "location"
        mapping = {"first_name": "first-name",
                   "second_name": {"name": "second-name"},
                   "third_name": {"name": "third", "type": int}}
        sot = resource.QueryParameters(location, **mapping)
        # `limit` and `marker` are always added as built-in parameters.
        self.assertEqual({"location": "location",
                          "first_name": "first-name",
                          "second_name": {"name": "second-name"},
                          "third_name": {"name": "third", "type": int},
                          "limit": "limit",
                          "marker": "marker"},
                         sot._mapping)
    def test_transpose_unmapped(self):
        def _type(value, rtype):
            # Custom converters receive the resource type as 2nd argument.
            self.assertIs(rtype, mock.sentinel.resource_type)
            return value * 10
        location = "location"
        mapping = {"first_name": "first-name",
                   "pet_name": {"name": "pet"},
                   "answer": {"name": "answer", "type": int},
                   "complex": {"type": _type}}
        sot = resource.QueryParameters(location, **mapping)
        result = sot._transpose({"location": "Brooklyn",
                                 "first_name": "Brian",
                                 "pet_name": "Meow",
                                 "answer": "42",
                                 "last_name": "Curtin",
                                 "complex": 1},
                                mock.sentinel.resource_type)
        # last_name isn't mapped and shouldn't be included
        self.assertEqual({"location": "Brooklyn", "first-name": "Brian",
                          "pet": "Meow", "answer": 42, "complex": 10},
                         result)
    def test_transpose_not_in_query(self):
        location = "location"
        mapping = {"first_name": "first-name",
                   "pet_name": {"name": "pet"},
                   "answer": {"name": "answer", "type": int}}
        sot = resource.QueryParameters(location, **mapping)
        result = sot._transpose({"location": "Brooklyn"},
                                mock.sentinel.resource_type)
        # first_name not being in the query shouldn't affect results
        self.assertEqual({"location": "Brooklyn"},
                         result)
class TestResource(base.TestCase):
def test_initialize_basic(self):
body = {"body": 1}
header = {"header": 2, "Location": "somewhere"}
uri = {"uri": 3}
computed = {"computed": 4}
everything = dict(itertools.chain(
body.items(),
header.items(),
uri.items(),
computed.items(),
))
mock_collect = mock.Mock()
mock_collect.return_value = body, header, uri, computed
with mock.patch.object(resource.Resource,
"_collect_attrs", mock_collect):
sot = resource.Resource(_synchronized=False, **everything)
mock_collect.assert_called_once_with(everything)
self.assertIsNone(sot.location)
self.assertIsInstance(sot._body, resource._ComponentManager)
self.assertEqual(body, sot._body.dirty)
self.assertIsInstance(sot._header, resource._ComponentManager)
self.assertEqual(header, sot._header.dirty)
self.assertIsInstance(sot._uri, resource._ComponentManager)
self.assertEqual(uri, sot._uri.dirty)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertFalse(sot.allow_list)
self.assertFalse(sot.allow_head)
self.assertEqual('PUT', sot.commit_method)
self.assertEqual('POST', sot.create_method)
def test_repr(self):
a = {"a": 1}
b = {"b": 2}
c = {"c": 3}
d = {"d": 4}
class Test(resource.Resource):
def __init__(self):
self._body = mock.Mock()
self._body.attributes.items = mock.Mock(
return_value=a.items())
self._header = mock.Mock()
self._header.attributes.items = mock.Mock(
return_value=b.items())
self._uri = mock.Mock()
self._uri.attributes.items = mock.Mock(
return_value=c.items())
self._computed = mock.Mock()
self._computed.attributes.items = mock.Mock(
return_value=d.items())
the_repr = repr(Test())
# Don't test the arguments all together since the dictionary order
# they're rendered in can't be depended on, nor does it matter.
self.assertIn("openstack.tests.unit.test_resource.Test", the_repr)
self.assertIn("a=1", the_repr)
self.assertIn("b=2", the_repr)
self.assertIn("c=3", the_repr)
self.assertIn("d=4", the_repr)
def test_equality(self):
class Example(resource.Resource):
x = resource.Body("x")
y = resource.Header("y")
z = resource.URI("z")
e1 = Example(x=1, y=2, z=3)
e2 = Example(x=1, y=2, z=3)
e3 = Example(x=0, y=0, z=0)
self.assertEqual(e1, e2)
self.assertNotEqual(e1, e3)
self.assertNotEqual(e1, None)
def test__update(self):
sot = resource.Resource()
body = "body"
header = "header"
uri = "uri"
computed = "computed"
sot._collect_attrs = mock.Mock(
return_value=(body, header, uri, computed))
sot._body.update = mock.Mock()
sot._header.update = mock.Mock()
sot._uri.update = mock.Mock()
sot._computed.update = mock.Mock()
args = {"arg": 1}
sot._update(**args)
sot._collect_attrs.assert_called_once_with(args)
sot._body.update.assert_called_once_with(body)
sot._header.update.assert_called_once_with(header)
sot._uri.update.assert_called_once_with(uri)
sot._computed.update.assert_called_with(computed)
def test__consume_attrs(self):
serverside_key1 = "someKey1"
clientside_key1 = "some_key1"
serverside_key2 = "someKey2"
clientside_key2 = "some_key2"
value1 = "value1"
value2 = "value2"
mapping = {serverside_key1: clientside_key1,
serverside_key2: clientside_key2}
other_key = "otherKey"
other_value = "other"
attrs = {clientside_key1: value1,
serverside_key2: value2,
other_key: other_value}
sot = resource.Resource()
result = sot._consume_attrs(mapping, attrs)
# Make sure that the expected key was consumed and we're only
# left with the other stuff.
self.assertDictEqual({other_key: other_value}, attrs)
# Make sure that after we've popped our relevant client-side
# key off that we are returning it keyed off of its server-side
# name.
self.assertDictEqual({serverside_key1: value1,
serverside_key2: value2}, result)
def test__mapping_defaults(self):
# Check that even on an empty class, we get the expected
# built-in attributes.
self.assertIn("location", resource.Resource._computed_mapping())
self.assertIn("name", resource.Resource._body_mapping())
self.assertIn("id", resource.Resource._body_mapping())
def test__mapping_overrides(self):
# Iterating through the MRO used to wipe out overrides of mappings
# found in base classes.
new_name = "MyName"
new_id = "MyID"
class Test(resource.Resource):
name = resource.Body(new_name)
id = resource.Body(new_id)
mapping = Test._body_mapping()
self.assertEqual("name", mapping["MyName"])
self.assertEqual("id", mapping["MyID"])
def test__body_mapping(self):
class Test(resource.Resource):
x = resource.Body("x")
y = resource.Body("y")
z = resource.Body("z")
self.assertIn("x", Test._body_mapping())
self.assertIn("y", Test._body_mapping())
self.assertIn("z", Test._body_mapping())
def test__header_mapping(self):
class Test(resource.Resource):
x = resource.Header("x")
y = resource.Header("y")
z = resource.Header("z")
self.assertIn("x", Test._header_mapping())
self.assertIn("y", Test._header_mapping())
self.assertIn("z", Test._header_mapping())
def test__uri_mapping(self):
class Test(resource.Resource):
x = resource.URI("x")
y = resource.URI("y")
z = resource.URI("z")
self.assertIn("x", Test._uri_mapping())
self.assertIn("y", Test._uri_mapping())
self.assertIn("z", Test._uri_mapping())
def test__getattribute__id_in_body(self):
id = "lol"
sot = resource.Resource(id=id)
result = getattr(sot, "id")
self.assertEqual(result, id)
def test__getattribute__id_with_alternate(self):
id = "lol"
class Test(resource.Resource):
blah = resource.Body("blah", alternate_id=True)
sot = Test(blah=id)
result = getattr(sot, "id")
self.assertEqual(result, id)
def test__getattribute__id_without_alternate(self):
class Test(resource.Resource):
id = None
sot = Test()
self.assertIsNone(sot.id)
    def test__alternate_id_None(self):
        """_alternate_id() is the empty string when no Body opts in."""
        self.assertEqual("", resource.Resource._alternate_id())
def test__alternate_id(self):
class Test(resource.Resource):
alt = resource.Body("the_alt", alternate_id=True)
self.assertEqual("the_alt", Test._alternate_id())
value1 = "lol"
sot = Test(alt=value1)
self.assertEqual(sot.alt, value1)
self.assertEqual(sot.id, value1)
value2 = "rofl"
sot = Test(the_alt=value2)
self.assertEqual(sot.alt, value2)
self.assertEqual(sot.id, value2)
    def test__alternate_id_from_other_property(self):
        """An alternate_id Body among several is found; explicit id wins."""
        class Test(resource.Resource):
            foo = resource.Body("foo")
            bar = resource.Body("bar", alternate_id=True)
        # NOTE(redrobot): My expectation looking at the Test class defined
        # in this test is that because the alternate_id parameter is
        # being set to True on the "bar" property of the Test class,
        # then the _alternate_id() method should return the name of that
        # "bar" property.
        self.assertEqual("bar", Test._alternate_id())
        sot = Test(bar='bunnies')
        self.assertEqual(sot.id, 'bunnies')
        self.assertEqual(sot.bar, 'bunnies')
        # An explicitly supplied id takes precedence over the alternate.
        sot = Test(id='chickens', bar='bunnies')
        self.assertEqual(sot.id, 'chickens')
        self.assertEqual(sot.bar, 'bunnies')
def test__get_id_instance(self):
class Test(resource.Resource):
id = resource.Body("id")
value = "id"
sot = Test(id=value)
self.assertEqual(value, sot._get_id(sot))
def test__get_id_instance_alternate(self):
class Test(resource.Resource):
attr = resource.Body("attr", alternate_id=True)
value = "id"
sot = Test(attr=value)
self.assertEqual(value, sot._get_id(sot))
    def test__get_id_value(self):
        """_get_id() passes a bare (non-Resource) value straight through."""
        value = "id"
        self.assertEqual(value, resource.Resource._get_id(value))
    def test__attributes(self):
        """_attributes() honors aliases, remote names and component filters."""
        class Test(resource.Resource):
            foo = resource.Header('foo')
            bar = resource.Body('bar', aka='_bar')
            bar_local = resource.Body('bar_remote')
        sot = Test()
        # Default: local names plus aliases plus built-ins.
        self.assertEqual(
            sorted(['foo', 'bar', '_bar', 'bar_local',
                    'id', 'name', 'location']),
            sorted(sot._attributes())
        )
        # include_aliases=False drops the '_bar' alias.
        self.assertEqual(
            sorted(['foo', 'bar', 'bar_local', 'id', 'name', 'location']),
            sorted(sot._attributes(include_aliases=False))
        )
        # remote_names=True reports 'bar_remote' instead of 'bar_local'.
        self.assertEqual(
            sorted(['foo', 'bar', '_bar', 'bar_remote',
                    'id', 'name', 'location']),
            sorted(sot._attributes(remote_names=True))
        )
        # Filtering to Body+Computed excludes the Header attribute 'foo'.
        self.assertEqual(
            sorted(['bar', '_bar', 'bar_local', 'id', 'name', 'location']),
            sorted(sot._attributes(
                components=tuple([resource.Body, resource.Computed])))
        )
        # Filtering to Header yields only 'foo'.
        self.assertEqual(
            ('foo',),
            tuple(sot._attributes(components=tuple([resource.Header])))
        )
def test__attributes_iterator(self):
class Parent(resource.Resource):
foo = resource.Header('foo')
bar = resource.Body('bar', aka='_bar')
class Child(Parent):
foo1 = resource.Header('foo1')
bar1 = resource.Body('bar1')
sot = Child()
expected = ['foo', 'bar', 'foo1', 'bar1']
for attr, component in sot._attributes_iterator():
if attr in expected:
expected.remove(attr)
self.assertEqual([], expected)
expected = ['foo', 'foo1']
# Check we iterate only over headers
for attr, component in sot._attributes_iterator(
components=tuple([resource.Header])):
if attr in expected:
expected.remove(attr)
self.assertEqual([], expected)
    def test_to_dict(self):
        """to_dict() includes unset attributes (as None) and aliases."""
        class Test(resource.Resource):
            foo = resource.Header('foo')
            bar = resource.Body('bar', aka='_bar')
        res = Test(id='FAKE_ID')
        expected = {
            'id': 'FAKE_ID',
            'name': None,
            'location': None,
            'foo': None,
            'bar': None,
            '_bar': None
        }
        self.assertEqual(expected, res.to_dict())
    def test_to_dict_nested(self):
        """Nested Resources and lists of Resources are recursively dicted."""
        class Test(resource.Resource):
            foo = resource.Header('foo')
            bar = resource.Body('bar')
            a_list = resource.Body('a_list')
        class Sub(resource.Resource):
            sub = resource.Body('foo')
        sub = Sub(id='ANOTHER_ID', foo='bar')
        res = Test(
            id='FAKE_ID',
            bar=sub,
            a_list=[sub])
        expected = {
            'id': 'FAKE_ID',
            'name': None,
            'location': None,
            'foo': None,
            'bar': {
                'id': 'ANOTHER_ID',
                'name': None,
                'sub': 'bar',
                'location': None,
            },
            'a_list': [{
                'id': 'ANOTHER_ID',
                'name': None,
                'sub': 'bar',
                'location': None,
            }],
        }
        self.assertEqual(expected, res.to_dict())
        # _to_munch=True gives attribute access into the nested structures.
        a_munch = res.to_dict(_to_munch=True)
        self.assertEqual(a_munch.bar.id, 'ANOTHER_ID')
        self.assertEqual(a_munch.bar.sub, 'bar')
        self.assertEqual(a_munch.a_list[0].id, 'ANOTHER_ID')
        self.assertEqual(a_munch.a_list[0].sub, 'bar')
def test_to_dict_no_body(self):
class Test(resource.Resource):
foo = resource.Header('foo')
bar = resource.Body('bar')
res = Test(id='FAKE_ID')
expected = {
'location': None,
'foo': None,
}
self.assertEqual(expected, res.to_dict(body=False))
def test_to_dict_no_header(self):
class Test(resource.Resource):
foo = resource.Header('foo')
bar = resource.Body('bar')
res = Test(id='FAKE_ID')
expected = {
'id': 'FAKE_ID',
'name': None,
'bar': None,
'location': None,
}
self.assertEqual(expected, res.to_dict(headers=False))
def test_to_dict_ignore_none(self):
class Test(resource.Resource):
foo = resource.Header('foo')
bar = resource.Body('bar')
res = Test(id='FAKE_ID', bar='BAR')
expected = {
'id': 'FAKE_ID',
'bar': 'BAR',
}
self.assertEqual(expected, res.to_dict(ignore_none=True))
    def test_to_dict_with_mro(self):
        """to_dict() merges attributes declared across the class MRO."""
        class Parent(resource.Resource):
            foo = resource.Header('foo')
            bar = resource.Body('bar', aka='_bar')
        class Child(Parent):
            foo_new = resource.Header('foo_baz_server')
            bar_new = resource.Body('bar_baz_server')
        res = Child(id='FAKE_ID', bar='test')
        expected = {
            'foo': None,
            'bar': 'test',
            '_bar': 'test',
            'foo_new': None,
            'bar_new': None,
            'id': 'FAKE_ID',
            'location': None,
            'name': None
        }
        self.assertEqual(expected, res.to_dict())
    def test_json_dumps_from_resource(self):
        """A Resource serializes directly through json.dumps, and the
        serialization reflects updates applied from a response."""
        class Test(resource.Resource):
            foo = resource.Body('foo_remote')
        res = Test(foo='bar')
        expected = '{"foo": "bar", "id": null, "location": null, "name": null}'
        actual = json.dumps(res, sort_keys=True)
        self.assertEqual(expected, actual)
        # After translating a response, dumps picks up the new value.
        response = FakeResponse({
            'foo': 'new_bar'})
        res._translate_response(response)
        expected = ('{"foo": "new_bar", "id": null, '
                    '"location": null, "name": null}')
        actual = json.dumps(res, sort_keys=True)
        self.assertEqual(expected, actual)
def test_items(self):
class Test(resource.Resource):
foo = resource.Body('foo')
bar = resource.Body('bar')
foot = resource.Body('foot')
data = {
'foo': 'bar',
'bar': 'foo\n',
'foot': 'a:b:c:d'
}
res = Test(**data)
for k, v in res.items():
expected = data.get(k)
if expected:
self.assertEqual(v, expected)
    def test_access_by_aka(self):
        """An aka alias is readable by item, attribute and dict protocols."""
        class Test(resource.Resource):
            foo = resource.Header('foo_remote', aka='foo_alias')
        res = Test(foo='bar', name='test')
        self.assertEqual('bar', res['foo_alias'])
        self.assertEqual('bar', res.foo_alias)
        # Both the real name and the alias are present as keys.
        self.assertTrue('foo' in res.keys())
        self.assertTrue('foo_alias' in res.keys())
        expected = munch.Munch({
            'id': None,
            'name': 'test',
            'location': None,
            'foo': 'bar',
            'foo_alias': 'bar'
        })
        actual = munch.Munch(res)
        self.assertEqual(expected, actual)
        self.assertEqual(expected, res.toDict())
        self.assertEqual(expected, res.to_dict())
        self.assertDictEqual(expected, res)
        self.assertDictEqual(expected, dict(res))
def test_to_dict_value_error(self):
class Test(resource.Resource):
foo = resource.Header('foo')
bar = resource.Body('bar')
res = Test(id='FAKE_ID')
err = self.assertRaises(
ValueError,
res.to_dict,
body=False, headers=False, computed=False)
self.assertEqual(
'At least one of `body`, `headers` or `computed` must be True',
six.text_type(err))
    def test_to_dict_with_mro_no_override(self):
        """Child-class component redefinitions take precedence in to_dict()."""
        class Parent(resource.Resource):
            header = resource.Header('HEADER')
            body = resource.Body('BODY')
        class Child(Parent):
            # The following two properties are not supposed to be overridden
            # by the parent class property values.
            header = resource.Header('ANOTHER_HEADER')
            body = resource.Body('ANOTHER_BODY')
        res = Child(id='FAKE_ID', body='BODY_VALUE', header='HEADER_VALUE')
        expected = {
            'body': 'BODY_VALUE',
            'header': 'HEADER_VALUE',
            'id': 'FAKE_ID',
            'location': None,
            'name': None
        }
        self.assertEqual(expected, res.to_dict())
def test_new(self):
class Test(resource.Resource):
attr = resource.Body("attr")
value = "value"
sot = Test.new(attr=value)
self.assertIn("attr", sot._body.dirty)
self.assertEqual(value, sot.attr)
def test_existing(self):
class Test(resource.Resource):
attr = resource.Body("attr")
value = "value"
sot = Test.existing(attr=value)
self.assertNotIn("attr", sot._body.dirty)
self.assertEqual(value, sot.attr)
def test_from_munch_new(self):
class Test(resource.Resource):
attr = resource.Body("body_attr")
value = "value"
orig = munch.Munch(body_attr=value)
sot = Test._from_munch(orig, synchronized=False)
self.assertIn("body_attr", sot._body.dirty)
self.assertEqual(value, sot.attr)
def test_from_munch_existing(self):
class Test(resource.Resource):
attr = resource.Body("body_attr")
value = "value"
orig = munch.Munch(body_attr=value)
sot = Test._from_munch(orig)
self.assertNotIn("body_attr", sot._body.dirty)
self.assertEqual(value, sot.attr)
    def test__prepare_request_with_id(self):
        """_prepare_request(requires_id=True) builds url, body and headers."""
        class Test(resource.Resource):
            base_path = "/something"
            body_attr = resource.Body("x")
            header_attr = resource.Header("y")
        the_id = "id"
        body_value = "body"
        header_value = "header"
        sot = Test(id=the_id, body_attr=body_value, header_attr=header_value,
                   _synchronized=False)
        result = sot._prepare_request(requires_id=True)
        # The id is appended to the base path and echoed into the body.
        self.assertEqual("something/id", result.url)
        self.assertEqual({"x": body_value, "id": the_id}, result.body)
        self.assertEqual({"y": header_value}, result.headers)
def test__prepare_request_missing_id(self):
sot = resource.Resource(id=None)
self.assertRaises(exceptions.InvalidRequest,
sot._prepare_request, requires_id=True)
    def test__prepare_request_with_key(self):
        """prepend_key=True nests the body under the resource_key."""
        key = "key"
        class Test(resource.Resource):
            base_path = "/something"
            resource_key = key
            body_attr = resource.Body("x")
            header_attr = resource.Header("y")
        body_value = "body"
        header_value = "header"
        sot = Test(body_attr=body_value, header_attr=header_value,
                   _synchronized=False)
        result = sot._prepare_request(requires_id=False, prepend_key=True)
        self.assertEqual("/something", result.url)
        # Body is wrapped: {"key": {...}} rather than the flat dict.
        self.assertEqual({key: {"x": body_value}}, result.body)
        self.assertEqual({"y": header_value}, result.headers)
def test__prepare_request_with_patch(self):
class Test(resource.Resource):
commit_jsonpatch = True
base_path = "/something"
x = resource.Body("x")
y = resource.Body("y")
the_id = "id"
sot = Test.existing(id=the_id, x=1, y=2)
sot.x = 3
result = sot._prepare_request(requires_id=True, patch=True)
self.assertEqual("something/id", result.url)
self.assertEqual([{'op': 'replace', 'path': '/x', 'value': 3}],
result.body)
def test__prepare_request_with_patch_not_synchronized(self):
class Test(resource.Resource):
commit_jsonpatch = True
base_path = "/something"
x = resource.Body("x")
y = resource.Body("y")
the_id = "id"
sot = Test.new(id=the_id, x=1)
result = sot._prepare_request(requires_id=True, patch=True)
self.assertEqual("something/id", result.url)
self.assertEqual([{'op': 'add', 'path': '/x', 'value': 1}],
result.body)
def test__translate_response_no_body(self):
class Test(resource.Resource):
attr = resource.Header("attr")
response = FakeResponse({}, headers={"attr": "value"})
sot = Test()
sot._translate_response(response, has_body=False)
self.assertEqual(dict(), sot._header.dirty)
self.assertEqual("value", sot.attr)
    def test__translate_response_with_body_no_resource_key(self):
        """Without a resource_key, body attrs come from the raw JSON."""
        class Test(resource.Resource):
            attr = resource.Body("attr")
        body = {"attr": "value"}
        response = FakeResponse(body)
        sot = Test()
        # side_effect order: first call filters the body, second the headers.
        sot._filter_component = mock.Mock(side_effect=[body, dict()])
        sot._translate_response(response, has_body=True)
        self.assertEqual("value", sot.attr)
        self.assertEqual(dict(), sot._body.dirty)
        self.assertEqual(dict(), sot._header.dirty)
    def test__translate_response_with_body_with_resource_key(self):
        """With a resource_key, the body is unwrapped before translation."""
        key = "key"
        class Test(resource.Resource):
            resource_key = key
            attr = resource.Body("attr")
        body = {"attr": "value"}
        response = FakeResponse({key: body})
        sot = Test()
        # side_effect order: first call filters the body, second the headers.
        sot._filter_component = mock.Mock(side_effect=[body, dict()])
        sot._translate_response(response, has_body=True)
        self.assertEqual("value", sot.attr)
        self.assertEqual(dict(), sot._body.dirty)
        self.assertEqual(dict(), sot._header.dirty)
    def test_cant_do_anything(self):
        """Every operation raises MethodNotSupported when its allow_* flag
        is False."""
        class Test(resource.Resource):
            allow_create = False
            allow_fetch = False
            allow_commit = False
            allow_delete = False
            allow_head = False
            allow_list = False
        sot = Test()
        # The first argument to all of these operations is the session,
        # but we raise before we get to it so just pass anything in.
        self.assertRaises(exceptions.MethodNotSupported, sot.create, "")
        self.assertRaises(exceptions.MethodNotSupported, sot.fetch, "")
        self.assertRaises(exceptions.MethodNotSupported, sot.delete, "")
        self.assertRaises(exceptions.MethodNotSupported, sot.head, "")
        # list is a generator so you need to begin consuming
        # it in order to exercise the failure.
        the_list = sot.list("")
        self.assertRaises(exceptions.MethodNotSupported, next, the_list)
        # Update checks the dirty list first before even trying to see
        # if the call can be made, so fake a dirty list.
        sot._body = mock.Mock()
        sot._body.dirty = mock.Mock(return_value={"x": "y"})
        self.assertRaises(exceptions.MethodNotSupported, sot.commit, "")
    def test_unknown_attrs_under_props_create(self):
        """Unknown kwargs are collected under 'properties' when the
        resource opts in via _store_unknown_attrs_as_properties."""
        class Test(resource.Resource):
            properties = resource.Body("properties")
            _store_unknown_attrs_as_properties = True
        sot = Test.new(**{
            'dummy': 'value',
        })
        self.assertDictEqual({'dummy': 'value'}, sot.properties)
        self.assertDictEqual(
            {'dummy': 'value'}, sot.to_dict()['properties']
        )
        self.assertDictEqual(
            {'dummy': 'value'}, sot['properties']
        )
        self.assertEqual('value', sot['properties']['dummy'])
        # An explicit 'properties' value coexists with unknown attrs.
        sot = Test.new(**{
            'dummy': 'value',
            'properties': 'a,b,c'
        })
        self.assertDictEqual(
            {'dummy': 'value', 'properties': 'a,b,c'},
            sot.properties
        )
        self.assertDictEqual(
            {'dummy': 'value', 'properties': 'a,b,c'},
            sot.to_dict()['properties']
        )
        # Explicit None is preserved, not replaced by a dict.
        sot = Test.new(**{'properties': None})
        self.assertIsNone(sot.properties)
        self.assertIsNone(sot.to_dict()['properties'])
def test_unknown_attrs_not_stored(self):
class Test(resource.Resource):
properties = resource.Body("properties")
sot = Test.new(**{
'dummy': 'value',
})
self.assertIsNone(sot.properties)
def test_unknown_attrs_not_stored1(self):
class Test(resource.Resource):
_store_unknown_attrs_as_properties = True
sot = Test.new(**{
'dummy': 'value',
})
self.assertRaises(KeyError, sot.__getitem__, 'properties')
def test_unknown_attrs_under_props_set(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
sot = Test.new(**{
'dummy': 'value',
})
sot['properties'] = {'dummy': 'new_value'}
self.assertEqual('new_value', sot['properties']['dummy'])
sot.properties = {'dummy': 'new_value1'}
self.assertEqual('new_value1', sot['properties']['dummy'])
    def test_unknown_attrs_prepare_request_unpacked(self):
        """Stored unknown attrs are unpacked back to the request root."""
        class Test(resource.Resource):
            properties = resource.Body("properties")
            _store_unknown_attrs_as_properties = True
        # Unknown attribute given as root attribute
        sot = Test.new(**{
            'dummy': 'value',
            'properties': 'a,b,c'
        })
        request_body = sot._prepare_request(requires_id=False).body
        self.assertEqual('value', request_body['dummy'])
        self.assertEqual('a,b,c', request_body['properties'])
        # properties are already a dict
        sot = Test.new(**{
            'properties': {
                'properties': 'a,b,c',
                'dummy': 'value'
            }
        })
        request_body = sot._prepare_request(requires_id=False).body
        self.assertEqual('value', request_body['dummy'])
        self.assertEqual('a,b,c', request_body['properties'])
def test_unknown_attrs_prepare_request_no_unpack_dict(self):
# if props type is not None - ensure no unpacking is done
class Test(resource.Resource):
properties = resource.Body("properties", type=dict)
sot = Test.new(**{
'properties': {
'properties': 'a,b,c',
'dummy': 'value'
}
})
request_body = sot._prepare_request(requires_id=False).body
self.assertDictEqual(
{'dummy': 'value', 'properties': 'a,b,c'},
request_body['properties'])
    def test_unknown_attrs_prepare_request_patch_unpacked(self):
        """JSON-patch generation targets the unpacked unknown attribute."""
        class Test(resource.Resource):
            properties = resource.Body("properties")
            _store_unknown_attrs_as_properties = True
            commit_jsonpatch = True
        sot = Test.existing(**{
            'dummy': 'value',
            'properties': 'a,b,c'
        })
        sot._update(**{'properties': {'dummy': 'new_value'}})
        request_body = sot._prepare_request(requires_id=False, patch=True).body
        # The patch path is '/dummy', not '/properties/dummy'.
        self.assertDictEqual(
            {
                u'path': u'/dummy',
                u'value': u'new_value',
                u'op': u'replace'
            },
            request_body[0])
def test_unknown_attrs_under_props_translate_response(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
body = {'dummy': 'value', 'properties': 'a,b,c'}
response = FakeResponse(body)
sot = Test()
sot._translate_response(response, has_body=True)
self.assertDictEqual(
{'dummy': 'value', 'properties': 'a,b,c'},
sot.properties
)
class TestResourceActions(base.TestCase):
    def setUp(self):
        """Build a permissive Test resource plus fully-mocked request,
        response and session fixtures shared by the action tests."""
        super(TestResourceActions, self).setUp()
        self.service_name = "service"
        self.base_path = "base_path"
        # A resource with every operation enabled.
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            resources_key = 'resources'
            allow_create = True
            allow_fetch = True
            allow_head = True
            allow_commit = True
            allow_delete = True
            allow_list = True
        self.test_class = Test
        # Canned request object returned by the mocked _prepare_request.
        self.request = mock.Mock(spec=resource._Request)
        self.request.url = "uri"
        self.request.body = "body"
        self.request.headers = "headers"
        self.response = FakeResponse({})
        self.sot = Test(id="id")
        self.sot._prepare_request = mock.Mock(return_value=self.request)
        self.sot._translate_response = mock.Mock()
        # Session mock: every HTTP verb returns the canned response.
        self.session = mock.Mock(spec=adapter.Adapter)
        self.session.create = mock.Mock(return_value=self.response)
        self.session.get = mock.Mock(return_value=self.response)
        self.session.put = mock.Mock(return_value=self.response)
        self.session.patch = mock.Mock(return_value=self.response)
        self.session.post = mock.Mock(return_value=self.response)
        self.session.delete = mock.Mock(return_value=self.response)
        self.session.head = mock.Mock(return_value=self.response)
        self.session.session = self.session
        self.session._get_connection = mock.Mock(return_value=self.cloud)
        self.session.default_microversion = None
        self.session.retriable_status_codes = None
        # Endpoint supports microversions up to 1.99.
        self.endpoint_data = mock.Mock(max_microversion='1.99',
                                       min_microversion=None)
        self.session.get_endpoint_data.return_value = self.endpoint_data
    def _test_create(self, cls, requires_id=False, prepend_key=False,
                     microversion=None, base_path=None, params=None):
        """Shared driver asserting create() uses PUT (with id) or POST
        (without id) and passes the expected arguments through."""
        id = "id" if requires_id else None
        sot = cls(id=id)
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()
        params = params or {}
        result = sot.create(self.session, prepend_key=prepend_key,
                            base_path=base_path, **params)
        sot._prepare_request.assert_called_once_with(
            requires_id=requires_id, prepend_key=prepend_key,
            base_path=base_path)
        if requires_id:
            # create_method == 'PUT' path.
            self.session.put.assert_called_once_with(
                self.request.url,
                json=self.request.body, headers=self.request.headers,
                microversion=microversion, params=params)
        else:
            # create_method == 'POST' path.
            self.session.post.assert_called_once_with(
                self.request.url,
                json=self.request.body, headers=self.request.headers,
                microversion=microversion, params=params)
        self.assertEqual(sot.microversion, microversion)
        sot._translate_response.assert_called_once_with(self.response,
                                                        has_body=sot.has_body)
        self.assertEqual(result, sot)
def test_put_create(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'PUT'
self._test_create(Test, requires_id=True, prepend_key=True)
    def test_put_create_with_microversion(self):
        """A class-level _max_microversion is negotiated into the PUT."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            create_method = 'PUT'
            _max_microversion = '1.42'
        self._test_create(Test, requires_id=True, prepend_key=True,
                          microversion='1.42')
    def test_put_create_with_params(self):
        """Extra kwargs to create() are forwarded as query params (PUT)."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            create_method = 'PUT'
        self._test_create(Test, requires_id=True, prepend_key=True,
                          params={'answer': 42})
def test_post_create(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'POST'
self._test_create(Test, requires_id=False, prepend_key=True)
    def test_post_create_base_path(self):
        """An explicit base_path override is forwarded to the request."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            create_method = 'POST'
        self._test_create(Test, requires_id=False, prepend_key=True,
                          base_path='dummy')
    def test_post_create_with_params(self):
        """Extra kwargs to create() are forwarded as query params (POST)."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_create = True
            create_method = 'POST'
        self._test_create(Test, requires_id=False, prepend_key=True,
                          params={'answer': 42})
    def test_fetch(self):
        """fetch() GETs the prepared URL and translates the response."""
        result = self.sot.fetch(self.session)
        self.sot._prepare_request.assert_called_once_with(
            requires_id=True, base_path=None)
        self.session.get.assert_called_once_with(
            self.request.url, microversion=None, params={})
        self.assertIsNone(self.sot.microversion)
        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)
    def test_fetch_with_params(self):
        """Extra kwargs to fetch() become GET query parameters."""
        result = self.sot.fetch(self.session, fields='a,b')
        self.sot._prepare_request.assert_called_once_with(
            requires_id=True, base_path=None)
        self.session.get.assert_called_once_with(
            self.request.url, microversion=None, params={'fields': 'a,b'})
        self.assertIsNone(self.sot.microversion)
        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)
    def test_fetch_with_microversion(self):
        """A class-level _max_microversion is negotiated into the GET."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_fetch = True
            _max_microversion = '1.42'
        sot = Test(id='id')
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()
        result = sot.fetch(self.session)
        sot._prepare_request.assert_called_once_with(
            requires_id=True, base_path=None)
        self.session.get.assert_called_once_with(
            self.request.url, microversion='1.42', params={})
        self.assertEqual(sot.microversion, '1.42')
        sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, sot)
    def test_fetch_not_requires_id(self):
        """fetch(requires_id=False) prepares a request with no id."""
        result = self.sot.fetch(self.session, False)
        self.sot._prepare_request.assert_called_once_with(
            requires_id=False, base_path=None)
        self.session.get.assert_called_once_with(
            self.request.url, microversion=None, params={})
        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)
    def test_fetch_base_path(self):
        """An explicit base_path override is passed to _prepare_request."""
        result = self.sot.fetch(self.session, False, base_path='dummy')
        self.sot._prepare_request.assert_called_once_with(
            requires_id=False,
            base_path='dummy')
        self.session.get.assert_called_once_with(
            self.request.url, microversion=None, params={})
        self.sot._translate_response.assert_called_once_with(self.response)
        self.assertEqual(result, self.sot)
    def test_head(self):
        """head() issues HEAD and translates a body-less response."""
        result = self.sot.head(self.session)
        self.sot._prepare_request.assert_called_once_with(base_path=None)
        self.session.head.assert_called_once_with(
            self.request.url,
            microversion=None)
        self.assertIsNone(self.sot.microversion)
        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=False)
        self.assertEqual(result, self.sot)
    def test_head_base_path(self):
        """head() forwards an explicit base_path override."""
        result = self.sot.head(self.session, base_path='dummy')
        self.sot._prepare_request.assert_called_once_with(base_path='dummy')
        self.session.head.assert_called_once_with(
            self.request.url,
            microversion=None)
        self.assertIsNone(self.sot.microversion)
        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=False)
        self.assertEqual(result, self.sot)
    def test_head_with_microversion(self):
        """A class-level _max_microversion is negotiated into the HEAD."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_head = True
            _max_microversion = '1.42'
        sot = Test(id='id')
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()
        result = sot.head(self.session)
        sot._prepare_request.assert_called_once_with(base_path=None)
        self.session.head.assert_called_once_with(
            self.request.url,
            microversion='1.42')
        self.assertEqual(sot.microversion, '1.42')
        sot._translate_response.assert_called_once_with(
            self.response, has_body=False)
        self.assertEqual(result, sot)
    def _test_commit(self, commit_method='PUT', prepend_key=True,
                     has_body=True, microversion=None,
                     commit_args=None, expected_args=None, base_path=None):
        """Shared driver asserting commit() dispatches to the session verb
        matching commit_method with the expected arguments."""
        self.sot.commit_method = commit_method
        # Need to make sot look dirty so we can attempt an update
        self.sot._body = mock.Mock()
        self.sot._body.dirty = mock.Mock(return_value={"x": "y"})
        self.sot.commit(self.session, prepend_key=prepend_key,
                        has_body=has_body, base_path=base_path,
                        **(commit_args or {}))
        self.sot._prepare_request.assert_called_once_with(
            prepend_key=prepend_key, base_path=base_path)
        if commit_method == 'PATCH':
            self.session.patch.assert_called_once_with(
                self.request.url,
                json=self.request.body, headers=self.request.headers,
                microversion=microversion, **(expected_args or {}))
        elif commit_method == 'POST':
            self.session.post.assert_called_once_with(
                self.request.url,
                json=self.request.body, headers=self.request.headers,
                microversion=microversion, **(expected_args or {}))
        elif commit_method == 'PUT':
            self.session.put.assert_called_once_with(
                self.request.url,
                json=self.request.body, headers=self.request.headers,
                microversion=microversion, **(expected_args or {}))
        self.assertEqual(self.sot.microversion, microversion)
        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=has_body)
    def test_commit_put(self):
        """commit() with commit_method='PUT' goes through session.put."""
        self._test_commit(commit_method='PUT', prepend_key=True, has_body=True)
    def test_commit_patch(self):
        """commit() with commit_method='PATCH' goes through session.patch."""
        self._test_commit(
            commit_method='PATCH', prepend_key=False, has_body=False)
    def test_commit_base_path(self):
        """commit() forwards an explicit base_path override."""
        self._test_commit(commit_method='PUT', prepend_key=True, has_body=True,
                          base_path='dummy')
    def test_commit_patch_retry_on_conflict(self):
        """retry_on_conflict=True adds 409 to PATCH retriable codes."""
        self._test_commit(
            commit_method='PATCH',
            commit_args={'retry_on_conflict': True},
            expected_args={'retriable_status_codes': {409}})
    def test_commit_put_retry_on_conflict(self):
        """retry_on_conflict=True adds 409 to PUT retriable codes."""
        self._test_commit(
            commit_method='PUT',
            commit_args={'retry_on_conflict': True},
            expected_args={'retriable_status_codes': {409}})
    def test_commit_patch_no_retry_on_conflict(self):
        """retry_on_conflict=False strips 409 from PATCH retriable codes."""
        self.session.retriable_status_codes = {409, 503}
        self._test_commit(
            commit_method='PATCH',
            commit_args={'retry_on_conflict': False},
            expected_args={'retriable_status_codes': {503}})
def test_commit_put_no_retry_on_conflict(self):
self.session.retriable_status_codes = {409, 503}
self._test_commit(
commit_method='PATCH',
commit_args={'retry_on_conflict': False},
expected_args={'retriable_status_codes': {503}})
def test_commit_not_dirty(self):
self.sot._body = mock.Mock()
self.sot._body.dirty = dict()
self.sot._header = mock.Mock()
self.sot._header.dirty = dict()
self.sot.commit(self.session)
self.session.put.assert_not_called()
    def test_patch_with_sdk_names(self):
        """patch() rewrites SDK-side attribute paths to server names."""
        class Test(resource.Resource):
            allow_patch = True
            id = resource.Body('id')
            attr = resource.Body('attr')
            nested = resource.Body('renamed')
            other = resource.Body('other')
        test_patch = [{'path': '/attr', 'op': 'replace', 'value': 'new'},
                      {'path': '/nested/dog', 'op': 'remove'},
                      {'path': '/nested/cat', 'op': 'add', 'value': 'meow'}]
        # '/nested/...' must be translated to the server name '/renamed/...'.
        expected = [{'path': '/attr', 'op': 'replace', 'value': 'new'},
                    {'path': '/renamed/dog', 'op': 'remove'},
                    {'path': '/renamed/cat', 'op': 'add', 'value': 'meow'}]
        sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'})
        sot.patch(self.session, test_patch)
        self.session.patch.assert_called_once_with(
            '/1', json=expected, headers=mock.ANY, microversion=None)
    def test_patch_with_server_names(self):
        """patch() passes server-side attribute paths through unchanged."""
        class Test(resource.Resource):
            allow_patch = True
            id = resource.Body('id')
            attr = resource.Body('attr')
            nested = resource.Body('renamed')
            other = resource.Body('other')
        test_patch = [{'path': '/attr', 'op': 'replace', 'value': 'new'},
                      {'path': '/renamed/dog', 'op': 'remove'},
                      {'path': '/renamed/cat', 'op': 'add', 'value': 'meow'}]
        sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'})
        sot.patch(self.session, test_patch)
        self.session.patch.assert_called_once_with(
            '/1', json=test_patch, headers=mock.ANY, microversion=None)
    def test_patch_with_changed_fields(self):
        """patch() prepends ops for locally-dirty fields to the given ops."""
        class Test(resource.Resource):
            allow_patch = True
            attr = resource.Body('attr')
            nested = resource.Body('renamed')
            other = resource.Body('other')
        sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'})
        sot.attr = 'new'
        sot.patch(self.session, {'path': '/renamed/dog', 'op': 'remove'})
        # Dirty '/attr' replace is merged in before the explicit remove.
        expected = [{'path': '/attr', 'op': 'replace', 'value': 'new'},
                    {'path': '/renamed/dog', 'op': 'remove'}]
        self.session.patch.assert_called_once_with(
            '/1', json=expected, headers=mock.ANY, microversion=None)
    def test_delete(self):
        """delete() issues DELETE and translates a body-less response."""
        result = self.sot.delete(self.session)
        self.sot._prepare_request.assert_called_once_with()
        self.session.delete.assert_called_once_with(
            self.request.url,
            microversion=None)
        self.sot._translate_response.assert_called_once_with(
            self.response, has_body=False)
        self.assertEqual(result, self.sot)
    def test_delete_with_microversion(self):
        """A class-level _max_microversion is negotiated into the DELETE."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            allow_delete = True
            _max_microversion = '1.42'
        sot = Test(id='id')
        sot._prepare_request = mock.Mock(return_value=self.request)
        sot._translate_response = mock.Mock()
        result = sot.delete(self.session)
        sot._prepare_request.assert_called_once_with()
        self.session.delete.assert_called_once_with(
            self.request.url,
            microversion='1.42')
        sot._translate_response.assert_called_once_with(
            self.response, has_body=False)
        self.assertEqual(result, sot)
    # NOTE: As list returns a generator, testing it requires consuming
    # the generator. Wrap calls to self.sot.list in a `list`
    # and then test the results as a list of responses.
    def test_list_empty_response(self):
        """list() over an empty resources payload yields nothing."""
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {"resources": []}
        self.session.get.return_value = mock_response
        result = list(self.sot.list(self.session))
        self.session.get.assert_called_once_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        self.assertEqual([], result)
    def test_list_one_page_response_paginated(self):
        """paginated=True stops after a single page with no next link."""
        id_value = 1
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {"resources": [{"id": id_value}]}
        self.session.get.return_value = mock_response
        # Ensure that we break out of the loop on a paginated call
        # that still only results in one page of data.
        results = list(self.sot.list(self.session, paginated=True))
        self.assertEqual(1, len(results))
        self.assertEqual(1, len(self.session.get.call_args_list))
        self.assertEqual(id_value, results[0].id)
        self.assertIsInstance(results[0], self.test_class)
    def test_list_one_page_response_not_paginated(self):
        """paginated=False performs exactly one GET."""
        id_value = 1
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {"resources": [{"id": id_value}]}
        self.session.get.return_value = mock_response
        results = list(self.sot.list(self.session, paginated=False))
        self.session.get.assert_called_once_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        self.assertEqual(1, len(results))
        self.assertEqual(id_value, results[0].id)
        self.assertIsInstance(results[0], self.test_class)
    def test_list_one_page_response_resources_key(self):
        """list() unwraps the payload under the class resources_key."""
        key = "resources"
        class Test(self.test_class):
            resources_key = key
        id_value = 1
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {key: [{"id": id_value}]}
        mock_response.links = []
        self.session.get.return_value = mock_response
        sot = Test()
        results = list(sot.list(self.session))
        self.session.get.assert_called_once_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        self.assertEqual(1, len(results))
        self.assertEqual(id_value, results[0].id)
        self.assertIsInstance(results[0], self.test_class)
    def test_list_response_paginated_without_links(self):
        """Pagination follows the JSON ``resources_links`` next URL when the
        response carries no RFC 5988 Link headers."""
        ids = [1, 2]
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {
            "resources": [{"id": ids[0]}],
            "resources_links": [{
                "href": "https://example.com/next-url",
                "rel": "next",
            }]
        }
        mock_response2 = mock.Mock()
        mock_response2.status_code = 200
        mock_response2.links = {}
        mock_response2.json.return_value = {
            "resources": [{"id": ids[1]}],
        }
        self.session.get.side_effect = [mock_response, mock_response2]
        results = list(self.sot.list(self.session, paginated=True))
        self.assertEqual(2, len(results))
        self.assertEqual(ids[0], results[0].id)
        self.assertEqual(ids[1], results[1].id)
        # First GET hits the base path, second GET follows the next link.
        self.assertEqual(
            mock.call('base_path',
                      headers={'Accept': 'application/json'}, params={},
                      microversion=None),
            self.session.get.mock_calls[0])
        self.assertEqual(
            mock.call('https://example.com/next-url',
                      headers={'Accept': 'application/json'}, params={},
                      microversion=None),
            self.session.get.mock_calls[1])
        self.assertEqual(2, len(self.session.get.call_args_list))
        self.assertIsInstance(results[0], self.test_class)
    def test_list_response_paginated_with_links(self):
        """Same pagination flow, but with one shared response mock whose
        json() yields a different page on each call."""
        ids = [1, 2]
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.side_effect = [
            {
                "resources": [{"id": ids[0]}],
                "resources_links": [{
                    "href": "https://example.com/next-url",
                    "rel": "next",
                }]
            }, {
                "resources": [{"id": ids[1]}],
            }]
        self.session.get.return_value = mock_response
        results = list(self.sot.list(self.session, paginated=True))
        self.assertEqual(2, len(results))
        self.assertEqual(ids[0], results[0].id)
        self.assertEqual(ids[1], results[1].id)
        self.assertEqual(
            mock.call('base_path',
                      headers={'Accept': 'application/json'}, params={},
                      microversion=None),
            self.session.get.mock_calls[0])
        # mock_calls on session.get also records calls made on its
        # return value (e.g. call().json()), so the second GET appears
        # at index 2 here rather than index 1.
        self.assertEqual(
            mock.call('https://example.com/next-url',
                      headers={'Accept': 'application/json'}, params={},
                      microversion=None),
            self.session.get.mock_calls[2])
        self.assertEqual(2, len(self.session.get.call_args_list))
        self.assertIsInstance(results[0], self.test_class)
    def test_list_response_paginated_with_links_and_query(self):
        """Query parameters parsed back out of the next-url are carried
        into the follow-up request (as lists of strings, as produced by
        URL query-string parsing)."""
        q_limit = 1
        ids = [1, 2]
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.side_effect = [
            {
                "resources": [{"id": ids[0]}],
                "resources_links": [{
                    "href": "https://example.com/next-url?limit=%d" % q_limit,
                    "rel": "next",
                }]
            }, {
                "resources": [{"id": ids[1]}],
            }, {
                "resources": [],
            }]
        self.session.get.return_value = mock_response
        class Test(self.test_class):
            _query_mapping = resource.QueryParameters("limit")
        results = list(Test.list(self.session, paginated=True, limit=q_limit))
        self.assertEqual(2, len(results))
        self.assertEqual(ids[0], results[0].id)
        self.assertEqual(ids[1], results[1].id)
        self.assertEqual(
            mock.call('base_path',
                      headers={'Accept': 'application/json'}, params={
                          'limit': q_limit,
                      },
                      microversion=None),
            self.session.get.mock_calls[0])
        # mock_calls interleaves the call().json() child calls, so the
        # second GET sits at index 2.  Note limit is now a string list,
        # having round-tripped through the next-url query string.
        self.assertEqual(
            mock.call('https://example.com/next-url',
                      headers={'Accept': 'application/json'}, params={
                          'limit': [str(q_limit)],
                      },
                      microversion=None),
            self.session.get.mock_calls[2])
        self.assertEqual(3, len(self.session.get.call_args_list))
        self.assertIsInstance(results[0], self.test_class)
    def test_list_response_paginated_with_microversions(self):
        """The resource's max microversion is sent with every page request
        and recorded on the returned resources."""
        class Test(resource.Resource):
            service = self.service_name
            base_path = self.base_path
            resources_key = 'resources'
            allow_list = True
            _max_microversion = '1.42'
        ids = [1, 2]
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {
            "resources": [{"id": ids[0]}],
            "resources_links": [{
                "href": "https://example.com/next-url",
                "rel": "next",
            }]
        }
        mock_response2 = mock.Mock()
        mock_response2.status_code = 200
        mock_response2.links = {}
        mock_response2.json.return_value = {
            "resources": [{"id": ids[1]}],
        }
        self.session.get.side_effect = [mock_response, mock_response2]
        results = list(Test.list(self.session, paginated=True))
        self.assertEqual(2, len(results))
        self.assertEqual(ids[0], results[0].id)
        self.assertEqual(ids[1], results[1].id)
        self.assertEqual(
            mock.call('base_path',
                      headers={'Accept': 'application/json'}, params={},
                      microversion='1.42'),
            self.session.get.mock_calls[0])
        self.assertEqual(
            mock.call('https://example.com/next-url',
                      headers={'Accept': 'application/json'}, params={},
                      microversion='1.42'),
            self.session.get.mock_calls[1])
        self.assertEqual(2, len(self.session.get.call_args_list))
        self.assertIsInstance(results[0], Test)
        # The negotiated microversion is stamped on each result.
        self.assertEqual('1.42', results[0].microversion)
def test_list_multi_page_response_not_paginated(self):
ids = [1, 2]
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.side_effect = [
{"resources": [{"id": ids[0]}]},
{"resources": [{"id": ids[1]}]},
]
self.session.get.return_value = mock_response
results = list(self.sot.list(self.session, paginated=False))
self.assertEqual(1, len(results))
self.assertEqual(ids[0], results[0].id)
self.assertIsInstance(results[0], self.test_class)
    def test_list_query_params(self):
        """Client-side query names are mapped to their server-side names and
        URI template parameters are expanded into the request path."""
        id = 1
        qp = "query param!"
        qp_name = "query-param"
        uri_param = "uri param!"
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {"resources": [{"id": id}]}
        mock_empty = mock.Mock()
        mock_empty.status_code = 200
        mock_empty.links = {}
        mock_empty.json.return_value = {"resources": []}
        self.session.get.side_effect = [mock_response, mock_empty]
        class Test(self.test_class):
            _query_mapping = resource.QueryParameters(query_param=qp_name)
            base_path = "/%(something)s/blah"
            something = resource.URI("something")
        results = list(Test.list(self.session, paginated=True,
                                 query_param=qp, something=uri_param))
        self.assertEqual(1, len(results))
        # Look at the `params` argument to each of the get calls that
        # were made.
        self.assertEqual(
            self.session.get.call_args_list[0][1]["params"],
            {qp_name: qp})
        self.assertEqual(self.session.get.call_args_list[0][0][0],
                         Test.base_path % {"something": uri_param})
def test_invalid_list_params(self):
id = 1
qp = "query param!"
qp_name = "query-param"
uri_param = "uri param!"
mock_response = mock.Mock()
mock_response.json.side_effect = [[{"id": id}],
[]]
self.session.get.return_value = mock_response
class Test(self.test_class):
_query_mapping = resource.QueryParameters(query_param=qp_name)
base_path = "/%(something)s/blah"
something = resource.URI("something")
try:
list(Test.list(self.session, paginated=True, query_param=qp,
something=uri_param, something_wrong=True))
self.assertFail('The above line should fail')
except exceptions.InvalidResourceQuery as err:
self.assertEqual(str(err), 'Invalid query params: something_wrong')
    def test_allow_invalid_list_params(self):
        """With allow_unknown_params=True, unmapped query arguments are
        silently dropped instead of raising."""
        qp = "query param!"
        qp_name = "query-param"
        uri_param = "uri param!"
        mock_empty = mock.Mock()
        mock_empty.status_code = 200
        mock_empty.links = {}
        mock_empty.json.return_value = {"resources": []}
        self.session.get.side_effect = [mock_empty]
        class Test(self.test_class):
            _query_mapping = resource.QueryParameters(query_param=qp_name)
            base_path = "/%(something)s/blah"
            something = resource.URI("something")
        list(Test.list(self.session, paginated=True, query_param=qp,
                       allow_unknown_params=True, something=uri_param,
                       something_wrong=True))
        # The unknown "something_wrong" filter never reaches the request.
        self.session.get.assert_called_once_with(
            "/{something}/blah".format(something=uri_param),
            headers={'Accept': 'application/json'},
            microversion=None,
            params={qp_name: qp}
        )
    def test_values_as_list_params(self):
        """Filters may also be passed under their server-side name directly
        (here via **{qp_name: qp}) and still reach the request params."""
        id = 1
        qp = "query param!"
        qp_name = "query-param"
        uri_param = "uri param!"
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {"resources": [{"id": id}]}
        mock_empty = mock.Mock()
        mock_empty.status_code = 200
        mock_empty.links = {}
        mock_empty.json.return_value = {"resources": []}
        self.session.get.side_effect = [mock_response, mock_empty]
        class Test(self.test_class):
            _query_mapping = resource.QueryParameters(query_param=qp_name)
            base_path = "/%(something)s/blah"
            something = resource.URI("something")
        results = list(Test.list(self.session, paginated=True,
                                 something=uri_param, **{qp_name: qp}))
        self.assertEqual(1, len(results))
        # Look at the `params` argument to each of the get calls that
        # were made.
        self.assertEqual(
            self.session.get.call_args_list[0][1]["params"],
            {qp_name: qp})
        self.assertEqual(self.session.get.call_args_list[0][0][0],
                         Test.base_path % {"something": uri_param})
    def test_values_as_list_params_precedence(self):
        """When both the client-side name (query_param=qp2) and the
        server-side name (**{qp_name: qp}) are given, the client-side
        mapping wins."""
        id = 1
        qp = "query param!"
        qp2 = "query param!!!!!"
        qp_name = "query-param"
        uri_param = "uri param!"
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {"resources": [{"id": id}]}
        mock_empty = mock.Mock()
        mock_empty.status_code = 200
        mock_empty.links = {}
        mock_empty.json.return_value = {"resources": []}
        self.session.get.side_effect = [mock_response, mock_empty]
        class Test(self.test_class):
            _query_mapping = resource.QueryParameters(query_param=qp_name)
            base_path = "/%(something)s/blah"
            something = resource.URI("something")
        results = list(Test.list(self.session, paginated=True, query_param=qp2,
                                 something=uri_param, **{qp_name: qp}))
        self.assertEqual(1, len(results))
        # Look at the `params` argument to each of the get calls that
        # were made; qp2 (the mapped client-side value) must be used.
        self.assertEqual(
            self.session.get.call_args_list[0][1]["params"],
            {qp_name: qp2})
        self.assertEqual(self.session.get.call_args_list[0][0][0],
                         Test.base_path % {"something": uri_param})
    def test_list_multi_page_response_paginated(self):
        """Stepping the list() generator lazily fetches one page at a time,
        following ``resources_links`` until an empty page arrives."""
        ids = [1, 2]
        resp1 = mock.Mock()
        resp1.status_code = 200
        resp1.links = {}
        resp1.json.return_value = {
            "resources": [{"id": ids[0]}],
            "resources_links": [{
                "href": "https://example.com/next-url",
                "rel": "next",
            }],
        }
        resp2 = mock.Mock()
        resp2.status_code = 200
        resp2.links = {}
        resp2.json.return_value = {
            "resources": [{"id": ids[1]}],
            "resources_links": [{
                "href": "https://example.com/next-url",
                "rel": "next",
            }],
        }
        resp3 = mock.Mock()
        resp3.status_code = 200
        resp3.json.return_value = {
            "resources": []
        }
        self.session.get.side_effect = [resp1, resp2, resp3]
        results = self.sot.list(self.session, paginated=True)
        # assert_called_with checks the *most recent* GET after each step.
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            'https://example.com/next-url',
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        # The final (empty) page ends iteration.
        self.assertRaises(StopIteration, next, results)
        self.session.get.assert_called_with(
            'https://example.com/next-url',
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
    def test_list_multi_page_no_early_termination(self):
        """A short page must not end pagination when a limit was requested."""
        # This tests verifies that multipages are not early terminated.
        # APIs can set max_limit to the number of items returned in each
        # query. If that max_limit is smaller than the limit given by the
        # user, the return value would contain less items than the limit,
        # but that doesn't stand to reason that there are no more records,
        # we should keep trying to get more results.
        ids = [1, 2, 3, 4]
        resp1 = mock.Mock()
        resp1.status_code = 200
        resp1.links = {}
        resp1.json.return_value = {
            # API's max_limit is set to 2.
            "resources": [{"id": ids[0]}, {"id": ids[1]}],
        }
        resp2 = mock.Mock()
        resp2.status_code = 200
        resp2.links = {}
        resp2.json.return_value = {
            # API's max_limit is set to 2.
            "resources": [{"id": ids[2]}, {"id": ids[3]}],
        }
        resp3 = mock.Mock()
        resp3.status_code = 200
        resp3.json.return_value = {
            "resources": [],
        }
        self.session.get.side_effect = [resp1, resp2, resp3]
        results = self.sot.list(self.session, limit=3, paginated=True)
        # First page contains only two items, less than the limit given
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={"limit": 3},
            microversion=None)
        # Second page contains another two items; the marker is the last
        # id seen so far.
        result2 = next(results)
        self.assertEqual(result2.id, ids[2])
        result3 = next(results)
        self.assertEqual(result3.id, ids[3])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={"limit": 3, "marker": 2},
            microversion=None)
        # Ensure we're done after those four items
        self.assertRaises(StopIteration, next, results)
        # Ensure we've given the last try to get more results
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={"limit": 3, "marker": 4},
            microversion=None)
        # Ensure we made three calls to get this done
        self.assertEqual(3, len(self.session.get.call_args_list))
    def test_list_multi_page_inferred_additional(self):
        """A full page with no next link triggers one extra probing GET."""
        # If we explicitly request a limit and we receive EXACTLY that
        # amount of results and there is no next link, we make one additional
        # call to check to see if there are more records and the service is
        # just sad.
        # NOTE(mordred) In a perfect world we would not do this. But it's 2018
        # and I don't think anyone has any illusions that we live in a perfect
        # world anymore.
        ids = [1, 2, 3]
        resp1 = mock.Mock()
        resp1.status_code = 200
        resp1.links = {}
        resp1.json.return_value = {
            "resources": [{"id": ids[0]}, {"id": ids[1]}],
        }
        resp2 = mock.Mock()
        resp2.status_code = 200
        resp2.links = {}
        resp2.json.return_value = {"resources": [{"id": ids[2]}]}
        self.session.get.side_effect = [resp1, resp2]
        results = self.sot.list(self.session, limit=2, paginated=True)
        # Get the first page's two items
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={"limit": 2},
            microversion=None)
        result2 = next(results)
        self.assertEqual(result2.id, ids[2])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={'limit': 2, 'marker': 2},
            microversion=None)
        # Ensure we're done after those three items
        # In python3.7, PEP 479 is enabled for all code, and StopIteration
        # raised directly from code is turned into a RuntimeError.
        # Something about how mock is implemented triggers that here.
        self.assertRaises((StopIteration, RuntimeError), next, results)
        # Ensure we made three calls in total: two pages plus the final
        # exhausted side_effect acting as the extra probe.
        self.assertEqual(3, len(self.session.get.call_args_list))
    def test_list_multi_page_header_count(self):
        """Pagination driven by a total-count response header."""
        class Test(self.test_class):
            resources_key = None
            pagination_key = 'X-Container-Object-Count'
        self.sot = Test()
        # Swift returns a total number of objects in a header and we compare
        # that against the total number returned to know if we need to fetch
        # more objects.
        ids = [1, 2, 3]
        resp1 = mock.Mock()
        resp1.status_code = 200
        resp1.links = {}
        resp1.headers = {'X-Container-Object-Count': 3}
        resp1.json.return_value = [{"id": ids[0]}, {"id": ids[1]}]
        resp2 = mock.Mock()
        resp2.status_code = 200
        resp2.links = {}
        resp2.headers = {'X-Container-Object-Count': 3}
        resp2.json.return_value = [{"id": ids[2]}]
        self.session.get.side_effect = [resp1, resp2]
        results = self.sot.list(self.session, paginated=True)
        # Get the first page's two items
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        result2 = next(results)
        self.assertEqual(result2.id, ids[2])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={'marker': 2},
            microversion=None)
        # Ensure we're done after those three items
        self.assertRaises(StopIteration, next, results)
        # Ensure we only made two calls to get this done
        self.assertEqual(2, len(self.session.get.call_args_list))
    def test_list_multi_page_link_header(self):
        """Pagination driven by an RFC 5988 "next" relation exposed through
        requests' ``Response.links``; iteration stops when a response has
        no next link."""
        ids = [1, 2, 3]
        resp1 = mock.Mock()
        resp1.status_code = 200
        resp1.links = {
            'next': {'uri': 'https://example.com/next-url', 'rel': 'next'}}
        resp1.headers = {}
        resp1.json.return_value = {
            "resources": [{"id": ids[0]}, {"id": ids[1]}],
        }
        resp2 = mock.Mock()
        resp2.status_code = 200
        resp2.links = {}
        resp2.headers = {}
        resp2.json.return_value = {"resources": [{"id": ids[2]}]}
        self.session.get.side_effect = [resp1, resp2]
        results = self.sot.list(self.session, paginated=True)
        # Get the first page's two items
        result0 = next(results)
        self.assertEqual(result0.id, ids[0])
        result1 = next(results)
        self.assertEqual(result1.id, ids[1])
        self.session.get.assert_called_with(
            self.base_path,
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        result2 = next(results)
        self.assertEqual(result2.id, ids[2])
        self.session.get.assert_called_with(
            'https://example.com/next-url',
            headers={"Accept": "application/json"},
            params={},
            microversion=None)
        # Ensure we're done after those three items
        self.assertRaises(StopIteration, next, results)
        # Ensure we only made two calls to get this done
        self.assertEqual(2, len(self.session.get.call_args_list))
class TestResourceFind(base.TestCase):
    """Tests for Resource.find() and its _get_one_match() helper."""
    # Sentinel value returned by the OneResult helper classes below.
    result = 1
    class Base(resource.Resource):
        # Test double: direct fetch-by-id always misses, forcing find()
        # to fall back onto list().
        @classmethod
        def existing(cls, **kwargs):
            response = mock.Mock()
            response.status_code = 404
            raise exceptions.ResourceNotFound(
                'Not Found', response=response)
        @classmethod
        def list(cls, session, **params):
            return None
    class OneResult(Base):
        @classmethod
        def _get_one_match(cls, *args):
            return TestResourceFind.result
    class NoResults(Base):
        @classmethod
        def _get_one_match(cls, *args):
            return None
    class OneResultWithQueryParams(OneResult):
        # Accepts "name" as a server-side query filter.
        _query_mapping = resource.QueryParameters('name')
    def setUp(self):
        super(TestResourceFind, self).setUp()
        self.no_results = self.NoResults
        self.one_result = self.OneResult
        self.one_result_with_qparams = self.OneResultWithQueryParams
    def test_find_short_circuit(self):
        """find() returns immediately when fetch-by-id succeeds."""
        value = 1
        class Test(resource.Resource):
            @classmethod
            def existing(cls, **kwargs):
                mock_match = mock.Mock()
                mock_match.fetch.return_value = value
                return mock_match
        result = Test.find(self.cloud.compute, "name")
        self.assertEqual(result, value)
    def test_no_match_raise(self):
        """No match with ignore_missing=False raises ResourceNotFound."""
        self.assertRaises(exceptions.ResourceNotFound, self.no_results.find,
                          self.cloud.compute, "name", ignore_missing=False)
    def test_no_match_return(self):
        """No match with ignore_missing=True returns None."""
        self.assertIsNone(
            self.no_results.find(
                self.cloud.compute, "name", ignore_missing=True))
    def test_find_result_name_not_in_query_parameters(self):
        """Without a "name" query mapping, find() lists without filters."""
        with mock.patch.object(self.one_result, 'existing',
                               side_effect=self.OneResult.existing) \
                as mock_existing, \
                mock.patch.object(self.one_result, 'list',
                                  side_effect=self.OneResult.list) \
                as mock_list:
            self.assertEqual(
                self.result,
                self.one_result.find(self.cloud.compute, "name"))
            mock_existing.assert_called_once_with(id='name',
                                                  connection=mock.ANY)
            mock_list.assert_called_once_with(mock.ANY)
    def test_find_result_name_in_query_parameters(self):
        """With a "name" query mapping, find() still locates the match."""
        self.assertEqual(
            self.result,
            self.one_result_with_qparams.find(self.cloud.compute, "name"))
    def test_match_empty_results(self):
        self.assertIsNone(resource.Resource._get_one_match("name", []))
    def test_no_match_by_name(self):
        the_name = "Brian"
        match = mock.Mock(spec=resource.Resource)
        match.name = the_name
        result = resource.Resource._get_one_match("Richard", [match])
        # NOTE(review): the second argument is assertIsNone's msg
        # parameter, not a comparison value -- the assertion only checks
        # that result is None; confirm this is intentional.
        self.assertIsNone(result, match)
    def test_single_match_by_name(self):
        the_name = "Brian"
        match = mock.Mock(spec=resource.Resource)
        match.name = the_name
        result = resource.Resource._get_one_match(the_name, [match])
        self.assertIs(result, match)
    def test_single_match_by_id(self):
        the_id = "Brian"
        match = mock.Mock(spec=resource.Resource)
        match.id = the_id
        result = resource.Resource._get_one_match(the_id, [match])
        self.assertIs(result, match)
    def test_single_match_by_alternate_id(self):
        the_id = "Richard"
        class Test(resource.Resource):
            other_id = resource.Body("other_id", alternate_id=True)
        match = Test(other_id=the_id)
        result = Test._get_one_match(the_id, [match])
        self.assertIs(result, match)
    def test_multiple_matches(self):
        """Two resources matching the same id raise DuplicateResource."""
        the_id = "Brian"
        match = mock.Mock(spec=resource.Resource)
        match.id = the_id
        self.assertRaises(
            exceptions.DuplicateResource,
            resource.Resource._get_one_match, the_id, [match, match])
class TestWaitForStatus(base.TestCase):
    """Tests for resource.wait_for_status()."""
    def test_immediate_status(self):
        """A resource already in the desired status is returned at once."""
        status = "loling"
        res = mock.Mock(spec=['id', 'status'])
        res.status = status
        result = resource.wait_for_status(
            self.cloud.compute, res, status, "failures", "interval", "wait")
        # NOTE(review): the second argument of assertTrue is only the
        # failure message; assertIs(result, res) was probably intended.
        self.assertTrue(result, res)
    def test_immediate_status_case(self):
        """Status comparison is case-insensitive."""
        status = "LOLing"
        res = mock.Mock(spec=['id', 'status'])
        res.status = status
        result = resource.wait_for_status(
            self.cloud.compute, res, 'lOling', "failures", "interval", "wait")
        self.assertTrue(result, res)
    def test_immediate_status_different_attribute(self):
        """The watched attribute can be something other than ``status``."""
        status = "loling"
        res = mock.Mock(spec=['id', 'mood'])
        res.mood = status
        result = resource.wait_for_status(
            self.cloud.compute, res, status, "failures", "interval", "wait",
            attribute='mood')
        self.assertTrue(result, res)
    def _resources_from_statuses(self, *statuses, **kwargs):
        """Build a chain of mocks where each fetch() returns the next mock
        carrying the next value in ``statuses``."""
        attribute = kwargs.pop('attribute', 'status')
        assert not kwargs, 'Unexpected keyword arguments: %s' % kwargs
        resources = []
        for status in statuses:
            res = mock.Mock(spec=['id', 'fetch', attribute])
            setattr(res, attribute, status)
            resources.append(res)
        # Link the chain: fetching resource i yields resource i+1.
        for index, res in enumerate(resources[:-1]):
            res.fetch.return_value = resources[index + 1]
        return resources
    def test_status_match(self):
        status = "loling"
        # other gets past the first check, two anothers gets through
        # the sleep loop, and the third matches
        resources = self._resources_from_statuses(
            "first", "other", "another", "another", status)
        result = resource.wait_for_status(
            mock.Mock(), resources[0], status, None, 1, 5)
        self.assertEqual(result, resources[-1])
    def test_status_match_with_none(self):
        status = "loling"
        # apparently, None is a correct state in some cases
        resources = self._resources_from_statuses(
            None, "other", None, "another", status)
        result = resource.wait_for_status(
            mock.Mock(), resources[0], status, None, 1, 5)
        self.assertEqual(result, resources[-1])
    def test_status_match_different_attribute(self):
        """Waiting also works against a non-default attribute."""
        status = "loling"
        resources = self._resources_from_statuses(
            "first", "other", "another", "another", status,
            attribute='mood')
        result = resource.wait_for_status(
            mock.Mock(), resources[0], status, None, 1, 5,
            attribute='mood')
        self.assertEqual(result, resources[-1])
    def test_status_fails(self):
        """Hitting a listed failure status raises ResourceFailure."""
        failure = "crying"
        resources = self._resources_from_statuses("success", "other", failure)
        self.assertRaises(
            exceptions.ResourceFailure,
            resource.wait_for_status,
            mock.Mock(), resources[0], "loling", [failure], 1, 5)
    def test_status_fails_different_attribute(self):
        """Failure matching is case-insensitive on alternate attributes."""
        failure = "crying"
        resources = self._resources_from_statuses("success", "other", failure,
                                                  attribute='mood')
        self.assertRaises(
            exceptions.ResourceFailure,
            resource.wait_for_status,
            mock.Mock(), resources[0], "loling", [failure.upper()], 1, 5,
            attribute='mood')
    def test_timeout(self):
        """Running out of wait time raises ResourceTimeout."""
        status = "loling"
        res = mock.Mock()
        # The first "other" gets past the first check, and then three
        # pairs of "other" statuses run through the sleep counter loop,
        # after which time should be up. This is because we have a
        # one second interval and three second waiting period.
        statuses = ["other"] * 7
        type(res).status = mock.PropertyMock(side_effect=statuses)
        self.assertRaises(exceptions.ResourceTimeout,
                          resource.wait_for_status,
                          self.cloud.compute, res, status, None, 0.01, 0.1)
    def test_no_sleep(self):
        """A non-positive wait window gives up without sleeping."""
        res = mock.Mock()
        statuses = ["other"]
        type(res).status = mock.PropertyMock(side_effect=statuses)
        self.assertRaises(exceptions.ResourceTimeout,
                          resource.wait_for_status,
                          self.cloud.compute, res, "status", None, 0, -1)
class TestWaitForDelete(base.TestCase):
    """Tests for resource.wait_for_delete()."""
    def test_success(self):
        """Deletion completes once fetch() raises ResourceNotFound."""
        response = mock.Mock()
        response.headers = {}
        response.status_code = 404
        res = mock.Mock()
        # Two fetches still see the resource, the third finds it gone.
        # NOTE(review): the response is passed positionally here while
        # other tests use response=...; confirm the second positional
        # slot of ResourceNotFound is indeed the response.
        res.fetch.side_effect = [
            None, None,
            exceptions.ResourceNotFound('Not Found', response)]
        result = resource.wait_for_delete(self.cloud.compute, res, 1, 3)
        self.assertEqual(result, res)
    def test_timeout(self):
        """A resource that never disappears raises ResourceTimeout."""
        res = mock.Mock()
        res.status = 'ACTIVE'
        res.fetch.return_value = res
        self.assertRaises(
            exceptions.ResourceTimeout,
            resource.wait_for_delete,
            self.cloud.compute, res, 0.1, 0.3)
@mock.patch.object(resource.Resource, '_get_microversion_for', autospec=True)
class TestAssertMicroversionFor(base.TestCase):
    """Tests for Resource._assert_microversion_for()."""
    # NOTE(review): these are class attributes, so one mock session and
    # one Resource instance are shared by every test in this class --
    # presumably fine because the method under test mutates neither;
    # confirm.
    session = mock.Mock()
    res = resource.Resource()
    def test_compatible(self, mock_get_ver):
        """A version >= the requirement passes and is returned."""
        mock_get_ver.return_value = '1.42'
        self.assertEqual(
            '1.42',
            self.res._assert_microversion_for(self.session, 'fetch', '1.6'))
        mock_get_ver.assert_called_once_with(self.res, self.session, 'fetch')
    def test_incompatible(self, mock_get_ver):
        """A version below the requirement raises NotSupported."""
        mock_get_ver.return_value = '1.1'
        self.assertRaisesRegex(exceptions.NotSupported,
                               '1.6 is required, but 1.1 will be used',
                               self.res._assert_microversion_for,
                               self.session, 'fetch', '1.6')
        mock_get_ver.assert_called_once_with(self.res, self.session, 'fetch')
    def test_custom_message(self, mock_get_ver):
        """A caller-supplied error_message prefixes the failure text."""
        mock_get_ver.return_value = '1.1'
        self.assertRaisesRegex(exceptions.NotSupported,
                               'boom.*1.6 is required, but 1.1 will be used',
                               self.res._assert_microversion_for,
                               self.session, 'fetch', '1.6',
                               error_message='boom')
        mock_get_ver.assert_called_once_with(self.res, self.session, 'fetch')
    def test_none(self, mock_get_ver):
        """No negotiated version at all also raises NotSupported."""
        mock_get_ver.return_value = None
        self.assertRaisesRegex(exceptions.NotSupported,
                               '1.6 is required, but the default version',
                               self.res._assert_microversion_for,
                               self.session, 'fetch', '1.6')
        mock_get_ver.assert_called_once_with(self.res, self.session, 'fetch')
class TestTagMixin(base.TestCase):
    """Tests for resource.TagMixin (per-resource tag CRUD helpers)."""
    def setUp(self):
        super(TestTagMixin, self).setUp()
        self.service_name = "service"
        self.base_path = "base_path"
        class Test(resource.Resource, resource.TagMixin):
            service = self.service_name
            base_path = self.base_path
            resources_key = 'resources'
            allow_create = True
            allow_fetch = True
            allow_head = True
            allow_commit = True
            allow_delete = True
            allow_list = True
        self.test_class = Test
        # Canned request/response doubles; request preparation and
        # response translation on the resource are stubbed out so only
        # the HTTP verbs and URLs are exercised.
        self.request = mock.Mock(spec=resource._Request)
        self.request.url = "uri"
        self.request.body = "body"
        self.request.headers = "headers"
        self.response = FakeResponse({})
        self.sot = Test.new(id="id", tags=[])
        self.sot._prepare_request = mock.Mock(return_value=self.request)
        self.sot._translate_response = mock.Mock()
        self.session = mock.Mock(spec=adapter.Adapter)
        self.session.get = mock.Mock(return_value=self.response)
        self.session.put = mock.Mock(return_value=self.response)
        self.session.delete = mock.Mock(return_value=self.response)
    def test_tags_attribute(self):
        """TagMixin exposes a list-typed ``tags`` attribute."""
        res = self.sot
        self.assertTrue(hasattr(res, 'tags'))
        self.assertIsInstance(res.tags, list)
    def test_fetch_tags(self):
        """fetch_tags() GETs <base>/<id>/tags and stores the result."""
        res = self.sot
        sess = self.session
        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_response.links = {}
        mock_response.json.return_value = {'tags': ['blue1', 'green1']}
        sess.get.side_effect = [mock_response]
        result = res.fetch_tags(sess)
        # Check tags attribute is updated
        self.assertEqual(['blue1', 'green1'], res.tags)
        # Check the passed resource is returned
        self.assertEqual(res, result)
        url = self.base_path + '/' + res.id + '/tags'
        sess.get.assert_called_once_with(url)
    def test_set_tags(self):
        """set_tags() PUTs the full replacement tag list."""
        res = self.sot
        sess = self.session
        # Set some initial value to check rewrite
        res.tags.extend(['blue_old', 'green_old'])
        result = res.set_tags(sess, ['blue', 'green'])
        # Check tags attribute is updated
        self.assertEqual(['blue', 'green'], res.tags)
        # Check the passed resource is returned
        self.assertEqual(res, result)
        url = self.base_path + '/' + res.id + '/tags'
        sess.put.assert_called_once_with(
            url,
            json={'tags': ['blue', 'green']}
        )
    def test_remove_all_tags(self):
        """remove_all_tags() DELETEs the whole tags collection."""
        res = self.sot
        sess = self.session
        # Set some initial value to check removal
        res.tags.extend(['blue_old', 'green_old'])
        result = res.remove_all_tags(sess)
        # Check tags attribute is updated
        self.assertEqual([], res.tags)
        # Check the passed resource is returned
        self.assertEqual(res, result)
        url = self.base_path + '/' + res.id + '/tags'
        sess.delete.assert_called_once_with(url)
    def test_remove_single_tag(self):
        """remove_tag() DELETEs <base>/<id>/tags/<tag>."""
        res = self.sot
        sess = self.session
        res.tags.extend(['blue', 'dummy'])
        result = res.remove_tag(sess, 'dummy')
        # Check tags attribute is updated
        self.assertEqual(['blue'], res.tags)
        # Check the passed resource is returned
        self.assertEqual(res, result)
        url = self.base_path + '/' + res.id + '/tags/dummy'
        sess.delete.assert_called_once_with(url)
    def test_check_tag_exists(self):
        """check_tag() GETs the tag URL; a 2xx means the tag exists."""
        res = self.sot
        sess = self.session
        sess.get.side_effect = [FakeResponse(None, 202)]
        result = res.check_tag(sess, 'blue')
        # Check tags attribute is updated
        self.assertEqual([], res.tags)
        # Check the passed resource is returned
        self.assertEqual(res, result)
        url = self.base_path + '/' + res.id + '/tags/blue'
        sess.get.assert_called_once_with(url)
    def test_check_tag_not_exists(self):
        """check_tag() surfaces a 404 as NotFoundException."""
        res = self.sot
        sess = self.session
        mock_response = mock.Mock()
        mock_response.status_code = 404
        mock_response.links = {}
        mock_response.content = None
        sess.get.side_effect = [mock_response]
        # ensure we get 404
        self.assertRaises(
            exceptions.NotFoundException,
            res.check_tag,
            sess,
            'dummy',
        )
    def test_add_tag(self):
        """add_tag() PUTs the tag URL and appends to the local list."""
        res = self.sot
        sess = self.session
        # Set some initial value to check add
        res.tags.extend(['blue', 'green'])
        result = res.add_tag(sess, 'lila')
        # Check tags attribute is updated
        self.assertEqual(['blue', 'green', 'lila'], res.tags)
        # Check the passed resource is returned
        self.assertEqual(res, result)
        url = self.base_path + '/' + res.id + '/tags/lila'
        sess.put.assert_called_once_with(url)
    def test_tagged_resource_always_created_with_empty_tag_list(self):
        res = self.sot
        self.assertIsNotNone(res.tags)
        self.assertEqual(res.tags, list())
|
openstack/python-openstacksdk
|
openstack/tests/unit/test_resource.py
|
Python
|
apache-2.0
| 100,885
|
[
"Brian"
] |
87762767b4539fe2f970ebba9354586bbe0e1c3a2be2a353bdb6c05f1192039b
|
#!/usr/bin/env python
"""
Blast
Genome library
Classes to handle Blast analysis against a local database
"""
import logging
import os
import subprocess
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO # Python 2
else:
from io import StringIO # Python 3
__author__ = "Marco Galardini"
################################################################################
# Log setup
logger = logging.getLogger('ductape.blast')
################################################################################
# Classes
# Useful class for parsing
class BlastHit:
    """A single Blast alignment (query / alignment / HSP triple).

    Flattens the relevant fields of the three Biopython record objects
    into plain attributes and offers a few convenience metrics.
    """
    def __init__(self, query, align, hsp):
        '''
        Query, Alignment and Hsp are all Biopython objects derived from
        Blast results parsing
        '''
        # Query side
        self.query = query.query
        self.query_id = query.query.split(' ')[0]
        self.query_len = int(query.query_length)
        # Hit side
        self.hit = align.hit_id
        self.hit_desc = align.hit_def
        self.hit_len = int(align.length)
        # HSP-derived statistics
        self.identity = float(hsp.identities) / float(hsp.align_length)
        self.align_len = int(hsp.align_length)
        self.mismatches = int(hsp.align_length - hsp.identities - hsp.gaps)
        self.gaps = int(hsp.gaps)
        self.query_start = int(hsp.query_start)
        self.query_end = int(hsp.query_end)
        self.subjct_start = int(hsp.sbjct_start)
        self.subjct_end = int(hsp.sbjct_end)
        self.evalue = float(hsp.expect)
        self.bits = float(hsp.bits)
    def getHomologyIndex(self):
        '''
        Composite score for the quality of the homology: squared identity
        scaled by hit length and by the query coverage
        '''
        scaled = (self.identity ** 2) * float(self.hit_len) / float(self.query_len)
        return scaled * (float(self.align_len) / float(self.query_len))
    def getHitCoverage(self):
        '''
        Fraction of the hit sequence covered by the alignment
        '''
        return float(self.align_len) / float(self.hit_len)
    def getQueryCoverage(self):
        '''
        Fraction of the query sequence covered by the alignment
        '''
        return float(self.align_len) / float(self.query_len)
    def getKO(self):
        '''
        Assuming that this hit derives from a KEGG DB,
        return the KO ID embedded in the hit description (None otherwise)
        '''
        import re
        match = re.search("K[0-9]+", self.hit_desc)
        return match.group() if match is not None else None
class Blaster(object):
    """Thin wrapper around the NCBI Blast+ command-line tools.

    Two working modes:
      * useDisk=True: queries, retrieved sequences and XML results go
        through files on disk;
      * useDisk=False: everything is piped through stdin/stdout and kept
        in memory (self.query, self.retrieved, self.out).
    """
    def __init__(self, useDisk=False):
        # Generator of parsed Blast records (set by parseBlast)
        self._hits = None
        # Path of the last Blast XML output file (disk mode)
        self._out = ''
        # No-disk
        self._useDisk = bool(useDisk)
        # FASTA text fetched by retrieveFromDB (no-disk mode)
        self.retrieved = ''
        # Query sequence(s) piped to Blast via stdin (no-disk mode)
        self.query = ''
        # Raw XML output captured from stdout (no-disk mode)
        self.out = ''
    def createDB(self,seqFile,dbType,outFile='BlastDB',parseIDs=True,
                 title='Generic Blast DB'):
        '''Generation of a Blast DB

        seqFile: FASTA file with the input sequences
        dbType: molecule type passed to makeblastdb (-dbtype)
        outFile: DB file prefix
        parseIDs: add -parse_seqids so sequences can later be fetched by ID
        Returns True on success, False otherwise.
        '''
        cmd = ('makeblastdb -in %s -dbtype %s -out %s -title "%s"')
        cmd = cmd%(seqFile,dbType,outFile,title)
        if parseIDs:
            cmd = cmd+' -parse_seqids'
        logger.debug('Create Blast DB cmd: %s'%cmd)
        # shell=True everywhere except Windows, where Popen tokenizes itself
        proc = subprocess.Popen(cmd,shell=(sys.platform!="win32"),
                                stdin=subprocess.PIPE,stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out = proc.communicate()
        return_code = proc.returncode
        if return_code != 0:
            logger.warning('Blast DB creation failed with error %d'
                           %return_code)
            # out[1] holds the captured stderr
            logger.warning('%s'%str(out[1]))
        return bool(not return_code)
    def retrieveFromDB(self, db, accession, out='out.fsa', isFile=False):
        '''Retrieve the desired sequence(s) from a Blast DB

        db: Blast DB prefix
        accession: a sequence ID, or (isFile=True) a file with one ID per line
        out: output FASTA file (disk mode only)
        Returns True on success; in no-disk mode the fetched FASTA text
        ends up in self.retrieved.
        '''
        if not isFile:
            cmd=('blastdbcmd -db %s -entry "%s" -long_seqids'
                 %(db,accession))
        else:
            cmd=('blastdbcmd -db %s -entry_batch "%s" -long_seqids'
                 %(db,accession))
        if self._useDisk:
            # Redirect the sequences to a file (shell redirection)
            cmd += ' > %s'%out
        logger.debug('BlastDBcmd cmd: %s'%cmd)
        proc = subprocess.Popen(cmd,shell=(sys.platform!="win32"),
                                stdin=subprocess.PIPE,stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out = proc.communicate()
        if not self._useDisk:
            # Keep the fetched FASTA text in memory
            self.retrieved = out[0]
        return_code = proc.returncode
        if return_code != 0:
            logger.warning('BlastDBcmd failed with error %d'
                           %return_code)
            logger.warning('%s'%str(out[1]))
        return bool(not return_code)
    def runBlast(self, queryFile, db, outFile='', evalue = 10,
                 task = '', ncpus = 1, additional = '', outfmt='5'):
        '''Run Blast with the desired parameters

        queryFile: query FASTA file (disk mode; in no-disk mode self.query
        is piped through stdin instead)
        outfmt='5' requests XML output, which parseBlast expects.
        Returns True on success; in no-disk mode the XML text is kept in
        self.out.
        '''
        # Create the command line
        from Bio.Blast.Applications import NcbiblastpCommandline
        self._out = outFile
        cmd = NcbiblastpCommandline(db=db,
                                    evalue=float(evalue),
                                    outfmt=outfmt,
                                    num_threads=ncpus)
        if self._useDisk:
            cmd.set_parameter('query', queryFile)
        if outFile != '':
            cmd.set_parameter('out', outFile)
        if task != '':
            cmd.set_parameter('task', task)
        if additional !='':
            # Free-form extra options are appended to the rendered command
            cmd = str(cmd)+' '+additional
        cmd=str(cmd)
        logger.debug('Run Blast cmd: %s'%cmd)
        # Run Blast and check the return code
        proc = subprocess.Popen(cmd,shell=(sys.platform!="win32"),
                                stdin=subprocess.PIPE,stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if not self._useDisk:
            # Pipe the query sequence(s) through stdin (bytes required)
            if isinstance(self.query, str):
                proc.stdin.write(self.query.encode())
            else:
                proc.stdin.write(self.query)
        out = proc.communicate()
        if not self._useDisk:
            self.out = out[0]
        return_code = proc.returncode
        if return_code != 0:
            logger.warning('Run Blast failed with error %d'
                           %return_code)
            logger.warning('%s'%str(out[1]))
        return bool(not return_code)
    def parseBlast(self, fileOut):
        '''Parse the xml blast output -- default file is self._out'''
        from Bio.Blast import NCBIXML
        if self._useDisk:
            self._out = fileOut
            handle = open(fileOut)
        else:
            # Wrap the in-memory XML output for the parser
            handle = StringIO(self.out.decode('utf-8'))
        self._hits = NCBIXML.parse(handle)
    def getHits(self,expect=10.0):
        '''Returns a Generator query -> BlastObj

        Yields, for each query, the list of BlastHit objects whose
        e-value does not exceed `expect`.
        '''
        if self._hits == None:
            # Lazily parse the last known output file
            self.parseBlast(self._out)
        for BlastQuery in self._hits:
            hits = []
            for alignment in BlastQuery.alignments:
                for hsp in alignment.hsps:
                    # Skip HSPs above the e-value threshold
                    if float(hsp.expect) > expect:continue
                    # Save the hit details
                    h=BlastHit(BlastQuery,alignment,hsp)
                    hits.append(h)
            yield hits
class RunBBH(object):
    """Callable running a Bidirectional Best Hit (BBH) Blast search.

    The query is blasted against the target DB, the best hit is fetched
    and blasted back against the source DB; if the returned best hit is
    the original query the pair is accepted.  In KEGG mode the target
    entry (ko_entry) is already known and only the back-Blast is run.
    __call__ returns a 3-item list: [hit_id_or_None, organism, success].
    """
    @staticmethod
    def _decode(value):
        # Normalize bytes arguments to str; non-bytes pass through untouched
        if isinstance(value, bytes):
            return value.decode('utf-8')
        return value
    def __init__(self, query, queryid,
                 source, target, targetorg,
                 evalue, matrix, short = False, uniqueid = 1,
                 kegg = False, ko_entry = None, ko_id = None, useDisk=True):
        # BUGFIX: the original code looped over the arguments and decoded
        # the loop variable, which only rebound the local name and left
        # every value untouched; decode each argument explicitly instead.
        query = self._decode(query)
        queryid = self._decode(queryid)
        source = self._decode(source)
        target = self._decode(target)
        targetorg = self._decode(targetorg)
        evalue = self._decode(evalue)
        matrix = self._decode(matrix)
        short = self._decode(short)
        uniqueid = self._decode(uniqueid)
        kegg = self._decode(kegg)
        ko_entry = self._decode(ko_entry)
        ko_id = self._decode(ko_id)
        useDisk = self._decode(useDisk)
        self.query = query
        self.queryid = queryid
        self.source = source
        self.target = target
        self.targetorg = targetorg
        self.evalue = evalue
        self.matrix = matrix
        self.short = short
        self.uniqueid = uniqueid
        self.kegg = kegg
        self.ko_entry = ko_entry
        self.ko_id = ko_id
        self.useDisk = bool(useDisk)
        # Per-task unique file name so parallel workers do not clash
        self.out = self.queryid + '_' + str(self.uniqueid) +'.xml'
        self.blaster = Blaster(useDisk=self.useDisk)
        self.additional = (' -soft_masking true -dbsize 500000000 '+
                    '-use_sw_tback -max_target_seqs 1 -matrix %s'%self.matrix)
        if not self.useDisk:
            # Feed the query through stdin; no intermediate files needed
            self.blaster.query = self.query
            self.queryreturn = ''
        else:
            self.queryreturn = self.query + '_' + str(self.uniqueid) + '_return'
    def _firstRun(self):
        # Blast the query against the target DB (short-protein task if asked)
        if self.short:
            res = self.blaster.runBlast(self.query, self.target, self.out,
                                        evalue = self.evalue,
                                        task='blastp-short',
                                        additional=self.additional)
        else:
            res = self.blaster.runBlast(self.query, self.target, self.out,
                                        evalue = self.evalue,
                                        additional = self.additional)
        return res
    def _secondRun(self, hit_len = None):
        # Second Blast run: the retrieved hit back against the source DB
        if not hit_len:
            # Guess a plausible length so the right blastp task is chosen
            if self.short:
                hit_len = 29
            else:
                hit_len = 100
        if not self.useDisk:
            self.blaster.query = self.blaster.retrieved
        if hit_len < 30:
            # blastp-short is better suited to very short proteins
            res = self.blaster.runBlast(self.queryreturn, self.source, self.out,
                                        evalue = self.evalue,
                                        task='blastp-short',
                                        additional=self.additional)
        else:
            res = self.blaster.runBlast(self.queryreturn, self.source, self.out,
                                        evalue = self.evalue,
                                        additional=self.additional)
        return res
    def __call__(self):
        if not self.kegg:
            # First Blast run
            res = self._firstRun()
            if not res:
                if self.useDisk:
                    # Best-effort cleanup of the temporary output file
                    try:
                        os.remove(self.out)
                    except OSError:
                        pass
                return [None, self.targetorg, False]
            self.blaster.parseBlast(self.out)
            for hits in self.blaster.getHits(self.evalue):
                if len(hits) == 0:
                    break
                targethit = hits[0]
                if not self.blaster.retrieveFromDB(self.target, targethit.hit,
                                                   out=self.queryreturn):
                    if self.useDisk:
                        try:
                            os.remove(self.out)
                        except OSError:
                            pass
                    return [None, self.targetorg, False]
                # Second Blast run
                res = self._secondRun(targethit.hit_len)
                break
        else:
            # KEGG mode: the target entry is already known, skip the first run
            if not self.blaster.retrieveFromDB(self.target, self.ko_entry,
                                               out=self.queryreturn):
                if self.useDisk:
                    try:
                        os.remove(self.out)
                    except OSError:
                        pass
                return [None, self.targetorg, False]
            res = self._secondRun()
        if not res:
            if self.useDisk:
                try:
                    os.remove(self.out)
                    os.remove(self.queryreturn)
                except OSError:
                    pass
            return [None, self.targetorg, False]
        self.blaster.parseBlast(self.out)
        for hits in self.blaster.getHits(self.evalue):
            if len(hits) == 0:
                return [None, self.targetorg, True]
            sourcehit = hits[0]
            if self.queryid == sourcehit.hit:
                # Reciprocal best hit confirmed
                if self.useDisk:
                    os.remove(self.out)
                    os.remove(self.queryreturn)
                if self.kegg:
                    return [self.ko_id,self.queryid, True]
                else:
                    return [sourcehit.query_id.replace('lcl|',''),
                            self.targetorg, True]
            else:
                if self.useDisk:
                    os.remove(self.out)
                    os.remove(self.queryreturn)
                return [None, self.targetorg, True]
        if self.useDisk:
            os.remove(self.out)
        return [None, self.targetorg, True]
|
combogenomics/DuctApe
|
ductape/genome/blast.py
|
Python
|
bsd-2-clause
| 12,458
|
[
"BLAST",
"Biopython"
] |
b6fe81b7ecec83799c1f224dcce08bc64a3dc5620d409cbc1f040415076ae8d0
|
#!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
# Global pipeline parameters with attribute-style access (p.CNV, p.OMICSPIPE).
# NOTE(review): Bunch is presumably provided by the omics_pipe.utils star
# import -- confirm.
p = Bunch(default_parameters)
def varscan2_segmentation(sample, varscan2_segmentation_flag):
    '''Apply circular binary segmentation (DNAcopy from Bioconductor) to the
    Varscan2 copynumber output of a sample.

    input:
        .varscan_cnv.copynumber
    output:
        .varscan_cnv.events.tsv
    citation:
    parameters from parameters file:
        CNV_DIR:
        R_VERSION:
    '''
    job_name = 'varscan2_segmentation'
    pipe_cfg = p.OMICSPIPE
    cnv_cfg = p.CNV
    # Submit the segmentation script to the cluster scheduler.
    spawn_job(jobname=job_name, SAMPLE=sample, LOG_PATH=pipe_cfg["LOG_PATH"],
              RESULTS_EMAIL=pipe_cfg["EMAIL"], SCHEDULER=pipe_cfg["SCHEDULER"],
              walltime=cnv_cfg["WALLTIME"], queue=pipe_cfg["QUEUE"],
              nodes=cnv_cfg["NODES"], ppn=cnv_cfg["CPU"],
              memory=cnv_cfg["MEMORY"], script="/varscan2_segmentation.sh",
              args_list=[sample, cnv_cfg["R_VERSION"], cnv_cfg["CNV_DIR"]])
    # Wait for the expected output file and flag the step as done.
    job_status(jobname=job_name,
               resultspath=cnv_cfg["CNV_DIR"] + "/" + sample,
               SAMPLE=sample,
               outputfilename=sample + ".varscan_cnv.events.tsv",
               FLAG_PATH=pipe_cfg["FLAG_PATH"])
    return
if __name__ == '__main__':
    # NOTE(review): 'sample', 'varscan2_segmentation_flag' and 'sys' are not
    # defined in this module; they are presumably injected into the namespace
    # by the omics_pipe runner before this file is executed -- confirm before
    # running it standalone.
    varscan2_segmentation(sample, varscan2_segmentation_flag)
    sys.exit(0)
|
adammaikai/PipeMaster
|
modules/varscan2_segmentation.py
|
Python
|
mit
| 1,285
|
[
"Bioconductor"
] |
2fd8e6f443d192f2762f73f72c86228f29aa6cc1a0e7f4b51e7524381f5deb09
|
import os
from astropy.io import ascii
from astropy.table import Table
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Change path so that we can import functions from the 'modules/' folder.
sys.path.insert(0, sys.path[0].replace('aux_', ''))
import readData
import MCs_data
def zDist(N):
    """Draw N vertical distances z', uniformly spread in the range
    (-z_max, +z_max) parsec."""
    # Maximum vertical distance, in parsec.
    z_max = 5000.
    # To reproduce the *same* draw on every run, fix the numpy random seed
    # beforehand (e.g. np.random.seed(12345)).
    return np.random.uniform(-z_max, z_max, N)
def invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime):
    """
    Distance D (parsec) obtained by inverting Eq (7) of
    van der Marel & Cioni (2001), using Eqs (1), (2), (3).

    incl, theta, ra, dec are in degrees; ra_0/dec_0 carry .rad and
    D_0 carries .value (astropy-style objects); z_prime is in parsec.
    """
    # Work entirely in radians.
    incl_r, theta_r = np.deg2rad(incl), np.deg2rad(theta)
    ra0_r, dec0_r = ra_0.rad, dec_0.rad
    ra_r, dec_r = np.deg2rad(ra), np.deg2rad(dec)
    # cos(rho)
    cos_rho = np.cos(dec_r) * np.cos(dec0_r) * np.cos(ra_r - ra0_r) +\
        np.sin(dec_r) * np.sin(dec0_r)
    # sin(rho) * cos(phi)
    sr_cp = -np.cos(dec_r) * np.sin(ra_r - ra0_r)
    # sin(rho) * sin(phi)
    sr_sp = np.sin(dec_r) * np.cos(dec0_r) -\
        np.cos(dec_r) * np.sin(dec0_r) * np.cos(ra_r - ra0_r)
    # Eq (7)
    numer = z_prime - D_0.value * np.cos(incl_r)
    denom = np.sin(incl_r) * (sr_sp * np.cos(theta_r) -
                              sr_cp * np.sin(theta_r)) -\
        cos_rho * np.cos(incl_r)
    return numer / denom
def rho_phi(ra, dec, glx_ctr):
    """
    Angular separation (rho) of each (ra, dec) point from the galaxy
    center, and its position angle (phi) measured counter-clockwise from
    the positive x axis (West).
    """
    # Cluster coordinates, in degrees.
    cl_coords = SkyCoord(list(zip(*[ra, dec])), unit=(u.deg, u.deg))
    rho = cl_coords.separation(glx_ctr)
    # position_angle() is measured from the positive y axis (North)
    # counter-clockwise towards the negative x axis (East); shifting by
    # 90 deg re-bases it on the positive x axis (West).
    phi = glx_ctr.position_angle(cl_coords) + Angle('90d')
    return rho, phi
def xyz_coords(rho, phi, D_0, r_dist):
    '''
    Coordinates in the (x, y, z) system of van der Marel & Cioni (2001),
    Eq (5).  Returned values are in Kpc.
    '''
    # Distance moduli (r_dist) -> distances in Kpc.
    dist = Distance((10 ** (0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
                    unit=u.kpc)
    sin_rho = np.sin(rho.radian)
    x = dist * sin_rho * np.cos(phi.radian)
    y = dist * sin_rho * np.sin(phi.radian)
    z = D_0.kpc * u.kpc - dist * np.cos(rho.radian)
    return np.array([x.value, y.value, z.value])
def outData(gal, gal_data, dist_mod, e_dm):
    """
    Store the synthetic distance moduli in the output file
    '<gal>_input_synth.dat' ('<gal>' is the processed galaxy, lower-cased).
    """
    cols = [gal_data['Name'], gal_data['ra'], gal_data['dec'], dist_mod,
            e_dm, gal_data['log(age)']]
    names = ['Name', 'ra', 'dec', 'dist_mod', 'e_dm', 'log(age)']
    tab = Table(cols, names=names)
    out_name = gal.lower() + "_input_synth.dat"
    with open(out_name, 'w') as f:
        ascii.write(tab, f, format='fixed_width', delimiter=' ')
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
    """
    Inverse set of rotation equations: map the inclined-plane system
    (x', y', z') back onto the face-on sky system (x, y, z).
    theta and inc are in radians.
    """
    sin_t, cos_t = np.sin(theta), np.cos(theta)
    sin_i, cos_i = np.sin(inc), np.cos(inc)
    x = x_p * cos_t - y_p * cos_i * sin_t - z_p * sin_i * sin_t
    y = x_p * sin_t + y_p * cos_i * cos_t + z_p * sin_i * cos_t
    z = -1. * y_p * sin_i + z_p * cos_i
    return x, y, z
def make_plot(gal_name, incl, theta, cl_xyz, dm):
    """
    3D plot of the clusters and of the galaxy's inclined plane.

    gal_name: galaxy name shown in the title
    incl: inclination angle, in degrees
    theta: position angle from the West (+x axis), in degrees
    cl_xyz: (3, N) array of cluster (x, y, z) coordinates, in Kpc
    dm: values used to color the scatter points (distance moduli)

    Original link for plotting intersecting planes:
    http://stackoverflow.com/a/14825951/1391441
    """
    # Make plot.
    fig = plt.figure()
    ax = Axes3D(fig)
    # Placement 0, 0 is the bottom left, 1, 1 is the top right.
    ax.text2D(
        0.4, 0.95, r"${}:\;(\Theta, i) = ({}, {})$".format(
            gal_name, theta - 90., incl),
        transform=ax.transAxes, fontsize=15, color='red')
    # Express in radians for calculations.
    incl, theta = np.deg2rad(incl), np.deg2rad(theta)
    # Plot clusters. Note y and z are swapped so matplotlib's y axis shows z.
    x_cl, y_cl, z_cl = cl_xyz
    SC = ax.scatter(x_cl, z_cl, y_cl, c=dm, s=50)
    min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
    min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
    # (min_Z/max_Z are only used by the commented-out z' axis below.)
    min_Z, max_Z = min(z_cl) - 2., max(z_cl) + 2.
    # x,y plane.
    X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
    Z = np.zeros((2, 2))
    # Plot x,y plane.
    ax.plot_surface(X, Z, Y, color='gray', alpha=.1, linewidth=0, zorder=1)
    # Axis of x,y plane.
    # x axis.
    ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
    # Arrow head pointing in the positive x direction.
    ax.quiver(max_X, 0., 0., max_X, 0., 0., arrow_length_ratio=.5,
              length=.1, color='k')
    ax.text(max_X, 0., -.5, 'x', 'x')
    # y axis.
    ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
    # Arrow head pointing in the positive y direction.
    ax.quiver(0., 0., max_Y, 0., 0., max_Y, arrow_length_ratio=.8,
              length=.1, color='k')
    ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
    ax.text(-.5, 0., max_Y, 'y', 'y')
    #
    # A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
    a, b, c, d = -1. * np.sin(theta) * np.sin(incl),\
        np.cos(theta) * np.sin(incl), np.cos(incl), 0.
    # print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
    # Rotated plane.
    X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
    Z2_t = (-a * X2_t - b * Y2_t) / c
    X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
    Z2_b = (-a * X2_b - b * Y2_b) / c
    # Top half of first x',y' inclined plane.
    ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.1, lw=0, zorder=3)
    # Bottom half of inclined plane.
    ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.1, lw=0, zorder=-1)
    # Axis of x',y' plane.
    # x' axis.
    x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, incl)
    x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, incl)
    ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
    # Arrow head pointing in the positive x' direction.
    ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
              arrow_length_ratio=.7)
    ax.text(x_max, z_max, y_max - .5, "x'", 'x', color='b')
    # y' axis.
    x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, incl)
    x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, incl)
    ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
    # Arrow head pointing in the positive y' direction.
    ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
              arrow_length_ratio=.9, color='g')
    ax.text(x_max - .5, z_max, y_max, "y'", 'y', color='g')
    # # z' axis.
    # x_min, y_min, z_min = inv_trans_eqs(0., 0, min_Z, theta, incl)
    # x_max, y_max, z_max = inv_trans_eqs(0., 0, max_Z, theta, incl)
    # ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='y')
    # # Arrow head pointing in the positive z' direction.
    # ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
    #           arrow_length_ratio=.9, color='y')
    # ax.text(x_max - .5, z_max, y_max, "z'", 'z', color='y')
    ax.set_xlabel('x (Kpc)')
    ax.set_ylabel('z (Kpc)')
    ax.set_ylim(max_Y, min_Y)
    ax.set_zlabel('y (Kpc)')
    plt.colorbar(SC, shrink=0.9, aspect=25)
    ax.axis('equal')
    ax.axis('tight')
    # This controls the initial orientation of the displayed 3D plot.
    # 'elev' stores the elevation angle in the z plane. 'azim' stores the
    # azimuth angle in the x,y plane.
    ax.view_init(elev=0., azim=-90.)
    plt.show()
    # plt.savefig()
def main():
    """
    Generate a synthetic validation set of cluster distance moduli for the
    SMC and the LMC, store it to '<gal>_input_synth.dat', and display a 3D
    plot of the clusters on each galaxy's inclined plane.
    """
    # Define inclination angles (i, Theta) (SMC first, LMC second).
    # 'Theta' is the PA (position angle) measured from the North (positive
    # y axis in van der Marel et al. 2002, Fig 3)
    rot_angles = ((60, 150.), (30, 140.))
    # Root path.
    # NOTE(review): the -30 slice assumes the length of this file's relative
    # path -- confirm if the file is ever moved or renamed.
    r_path = os.path.realpath(__file__)[:-30]
    # Read input data for both galaxies from file (smc_data, lmc_data)
    gal_data = readData.main(r_path)
    for gal, gal_name in enumerate(['SMC', 'LMC']):
        print("Generating data for {}".format(gal_name))
        incl, Theta = rot_angles[gal]
        # 'theta' is the position angle measured from the West (positive
        # x axis), used by Eq (7) in van der Marel & Cioni (2001).
        theta = Theta + 90.
        # Center coordinates and distance for this galaxy.
        gal_center, D_0, e_gal_dist = MCs_data.MCs_data(gal)
        ra_0, dec_0 = gal_center.ra, gal_center.dec
        # Center coordinates for observed clusters in this galaxy.
        ra, dec = gal_data[gal]['ra'], gal_data[gal]['dec']
        # Generate N random vertical distances (z'), in parsec.
        z_prime = zDist(len(ra))
        # Distance to clusters in parsecs.
        D = invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime)
        # Convert to distance moduli.
        dist_mod = np.round(-5. + 5. * np.log10(D), 2)
        # This line below uses the actual distance moduli found by ASteCA.
        # dist_mod = gal_data[gal]['dist_mod']
        # Random errors for distance moduli.
        e_dm = np.round(np.random.uniform(.03, .09, len(ra)), 2)
        # Store data in output file.
        outData(gal_name, gal_data[gal], dist_mod, e_dm)
        print("Output data stored")
        # Obtain angular projected distance and position angle for the
        # clusters in the galaxy.
        rho, phi = rho_phi(ra, dec, gal_center)
        cl_xyz = xyz_coords(rho, phi, D_0, dist_mod)
        make_plot(gal_name, incl, theta, cl_xyz, dist_mod)
        print("Plot saved.")
|
Gabriel-p/mcs_rot_angles
|
aux_modules/validation_set.py
|
Python
|
gpl-3.0
| 10,176
|
[
"Galaxy"
] |
4fe745aa6660235217f7cedbeb61f53bacf7d2b730b4caaa273f2640874c3c2d
|
# -*- coding: utf-8 -*-
'''
Created on 04 Mar 2017
@author: Guilherme Stiebler
Copyright © 2017 Guilherme Stiebler, Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import logging
import numpy
import cv2
from scipy import ndimage
from odemis import model
from odemis.util import img
# OpenCV 2.x exposes the detector constructors under different names than
# 3.x+; alias the 3.x-style names so the rest of the module works on both.
if int(cv2.__version__[0]) <= 2:
    cv2.ORB_create = cv2.ORB
    # Sift is not installed by default, check first if it's available
    if hasattr(cv2, 'SIFT'):
        cv2.SIFT_create = cv2.SIFT
# The brute-force matcher works in theory a bit better than the Flann-based one,
# but slower. In practice, it doesn't seem to show better results, and if they
# are many keypoints (eg, 2000) the slow-down can be a couple of seconds.
USE_BF = False  # Use BruteForce matcher
USE_KNN = True  # Use k-nearest neighbour matching method
# Missing defines from OpenCV (FLANN matcher index algorithm IDs)
FLANN_INDEX_LINEAR = 0
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_KMEANS = 2
FLANN_INDEX_LSH = 6
def FindTransform(ima, imb, fd_type=None):
    """
    Estimate the homography aligning one image onto another via keypoint
    matching (SIFT or ORB features + FLANN/BF matcher + RANSAC).

    ima(DataArray of shape YaXa with uint8): Image to be aligned
    imb(DataArray of shape YbXb with uint8): Base image
    Note that the shape doesn't have to be any relationship with the shape of the
    first dimension(doesn't even need to be the same ratio)
    fd_type(None or str): Feature detector type. Must be 'SIFT' or 'ORB'. ORB is faster,
    but SIFT usually has better results. If None, it will pick the best available.
    return (ndarray of shape 3, 3): transformation matrix to align the first image on the
    base image. (right column is translation)
    Also returns the keypoints of both images and the matched keypoint pairs.
    raises:
        ValueError: if no good transformation is found.
    """
    # Instantiate the feature detector and the matcher
    # TODO: try BRISK, AZAKE and other detectors?
    if fd_type is None:
        # Prefer SIFT when the OpenCV build provides it, else fall back to ORB
        for fd in ("SIFT", "ORB"):
            if hasattr(cv2, "%s_create" % fd):
                fd_type = fd
                break
    if fd_type == "ORB":
        feature_detector = cv2.ORB_create()
        if USE_BF:
            # ORB descriptors are binary -> Hamming distance
            matcher = cv2.BFMatcher(normType=cv2.NORM_HAMMING)
        else:
            # LSH index, suited to binary descriptors
            index_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,  # 12
                                key_size=12,  # 20
                                multi_probe_level=1)  # 2
            search_params = {}
            matcher = cv2.FlannBasedMatcher(index_params, search_params)
    elif fd_type == "SIFT":
        # Extra arguments for SIFT
        # contrastThreshold = 0.04
        # edgeThreshold = 10
        # sigma = 1.6  # TODO: no need for Gaussian as preprocess already does it?
        feature_detector = cv2.SIFT_create(nfeatures=2000)  # avoid going crazy on keypoints
        if USE_BF:
            # SIFT descriptors are float vectors -> L2 distance
            matcher = cv2.BFMatcher(normType=cv2.NORM_L2)
        else:
            # Note: with KDTree, every call returns slightly different matches,
            # which is quite annoying for reproducibility
            # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            index_params = dict(algorithm=FLANN_INDEX_KMEANS)
            search_params = dict(checks=32)  # default value
            matcher = cv2.FlannBasedMatcher(index_params, search_params)
    else:
        raise ValueError("Unknown feature detector %s" % (fd_type,))
    logging.debug("Using feature detector %s", fd_type)
    # find and compute the descriptors
    ima_kp, ima_des = feature_detector.detectAndCompute(ima, None)
    imb_kp, imb_des = feature_detector.detectAndCompute(imb, None)
    logging.debug("Found %d and %d keypoints", len(ima_kp), len(imb_kp))
    # run the matcher of the detected features
    if USE_KNN:
        # For each keypoint, return up to k(=2) best ones in the other image
        matches = matcher.knnMatch(ima_des, imb_des, k=2)
        # store all the good matches as per Lowe's ratio test
        dist_ratio = 0.75
        selected_matches = [m[0] for m in matches
                            if len(m) == 2 and m[0].distance < m[1].distance * dist_ratio]
    else:
        # For each keypoint, pick the closest one in the other image
        matches = matcher.match(ima_des, imb_des)
        # Pick up to the best 10 matches
        min_dist = 100  # almost random value
        selected_matches = [m for m in matches if m.distance < min_dist]
        selected_matches.sort(key=lambda m: m.distance)
        selected_matches = selected_matches[:10]
    logging.debug("Found %d matches and %d good ones", len(matches), len(selected_matches))
    if len(selected_matches) < 5:
        raise ValueError("Less than 5 common features (%d) detected on the images" %
                         (len(selected_matches),))
    # get keypoints for selected matches
    selected_ima_kp = [list(ima_kp[m.queryIdx].pt) for m in selected_matches]
    selected_imb_kp = [list(imb_kp[m.trainIdx].pt) for m in selected_matches]
    selected_ima_kp = numpy.array([selected_ima_kp])
    selected_imb_kp = numpy.array([selected_imb_kp])
    # Matched keypoint objects (useful for debugging/visualisation)
    ima_mkp = [ima_kp[m.queryIdx] for m in selected_matches]
    imb_mkp = [imb_kp[m.trainIdx] for m in selected_matches]
    # testing detecting the matching points automatically
    try:
        # RANSAC rejects outlier pairs while estimating the homography
        mat, mask = cv2.findHomography(selected_ima_kp, selected_imb_kp, cv2.RANSAC)
    except Exception:
        raise ValueError("The images does not match")
    if mat is None:
        raise ValueError("The images does not match")
    return mat, ima_kp, imb_kp, ima_mkp, imb_mkp
def preprocess(im, invert, flip, crop, gaussian_sigma, eqhis):
    '''
    Typical preprocessing chain applied before keypoint matching.

    im (DataArray): Input image
    invert (bool): Invert the brightness levels of the image
    flip (tuple(bool, bool)): Whether to flip the image on the X and Y axis
    crop (tuple(t, b, l, r)): Crop values in pixels
    gaussian_sigma (int): Blur intensity
    eqhis (bool): If True, a histogram equalisation is performed (and the
        data type is set to uint8)
    return (DataArray of same shape): Processed image
    '''
    try:
        md = im.metadata
    except AttributeError:
        md = {}
    # Mirror the image along the requested axes.
    fx, fy = flip
    if fx:
        im = im[:, ::-1]
    if fy:
        im = im[::-1, :]
    # Cut away the requested borders (eg, an info bar).
    top, bottom, left, right = crop
    im = im[top:im.shape[0] - bottom, left:im.shape[1] - right]
    # Invert the image brightness.
    if invert:
        im = im.max() - im
    # Equalize the histogram.
    if eqhis:
        if im.dtype != numpy.uint8:
            # OpenCV histogram equalisation only works on uint8 data
            im = img.DataArray2RGB(im)[:, :, 0]
        im = cv2.equalizeHist(im)
    # Blur the image with a gaussian filter.
    if gaussian_sigma:
        im = ndimage.gaussian_filter(im, sigma=gaussian_sigma)
    # Return a new DataArray carrying the original metadata.
    return model.DataArray(im, md)
|
delmic/odemis
|
src/odemis/acq/align/keypoint.py
|
Python
|
gpl-2.0
| 7,625
|
[
"Gaussian"
] |
f6ec299677407369c09fe25bd7ea671b0263fdaf7bd0fa25d48ca0455cd26e11
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
from h2o.estimators.kmeans import H2OKMeansEstimator
from h2o.transforms.decomposition import H2OPCA
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.word2vec import H2OWord2vecEstimator
from h2o.estimators.deepwater import H2ODeepWaterEstimator
# Accumulator checked at the end of the test: a non-zero sum makes the
# test exit with failure status.
model_within_max_runtime = []
max_runtime_secs_small=1e-63 # small max_runtime_secs to make sure model did not get to run.
def algo_max_runtime_secs():
    '''
    This pyunit test is written to ensure that the various model will not crash if the max_runtime_secs
    is set to be too short.  See PUBDEV-4802.

    Each algorithm is trained with a tiny max_runtime_secs (see the module
    global max_runtime_secs_small) and its runtime info is printed via
    grabRuntimeInfo.  Exits with status 1 if model_within_max_runtime is
    non-empty at the end.
    '''
    global model_within_max_runtime
    seed = 12345
    # word2vec
    train = h2o.import_file(pyunit_utils.locate("bigdata/laptop/text8.gz"), header=1, col_types=["string"])
    used = train[0:170000, 0]
    w2v_model = H2OWord2vecEstimator()
    grabRuntimeInfo(w2v_model, used, [], 0)
    cleanUp([train, used, w2v_model])
    # kmeans
    training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/kmeans_8_centers_3_coords.csv"))
    x_indices = list(range(training1_data.ncol))
    model = H2OKMeansEstimator(k=10)
    grabRuntimeInfo(model, training1_data, x_indices)
    cleanUp([training1_data, model])
    # PCA, pca_method=Power
    training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/pca1000by25.csv"))
    x_indices = list(range(training1_data.ncol))
    model = H2OPCA(k=10, transform="STANDARDIZE", pca_method="Power", compute_metrics=True)
    grabRuntimeInfo(model, training1_data, x_indices)
    cleanUp([model])
    # PCA, pca_method=Randomized
    model = H2OPCA(k=10, transform="STANDARDIZE", pca_method="Randomized", compute_metrics=True)
    grabRuntimeInfo(model, training1_data, x_indices)
    cleanUp([model])
    # PCA, pca_method=GLRM
    model = H2OPCA(k=10, transform="STANDARDIZE", pca_method="GLRM", compute_metrics=True, use_all_factor_levels=True)
    grabRuntimeInfo(model, training1_data, x_indices)
    cleanUp([model])
    # deeplearning
    training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/gaussian_training1_set.csv"))
    y_index = training1_data.ncol-1
    x_indices = list(range(y_index))
    model = H2ODeepLearningEstimator(distribution='gaussian', seed=seed, hidden=[10, 10, 10])
    grabRuntimeInfo(model, training1_data, x_indices, y_index)
    cleanUp([training1_data, model])
    # stack ensemble, stacking part is not iterative
    print("******************** Skip testing stack ensemble.  Not an iterative algo.")
    # GBM run
    training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/multinomial_training1_set.csv"))
    y_index = training1_data.ncol-1
    x_indices = list(range(y_index))
    training1_data[y_index] = training1_data[y_index].round().asfactor()
    model = H2OGradientBoostingEstimator(distribution="multinomial", seed=seed)
    grabRuntimeInfo(model, training1_data, x_indices, y_index)
    cleanUp([model])
    # GLM run
    model = H2OGeneralizedLinearEstimator(family='multinomial', seed=seed)
    grabRuntimeInfo(model, training1_data, x_indices, y_index)
    cleanUp([model])
    # naivebayes, not iterative
    print("******************** Skip testing Naives Bayes.  Not an iterative algo.")
    # random foreset
    model = H2ORandomForestEstimator(ntrees=100, score_tree_interval=0)
    grabRuntimeInfo(model, training1_data, x_indices)
    cleanUp([model, training1_data])
    # deepwater
    if H2ODeepWaterEstimator.available():
        training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))
        training1_data = training1_data.drop('Site')
        training1_data['Angaus'] = training1_data['Angaus'].asfactor()
        y_index = "Angaus"
        x_indices = list(range(1, training1_data.ncol))
        model = H2ODeepWaterEstimator(epochs=50, hidden=[4096, 4096, 4096], hidden_dropout_ratios=[0.2, 0.2, 0.2])
        grabRuntimeInfo(model, training1_data, x_indices, y_index)
        cleanUp([training1_data, model])
    # GLRM, do not make sense to stop in the middle of an iteration
    training1_data = h2o.import_file(path=pyunit_utils.locate("smalldata/gridsearch/glrmdata1000x25.csv"))
    x_indices = list(range(training1_data.ncol))
    model = H2OGeneralizedLowRankEstimator(k=10, loss="Quadratic", gamma_x=0.3, gamma_y=0.3, transform="STANDARDIZE",
                                           recover_svd=True)
    grabRuntimeInfo(model, training1_data, x_indices)
    cleanUp([training1_data, model])
    if sum(model_within_max_runtime)>0:
        sys.exit(1)
def grabRuntimeInfo(model, training_data, x_indices, y_index=0):
    '''
    This function will train the passed model with the max_runtime_secs set to be too short.  Want to make sure
    a warning message is received.

    :param model: model to be evaluated
    :param training_data: H2OFrame containing training dataset
    :param x_indices: prediction input indices to model.train
    :param y_index: response index to model.train
    :return: None
    '''
    global max_runtime_secs_small
    # glrm/pca/kmeans take no response column
    unsupervised = ("glrm" in model.algo) or ("pca" in model.algo) or ("kmeans" in model.algo)
    if unsupervised:
        model.train(x=x_indices, training_frame=training_data, max_runtime_secs=max_runtime_secs_small)
        model.model_performance(training_data).show()
    else:
        if ('word2vec' in model.algo):
            # word2vec takes neither x nor y
            model.train(training_frame=training_data, max_runtime_secs=max_runtime_secs_small)
        else:
            model.train(x=x_indices, y=y_index, training_frame=training_data, max_runtime_secs=max_runtime_secs_small)
            model.model_performance(training_data).show()
    # Report the actual runtime and the number of iterations performed
    print("Model: {0}, \nActual_model_runtime_sec: {1}, "
          "\nNumber of epochs/iterations/trees : {2}".format(model.algo,
                                                             model._model_json["output"]["run_time"]/1000.0,
                                                             checkIteration(model)))
def checkIteration(model):
    """
    Estimate how many epochs/iterations/trees the model actually ran.

    Uses the 'epochs' column of the scoring history when present, the
    scoring-history length as a fallback, then the top-level 'epochs'
    output field, and finally 0 when nothing is available.

    :param model: a trained H2O model
    :return: number (last epochs value), an int length, or 0
    """
    output = model._model_json["output"]
    # PEP 8: compare against None with identity, not (in)equality.
    if output["scoring_history"] is not None:
        epoch_list = pyunit_utils.extract_scoring_history_field(model, "epochs")
        if epoch_list is None:
            # No epochs column: use the scoring-history length as an estimate.
            return len(output["scoring_history"].cell_values)
        return epoch_list[-1]
    if "epochs" in output:
        return output["epochs"]
    return 0
def cleanUp(eleList):
    """Remove every H2O object in eleList from the cluster backend."""
    for item in eleList:
        h2o.remove(item)
if __name__ == "__main__":
pyunit_utils.standalone_test(algo_max_runtime_secs)
else:
algo_max_runtime_secs()
|
mathemage/h2o-3
|
h2o-py/tests/testdir_jira/pyunit_pubdev_4802_short_max_runtime_secs.py
|
Python
|
apache-2.0
| 7,133
|
[
"Gaussian"
] |
bb15d6cc08420c429543676467411754115f07848833d810961f8d9e4765dcf1
|
# -*- coding: utf-8 -*-
"""
created on Mon Jan 23 10:11:19 2017
@author: cattaert
modified June 7 2017 (D.Cataert)
in procedure "loadParams", the actual number of Loeb params is set to 41
modified June29, 2017 (D.Cataert)
line 220 parameter name error corrected ("sensColChartNbs")
modified July 17, 2017 (D.Cataert)
line 50: the script was modified to get the correct rootname directory
animatsimdir = readAnimatLabDir()
if animatsimdir != "":
subdir = os.path.split(animatsimdir)[-1]
print subdir
rootname = os.path.dirname(animatsimdir)
rootname += "/"
folders = FolderOrg(animatlab_rootFolder=rootname,
subdir=subdir)
this is the same modification as made in GUI_AnimatLabOptimization.py
modified August 24, 2017 (D.Cataert)
in actualiseSaveAprojFromAsimFile(asimFileName):
[asimSimSet, asimtab_stims] = getSimSetFromAsim(optSet,
seriesStimParam,
seriesSynParam,
seriesSynFRParam,
asimFileName)
getSimSetFromAsim call was acutalized according to the new format
adopted in optimization.py
modified August 28, 2017 (D.Cataert):
new procedure to get the AnimatLab V2 program path
def readAnimatLabV2ProgDir():
filename = "animatlabV2ProgDir.txt"
try:
fic = open(filename, 'r')
directory = fic.readline()
fic.close()
except:
directory = ""
# print "First instance: Root directory will be created from GUI"
return directory
modified September 1, 2017 (D.Cataert):
Handle motorstims to set automatically muscle and spindle parameters
two procedures added: makemvtsavechart and setMusclesandSpindles
(still in progress)
modified September 4, 2017 (D.Cataert):
a new parameter has been added that indicate the chart number(in case there
are several charts) from which measurements are made
The number of parameters is now 42 (instead of 41)
modified September 21, 2017:
best .asim file form CMAe is now saved in "finalModel" folder and copied to
"CMAeBestSimFiles" folder
best .asim file form Loeb is now saved in "finalModel" folder and copied to
"LoebBestSimFiles" folder
modified September 28, 2017:
A threshold for the CMAe mse allows to save the asim files in the
"CMAeBestSimFiles" folder (the file name is indicated in the last column of
the "CMAeFitCourse.txt" file)
modified October 12, 2017 (D.Cataert):
added a CMAeFitCourse file that contains "trial, eval, mse, coactpenality
and coact" values plus the names of the .asim and chart files when the
eval (i.e. mse + coactpenality) is below the optSet.seuilMSEsave value
added also a "LoebFitCourse.txt" file containing trial, eval, mse,
coactpenality and coact values plus the names of the .asim and chart files
modified October 13, 2017 (D.Cataert):
added a distance calculation on parameter space
modified October 16, 2017 (D.Cataert):
modified distance calculation (gives the exact distance instead of its square)
modified October 17, 2017 (D.Cataert):
two new procedures created:
initializeExecLoeb(): now initialize Loeb procedure before running it
continueLoeb(nbepoch = 1) : allows to go on previous Loebrun for a
given number of nbepoch
modified October 17, 2017 (D.Cataert):
procedure continueLoeb(nbepoch = nb) modified so that in the file
LoebFitCourse.txt the last line that contained the name of the asim
(trial eval mse coactpenality coact ArmSpike16_Standalone-0.asim)
is removed and an empty line is added to indicate that the process was
interrupted and continued.
The corresponding asimFile (in LoebLastSimFiles folder) is removed and the
corresponding aproj file in AprojFile folder is also removed.
The last line in ArmSpike36Loeb.par (which contains the name
of the removed asim file) is also removed
modified October 31, 2017 (D.Cataert):
procedure "saveparams()" modified so that it prints the name of the .aproj
saved in "AprojFiles" directory
All procedures use os.path to handle path
modified November 03, 2017 (D.Cataert):
dontChangeSyn is now printed in checknonzeroSyn()
"""
import class_animatLabModel as AnimatLabModel
import class_animatLabSimulationRunner as AnimatLabSimRunner
import class_simulationSet as SimulationSet
import class_projectManager as ProjectManager
# import xml.etree.ElementTree as elementTree
# import class_chartData as ChartData
# import xml.etree.ElementTree as elementTree
# from random import shuffle
import os
import pickle
# import random
from FoldersArm import FolderOrg
from animatlabOptimSetting import OptimizeSimSettings
from optimization import runMarquez, runCMAe, runLoeb
from optimization import setPlaybackControlMode
# from optimization import existe
# from optimization import improveSynapses, improveSynapsesFR, improveStims
# from optimization import enableStims, formTemplateSmooth, savecurve, liste
# from optimization import affichChartColumn
from optimization import affichMotor
from optimization import affichNeurons, affichNeuronsFR
from optimization import affichExtStim
from optimization import affichConnexions, affichConnexionsFR
from optimization import writeTitres, tablo, findTxtFileName
from optimization import readTabloTxt
from optimization import savechartfile
from optimization import writeBestResSuite
from optimization import writeaddTab
# from optimization import testquality
from optimization import findList_asimFiles
from optimization import getSimSetFromAsim
from optimization import setMotorStimsOff
from optimization import copyFile, copyFileDir, copyRenameFile
from optimization import copyFileWithExt, createSubDirIncrem
from optimization import findFirstType
# from optimization import enableStims
# from optimization import getlistparam
from optimization import savefileincrem, affich_table
def show_tab_extstim():
    """Display the external-stimuli table and cache it on optSet.tab_stims."""
    stim_table = affichExtStim(optSet.ExternalStimuli, 1)
    optSet.tab_stims = stim_table
def show_tab_motorstim():
    """Display the motor-stimuli table and cache it on optSet.tab_motors."""
    motor_table = affichMotor(model, optSet.motorStimuli, 1)
    optSet.tab_motors = motor_table
def readAnimatLabV2ProgDir():
    """
    Read the AnimatLab V2 program directory from "animatlabV2ProgDir.txt".

    Returns the first line of the file (trailing newline included, as it
    was written), or "" when the file is absent or unreadable so the
    caller can fall back to asking the user via the GUI.
    """
    filename = "animatlabV2ProgDir.txt"
    try:
        # 'with' guarantees the handle is closed even if readline fails
        # (the original leaked the handle on a readline error).
        with open(filename, 'r') as fic:
            directory = fic.readline()
    except IOError:
        # First instance: root directory will be created from GUI.
        directory = ""
    return directory
def readAnimatLabDir():
    """
    Read the AnimatLab simulation directory from "animatlabSimDir.txt".

    Returns the first line of the file (trailing newline included), or ""
    when the file is absent or unreadable.
    """
    filename = "animatlabSimDir.txt"
    try:
        # 'with' guarantees the handle is closed even if readline fails
        # (the original leaked the handle on a readline error).
        with open(filename, 'r') as f:
            directory = f.readline()
    except IOError:
        directory = ""
    return directory
def loadParams(paramFicName, optSet):
    """
    Loads parameters for optimization procedures (Loeb and CMAes)
    from a file named "paramOpt.pkl" stored in the ResultFiles directory of
    the chosen simulation. This file (and the "ResultFiles" directory) were
    created by the "GUI_AnimatLabOptimization.py" graphic user interface.
    After reading the "paramOpt.pkl" file, parameters are stored in the
    optSet object (from the class OptimizeSimSettings).
    If "paramOpt.pkl" exists and is usable, returns True.
    If it does not exist or cannot be read, returns False.
    """
    try:
        print
        print "looking paramOpt file:", paramFicName
        # The eight pickled objects must be read back in the exact order
        # they were written by saveParams_pickle().
        with open(paramFicName, 'rb') as input:
            optSet.paramLoebName = pickle.load(input)
            optSet.paramLoebValue = pickle.load(input)
            optSet.paramLoebType = pickle.load(input)
            optSet.paramLoebCoul = pickle.load(input)
            optSet.paramMarquezName = pickle.load(input)
            optSet.paramMarquezValue = pickle.load(input)
            optSet.paramMarquezType = pickle.load(input)
            optSet.paramMarquezCoul = pickle.load(input)
        print "nb loaded param :", len(optSet.paramLoebName)
        # print "nb nb actual param param:", len(listparNameOpt)
        print "nb expected param:", 42
        # There are 41 Loeb parameters in this version
        nbloadedpar = len(optSet.paramLoebName)
        if nbloadedpar == 42:
            # Current 42-parameter format; the item at index 16 tells which
            # sub-version wrote the file.
            if optSet.paramLoebName[16] == 'disabledSynNbs':
                # This is the last version that includes "seriesSynNSParam"
                print "paramOpt :"
                optSet.printParams(optSet.paramLoebName, optSet.paramLoebValue)
            elif optSet.paramLoebName[16] == 'allsyn':
                # Older 42-parameter layout without "seriesSynNSParam":
                # convert it in place and re-save the pickle.
                print "this version does not indicate seriesSynNSParam"
                print "ACTUALIZING..."
                optSet.update_optSetParamLoeb()
                saveParams_pickle(optSet.paramFicName, optSet)
            print "paramMarquez :"
            optSet.printParams(optSet.paramMarquezName,
                               optSet.paramMarquezValue)
            print '=================== Param loaded ===================='
            response = True
        elif nbloadedpar == 41:
            # Legacy 41-parameter file: prepend the missing "selectedChart"
            # entry (value 0, type int, color "Magenta") to each list.
            print "paramOpt with only 41 params:"
            pln = ['selectedChart'] + optSet.paramLoebName
            optSet.paramLoebName = pln
            plv = [0] + optSet.paramLoebValue
            optSet.paramLoebValue = plv
            plt = [int] + optSet.paramLoebType
            optSet.paramLoebType = plt
            plc = ["Magenta"] + optSet.paramLoebCoul
            optSet.paramLoebCoul = plc
            optSet.printParams(optSet.paramLoebName,
                               optSet.paramLoebValue)
            print "paramMarquez :"
            optSet.printParams(optSet.paramMarquezName,
                               optSet.paramMarquezValue)
            print '=================== Param loaded ===================='
            response = True
        else:
            print "Mismatch between existing and actual parameter files"
            response = False
    except:
        # NOTE(review): bare except treats any error (missing file, corrupt
        # pickle, ...) as "no parameter file" -- confirm this is intended.
        # print "No parameter file with this name in the directory",
        # print "NEEDs to create a new parameter file"
        response = False
    return response
def saveParams_pickle(paramFicName, optSet):
"""
doc string
"""
with open(paramFicName, 'wb') as output:
pickle.dump(optSet.paramLoebName, output)
pickle.dump(optSet.paramLoebValue, output)
pickle.dump(optSet.paramLoebType, output)
pickle.dump(optSet.paramLoebCoul, output)
pickle.dump(optSet.paramMarquezName, output)
pickle.dump(optSet.paramMarquezValue, output)
pickle.dump(optSet.paramMarquezType, output)
pickle.dump(optSet.paramMarquezCoul, output)
print "&&&&&& File saved :", paramFicName, " &&&&&&"
def saveparams(filename, lastname):
    """
    Writes the names and values of all parameters in a human readable text
    file stored in the ResultFiles directory. The file name is composed by
    the simulation name + Loeb or CMAe, and the extension ".par".
    filename: name of the .par file to write/append.
    lastname: name of the last saved .aproj file; appended as last column.
    The header row (parameter names) is written only when the file does not
    exist yet; each call appends one row of current values.
    """
    # Column names, in the exact order of the values below.
    listparnam = ["selectedChart",
                  "mvtcolumn",
                  "startMvt1",
                  "endMvt1",
                  "endPos1",
                  "angle1",
                  "startMvt2",
                  "endMvt2",
                  "endPos2",
                  "angle2",
                  "startEQM",
                  "endEQM",
                  "allstim",
                  "disabledStimNbs",
                  "dontChangeStimNbs",
                  "seriesStimParam",
                  "allsyn",
                  "dontChangeSynNbs",
                  "dontChangeSynFRNbs",
                  "seriesSynParam",
                  "seriesSynFRParam",
                  "nbepoch",
                  "nbstimtrials",
                  "nbsyntrials",
                  "nbsteps",
                  "deltaStimCoeff",
                  "maxDeltaStim",
                  "multSynCoeff",
                  "maxMultSyn",
                  "coactivityFactor",
                  "nsActivThr",
                  "limQuality",
                  "maxStim",
                  "maxSynAmp",
                  "maxG",
                  "maxWeight",
                  "defaultval",
                  "cmaes_sigma",
                  "fourchetteStim",
                  "fourchetteSyn"
                  ]
    # Current values read from the optSet global, same order as the names.
    # NOTE(review): the "nsActivThr" column is fed from optSet.activThr --
    # confirm the attribute/column name mismatch is intentional.
    listparval = [optSet.selectedChart,
                  optSet.mvtcolumn,
                  optSet.startMvt1,
                  optSet.endMvt1,
                  optSet.endPos1,
                  optSet.angle1,
                  optSet.startMvt2,
                  optSet.endMvt2,
                  optSet.endPos2,
                  optSet.angle2,
                  optSet.startEQM,
                  optSet.endEQM,
                  optSet.allstim,
                  optSet.disabledStimNbs,
                  optSet.dontChangeStimNbs,
                  optSet.seriesStimParam,
                  optSet.allsyn,
                  optSet.dontChangeSynNbs,
                  optSet.dontChangeSynFRNbs,
                  optSet.seriesSynParam,
                  optSet.seriesSynFRParam,
                  optSet.nbepoch,
                  optSet.nbstimtrials,
                  optSet.nbsyntrials,
                  optSet.nbsteps,
                  optSet.deltaStimCoeff,
                  optSet.maxDeltaStim,
                  optSet.multSynCoeff,
                  optSet.maxMultSyn,
                  optSet.coactivityFactor,
                  optSet.activThr,
                  optSet.limQuality,
                  optSet.maxStim,
                  optSet.maxSynAmp,
                  optSet.maxG,
                  optSet.maxWeight,
                  optSet.defaultval,
                  optSet.cmaes_sigma,
                  optSet.fourchetteStim,
                  optSet.fourchetteSyn
                  ]
    comment = "Optimization Parameters Values Saved for " + lastname
    # The .aproj file name becomes the extra, last column of the row.
    listparval.append(lastname)
    pathname = os.path.join(folders.animatlab_result_dir, filename)
    # Write the header row only on the first call (file creation).
    if not os.path.exists(pathname):
        writeaddTab(folders, listparnam, filename, 'w', "", 0)
    writeaddTab(folders, listparval, filename, 'a', comment, 1)
def checknonzeroSynFR(optSet):
    """
    Checks if the synaptic weight of enabled synapses between firing rate
    neurons is different from zero. All parameters are accessible in the
    optSet object. If a synaptic weight is zero, the weight is changed to
    1e-15.
    This procedure is used to avoid a synaptic weight to be trapped in zero
    value in the optimization process.
    """
    print
    print "Checking 'Firing Rate Neuron' connexions..."
    firstSynapseFR = findFirstType(model, "SynapsesFR")
    for syn in range(len(optSet.SynapsesFR)):
        if syn not in optSet.disabledSynFRNbs:
            # FR synapse names follow the "source*target" convention.
            tempName = optSet.model.lookup["Name"][firstSynapseFR+syn]
            tempName.split('*')
            neuronSource = tempName.split('*')[0]
            neuronTarget = tempName.split('*')[1]
            connexSourceName = neuronSource
            connexTargetName = neuronTarget
            synapseID = optSet.SynapsesFR[syn].find("ID").text
            synapseName = connexSourceName + "-" + connexTargetName
            # synapseType = optSet.SynapsesFR[syn].find("Type").text
            synapseWeight = optSet.model.getElementByID(synapseID).\
                find("Weight").text
            print synapseName,
            # Tab-pad so the "Weight :" column lines up across rows.
            for sp in range(4-(len(synapseName)+1)/8):
                print '\t',
            print "Weight : ",
            print synapseWeight,
            if synapseWeight == '0':
                # Replace an exact-zero weight by a tiny non-zero value.
                print "\t\t\t-->",
                model.getElementByID(synapseID).find("Weight").text = '1e-15'
                print optSet.model.getElementByID(synapseID).\
                    find("Weight").text
            else:
                print
def checknonzeroSyn(optSet):
"""
checks if synaptic weight (G) of enabled synapses between voltage neurons
is different from zero. All parameters are accessible in the optSet object.
If synaptic weight is zero, the weight is changed to 1e-15
This procedure is used to avoid a synaptic weight G to be trapped in zero
value in the optimization process.
"""
print
print "Checking 'Voltage Neuron' connexions..."
for syn in range(len(optSet.Connexions)):
if syn not in optSet.disabledSynNbs:
sourceID = optSet.Connexions[syn].find("SourceID").text
targetID = optSet.Connexions[syn].find("TargetID").text
neuronSource = model.getElementByID(sourceID)
neuronTarget = model.getElementByID(targetID)
connexSourceName = neuronSource.find("Name").text
connexTargetName = neuronTarget.find("Name").text
synapseTempID = optSet.Connexions[syn].find("SynapseTypeID").text
synapseTempName = model.getElementByID(synapseTempID).\
find("Name").text
synapseTempType = model.getElementByID(synapseTempID).\
find("Type").text
if syn in optSet.dontChangeSynNbs:
print "dontChange\t",
else:
print "Optim \t",
print connexSourceName,
for sp in range(2-(len(connexSourceName)+0)/8):
print '\t',
print '->', connexTargetName,
for sp in range(3-(len(connexTargetName)+4)/8):
print '\t',
if synapseTempType == 'NonSpikingChemical':
# The value of SynAmp is in the SynapseType
print "SynAmp : ",
synAmpVal = model.getElementByID(synapseTempID).\
find("SynAmp").text
print synAmpVal,
if synAmpVal == 'O':
print "\t-->",
model.getElementByID(synapseTempID).\
find("SynAmp").text = '0.0001'
print model.getElementByID(synapseTempID).\
find("SynAmp").text
else:
print
elif synapseTempType == 'SpikingChemical':
# The value of G is in the "Connexion"
print "G : ",
print optSet.Connexions[syn].find("G").text,
if optSet.Connexions[syn].find("G").text == '0':
print "\t-->",
optSet.Connexions[syn].find("G").text = '0.0001'
print optSet.Connexions[syn].find("G").text
else:
print
def checknonzeroExtStimuli(optSet):
    """
    Checks if any enabled external stimulus has a CurrentOn value of zero.
    All parameters are accessible in the optSet object. If it is the case,
    the value of the external stimulus is set to 1e-11 (via optSet).
    This procedure is used to avoid a stimulus value to be trapped in zero
    value in the optimization process.
    """
    print
    print "Checking External Stimuli..."
    for stim in range(optSet.nbStims):
        # Only enabled stimuli take part in the optimization.
        if optSet.ExternalStimuli[stim].find("Enabled").text == 'True':
            stimName = optSet.ExternalStimuli[stim].find("Name").text
            if stim in optSet.dontChangeStimNbs:
                print "dontChange\t",
            else:
                print "Optim \t",
            print stimName,
            # Tab-pad so the "CurrentOn :" column lines up across rows.
            for sp in range(3-(len(stimName)+1)/8):
                print '\t',
            print "CurrentOn : ",
            print optSet.ExternalStimuli[stim].find("CurrentOn").text,
            if optSet.ExternalStimuli[stim].find("CurrentOn").text == '0':
                # Replace an exact-zero current by a tiny non-zero value.
                print "-->",
                optSet.ExternalStimuli[stim].find("CurrentOn").text = '1e-11'
                print optSet.ExternalStimuli[stim].find("CurrentOn").text
            else:
                print
def makemvtsavechart(jointNb, motorName, val, motorStart, motorEnd):
    """
    Sets angle positions and velocities for motors, runs the motor-driven
    simulation, saves the resulting chart, and writes the motor settings
    into the .asim file. The original .asim file is preserved through a
    temp-directory copy and restored at the end.
    jointNb: joint index (not used in the body itself).
    motorName/val/motorStart/motorEnd: parallel lists describing each motor
    stimulus (name, equation value, start time, end time).
    Returns the name of the saved chart file.
    """
    # ========== copying original asim File to Temp Directory ===========
    print "copying original asim File to Temp Directory"
    # simFileName=findChartName(folders.animatlab_commonFiles_dir)[0] + '.asim'
    simFileName = os.path.split(model.asimFile)[-1]
    sourceDir = folders.animatlab_commonFiles_dir
    destDir = folders.animatlab_rootFolder + "temp/"
    if not os.path.exists(destDir):
        os.makedirs(destDir)
    copyFile(simFileName, sourceDir, destDir)
    # ================= Disable all external Stimuli... =================
    for stim in range(optSet.nbStims):
        optSet.ExternalStimuli[stim].find("Enabled").text = 'False'
    # ========== prepares simSet for mvt control and runprojMan =========
    chartRootName = "imposedMaxAmplMvt"
    simSet = SimulationSet.SimulationSet()
    simSet.samplePts = []
    for idx in range(len(motorName)):
        # One single-valued range per motor property -> a single simulation.
        simSet.set_by_range({motorName[idx] + ".Equation": [val[idx]]})
        simSet.set_by_range({motorName[idx] + ".StartTime": [motorStart[idx]]})
        simSet.set_by_range({motorName[idx] + ".EndTime": [motorEnd[idx]]})
    print simSet.samplePts
    projMan.make_asims(simSet)
    projMan.run(cores=-1)
    # ====================== Saves the chart result =====================
    tab = tablo(folders, findTxtFileName(model, optSet, 1))
    comment = ""
    destdir = os.path.join(folders.animatlab_rootFolder, "ChartResultFiles/")
    chartname = savechartfile(chartRootName, destdir, tab, comment)
    print "... chart file {} saved; {}".format(chartname, comment)
    # ====================== Modifies the asim file =====================
    for idx in range(len(motorName)):
        motorstim = model.getElementByName(motorName[idx])
        motorstim.find("Equation").text = str(val[idx])
        motorstim.find("StartTime").text = str(motorStart[idx])
        motorstim.find("EndTime").text = str(motorEnd[idx])
        motorstim.find("Enabled").text = "True"
    # ====================== Saves the new asim file =====================
    model.saveXML(overwrite=True)
    show_tab_extstim()
    show_tab_motorstim()
    # === copying asim File from FinalModel to MaxMvtModel Directory ====
    print "\nCopying asim File from FinalModel to MaxMvtModel Directory"
    sourceDir = folders.animatlab_commonFiles_dir
    destDir = folders.animatlab_rootFolder + "MaxMvtModel/"
    if not os.path.exists(destDir):
        os.makedirs(destDir)
    copyFile(simFileName, sourceDir, destDir)
    # ===== copying original asim File back to FinalModel Directory =====
    print "\ncopying original asim File back to FinalModel Directory"
    sourceDir = folders.animatlab_rootFolder + "temp/"
    destDir = folders.animatlab_commonFiles_dir
    copyFile(simFileName, sourceDir, destDir)
    return chartname
def setMusclesandSpindles():
    """
    Procedure not finalized yet.
    Prepares a motor-driven movement spanning the joint amplitude (limits
    read from optSet.jointLimDwn / optSet.jointLimUp) and runs it through
    makemvtsavechart() so muscle and spindle parameters can be measured.
    """
    # Prepares the motor -driven movement for max amplitudes
    motorName, motorType = [], []
    val, motorStart, motorEnd = [], [], []
    jointNb = 0
    # Joint limits converted from radians to degrees for display only.
    mindeg = (optSet.jointLimDwn[jointNb])*180/3.1415926
    maxdeg = (optSet.jointLimUp[jointNb])*180/3.1415926
    print "set Muscles and Spindles"
    print "\t\t\t", "radians", "\t\t\t", "degres"
    print "limits \t",
    print optSet.jointName[jointNb],
    print "[", optSet.jointLimDwn[jointNb],
    print "-->",
    print optSet.jointLimUp[jointNb], "]",
    print "[", mindeg, "-->", maxdeg, "]"
    # j indexes the flattened list of motor stimuli across all groups.
    j = 0
    for i in range(len(optSet.motorStimuli)):
        motorEl = optSet.motorStimuli[i]
        for idx, elem in enumerate(motorEl):
            motorName.append(elem.find("Name").text)
            motorType.append(elem.find("Type").text)
            txt1 = ""
            # Tab-pad so columns line up in the printed table.
            for k in range(3-((len(motorName[j])+0)/8)):
                txt1 += "\t"
            # val.append(optSet.jointLimDwn[jointNb])
            val.append(float(elem.find("Equation").text))
            motorStart.append(float(elem.find("StartTime").text))
            motorEnd.append(float(elem.find("EndTime").text))
            if motorType[j] == "MotorPosition":
                # set initial position to min angle
                val[j] = optSet.jointLimDwn[jointNb]
            print motorName[j], txt1, motorType[j], "\t", motorStart[j],
            print "\t", motorEnd[j], "\t", val[j]
            j += 1
    chartname = makemvtsavechart(jointNb, motorName, val, motorStart, motorEnd)
    # TODO: continue the implementation: use the motors to start at
    # optSet.jointLimDwn, measure muscles and spindles, then go to
    # optSet.jointLimUp and measure again.
def getSimSetDic(sourceDir, filename, simFileDir):
    """
    Reads a text file (its name is given by "filename", and it is located
    in the folder "sourceDir"), to extract the .asim file names it contains
    (6th item of each line, when present).
    "getSimSetDic" opens the asim files and uses simSet.samplePts (from the
    class SimulationSet) to get the dictionaries (param name : values).
    Returns an array containing two items:
    - list of dictionaries of param names and values for each .asim file
    - list of the corresponding asim file names
    """
    seriesStimParam = ['CurrentOn', 'StartTime', 'EndTime']
    seriesSynParam = ['G']
    seriesSynFRParam = ['Weight']
    tab = readTabloTxt(sourceDir, filename)
    simFile = []
    asimSimSet = []
    asimtab_stims = []
    simsetDic = []
    simFileNb = 0
    simFileName = []
    for rg in range(len(tab)):
        # Only data rows carry at least 7 columns.
        if len(tab[rg]) >= 7:
            try:
                # Rows whose first column is not an int (headers,
                # separators) are skipped by the except below.
                rrg = int(tab[rg][0])
                print "\nline", rrg, "file:", tab[rg][5]
                simFile.append(tab[rg][5])
                asimFileName = simFileDir + simFile[simFileNb]
                result = getSimSetFromAsim(optSet,
                                           seriesStimParam,
                                           seriesSynParam,
                                           seriesSynFRParam,
                                           asimFileName,
                                           affiche=0)
                asimSimSet.append(result[0])
                asimtab_stims.append(result[1])
                simFileName.append(asimFileName)
                simFileNb += 1
            except:
                # NOTE(review): bare except also hides genuine .asim read
                # errors, not only non-data rows -- confirm acceptable.
                None
    for sset in range(simFileNb):
        simsetDic.append(asimSimSet[sset].samplePts)
    for sset in range(simFileNb):
        print sset
        print simsetDic[sset]
    return [simsetDic, simFileName]
def normCenter(simsetDic, simFileName):
    """
    Normalizes all parameter values contained in the list of dictionaries
    (simsetDic), using the limits for each parameter (stored in optSet) to
    calculate a normalized value in the range [0, 1]:
    ((value-limMin)/(limMax-limMin))
    simFileName is only used for the printed report.
    Returns a list (one entry per sim file) of lists of normalized values.
    """
    simFileNb = len(simsetDic)
    # Normalizing and centering values
    normVal = []
    for sset in range(simFileNb):
        realstim = []
        normcentstim = []
        valuedic = simsetDic[sset][0]
        print simFileName[sset]
        # First pass: stimulus parameters (keys are "<name>.<param>").
        for key in valuedic.keys():
            if key.split(".")[1] in optSet.seriesStimParam:
                # Pick the normalization bounds for this parameter kind.
                if key.split(".")[1] == "StartTime":
                    limMax = optSet.endPos2
                    limMin = 0
                elif key.split(".")[1] == "EndTime":
                    limMax = optSet.endPos2
                    limMin = 0
                elif key.split(".")[1] == "CurrentOn":
                    limMax = optSet.maxStim
                    limMin = - limMax
                print key, " ", valuedic[key],
                realstim.append(valuedic[key])
                normcentstim.append((valuedic[key]-limMin)/(limMax-limMin))
                print "\t-> ", (valuedic[key]-limMin)/(limMax-limMin)
        # Second pass: synaptic parameters.
        for key in valuedic.keys():
            if key.split(".")[1] in optSet.seriesSynParam:
                if key.split(".")[1] == "G":
                    limMax = optSet.maxG
                    limMin = 0
                elif key.split(".")[1] == "Weight":
                    limMax = optSet.maxWeight
                    limMin = 0
                print key, " ", valuedic[key],
                realstim.append(valuedic[key])
                normcentstim.append((valuedic[key]-limMin)/(limMax-limMin))
                print "\t-> ", (valuedic[key]-limMin)/(limMax-limMin)
        normVal.append(normcentstim)
        print
    return normVal
def calculatedist(normVal, simFileName):
    """
    Calculates the Euclidean distance between sets of normalized parameter
    values. Each set of normalized values is associated to an .asim file,
    the name of which is given in simFileName (names are expected to follow
    the "<name>-<number>.asim" convention; the number identifies the file).
    Returns an array containing three items: a distance table
    (tabdistances), a table of file-number pairs (tabnames), and a table of
    file numbers (tabnbs).
    """
    simFileNb = len(normVal)
    tabdistances = []
    tabnames = []
    tabnbs = []
    for fic1 in range(simFileNb):
        tabdistances.append([])
        tabnames.append([])
        # Extract the incremental number from "<name>-<number>.asim".
        ficname1 = os.path.split(simFileName[fic1])[-1]
        name1 = os.path.splitext(ficname1)[0]
        nb1 = name1.split("-")[1]
        tabnbs.append(nb1)
        for fic2 in range(simFileNb):
            ficname2 = os.path.split(simFileName[fic2])[-1]
            name2 = os.path.splitext(ficname2)[0]
            nb2 = name2.split("-")[1]
            dist = 0
            # Euclidean distance in the normalized parameter space.
            for par in range(len(normVal[fic2])):
                dist += (normVal[fic1][par] - normVal[fic2][par])**2
            distance = dist**0.5
            print nb1, "\t", nb2, "\t", distance
            tabdistances[fic1].append(distance)
            tabnames[fic1].append(nb1 + "-" + nb2)
            dist = 0
        print
    return [tabdistances, tabnames, tabnbs]
def analyzeDistance(sourceDir, filename, simFileDir):
    """
    Computes pairwise distances between the parameter sets of the .asim
    files listed in a result text file.

    getSimSetDic() reads the text file (filename, located in sourceDir) and
    builds one dictionary of parameter names/values per listed .asim file;
    the values are then normalized (normCenter) and the pairwise distances
    computed (calculatedist).
    Returns [tabdistances, tabnames, tabnbs]:
    - tabdistances: table of distances (no column names)
    - tabnames: pairs of file numbers matching the tabdistances entries
    - tabnbs: list of file numbers
    """
    dics_and_names = getSimSetDic(sourceDir, filename, simFileDir)
    normalized = normCenter(dics_and_names[0], dics_and_names[1])
    return calculatedist(normalized, dics_and_names[1])
def createtabdistances(tabdistances, tabnbs):
    """
    Creates a printable table of distances between the parameter values of
    each simulation and those of all other simulations (identified by their
    file number). Built from tabdistances (square array of distances) and
    tabnbs (array of sim-file numbers).
    Displays the table (affich_table) and returns it.
    """
    size = len(tabdistances)
    # Header row: "nb1" followed by every file number.
    tabdist = [["nb1"] + [tabnbs[col] for col in range(size)]]
    # One data row per file: its number, then its distances to all files.
    for row in range(size):
        tabdist.append([tabnbs[row]] +
                       [tabdistances[row][col] for col in range(size)])
    affich_table(tabdist, 3)
    return tabdist
def actualiseSaveAprojFromAsimFile(asimFileName, aprojFileName, overwrite=0):
    """
    Actualizes the parameter values in the .aproj object defined in the
    model object (class AnimatLabModel). It calls getSimSetFromAsim(),
    which creates a simSet object (class SimulationSet) by extracting all
    parameter values from the .asim file, and returns it together with the
    external-stimuli and motor-stimuli state tables.
    Once the in-memory .aproj object is actualized, it saves an .aproj file
    with the name and path contained in aprojFileName (unless overwrite is
    set, saved names end with an incremented number).
    Returns the path+name of the saved aproj file.
    """
    seriesStimParam = ['CurrentOn', 'StartTime', 'EndTime']
    seriesSynParam = optSet.seriesSynParam
    # seriesSynNSParam = optSet.seriesSynNSParam
    seriesSynNSParam = ['SynAmp', 'ThreshV']
    seriesSynFRParam = optSet.seriesSynFRParam
    # Six-argument form of getSimSetFromAsim (includes seriesSynNSParam),
    # as adopted by the current optimization.py.
    res = getSimSetFromAsim(optSet, seriesStimParam, seriesSynParam,
                            seriesSynNSParam, seriesSynFRParam,
                            asimFileName, affiche=1)
    asimSimSet = res[0]
    asimtab_stims = res[1]
    asimtab_motorst = res[2]
    # Push the extracted values and stimulus/motor states into the .aproj.
    model.actualizeAproj(asimSimSet)
    model.actualizeAprojStimState(asimtab_stims)
    model.actualizeAprojMotorState(asimtab_motorst)
    complete_name = model.saveXMLaproj(aprojFileName, overwrite=overwrite)
    return complete_name
def changeparamvalue(paramName, paramType, value):
    """Set the text of field paramType on the model element named paramName."""
    element = model.getElementByName(paramName)
    element.find(paramType).text = value
def actualiseSaveAprojFromAsimFileDir(asimsourcedir, aprojdestdir, suffix):
    """
    For every .asim file found in asimsourcedir, actualizes the in-memory
    .aproj model and saves one .aproj file into aprojdestdir. Saved names
    are built from the global aprojFicName plus the given suffix and the
    number extracted from each .asim file name.
    """
    listAsim = findList_asimFiles(asimsourcedir)
    name = os.path.splitext(aprojFicName)[0]
    ext = os.path.splitext(aprojFicName)[1]
    # NOTE(review): this first ficName is recomputed inside the loop and is
    # therefore never used.
    ficName = name + suffix + ext
    for filesource in listAsim:
        asimFileName = os.path.join(asimsourcedir, filesource)
        print asimFileName
        # .asim names follow the "<name>-<number>.asim" convention.
        nam = os.path.splitext(filesource)[0]
        numero = nam.split("-")[1]
        ficName = name + suffix + str(numero) + ext
        aprojFileName = aprojdestdir + ficName
        actualiseSaveAprojFromAsimFile(asimFileName,
                                       aprojFileName,
                                       overwrite=1)
def CMAeLastSimFilesToAprojFiles():
    """
    Actualizes and saves one .aproj file (into aprojSaveDir) for every
    .asim file found in the "CMAeLastSimFiles" folder, using the
    "CMAeLast-" suffix in the generated file names.
    """
    source_dir = os.path.join(folders.animatlab_rootFolder,
                              "CMAeLastSimFiles/")
    actualiseSaveAprojFromAsimFileDir(source_dir, aprojSaveDir, "CMAeLast-")
def CMAeMinAsimFilesToCMAeMinAprojFiles():
    """
    Actualizes and saves one .aproj file (into "CMAeMinAprojFiles") for
    every .asim file found in the "CMAeMinAsimFiles" folder, using the
    "CMAeMin-" suffix in the generated file names.
    """
    root = folders.animatlab_rootFolder
    source_dir = os.path.join(root, "CMAeMinAsimFiles/")
    dest_dir = os.path.join(root, "CMAeMinAprojFiles/")
    actualiseSaveAprojFromAsimFileDir(source_dir, dest_dir, "CMAeMin-")
# ########################### Marquez procedures #############################
def execMarquez():
    """
    Executes Marquez procedures and saves the .asim file in the
    "FinalTwitchModel" folder. It saves the chart file in the
    "ChartTwitchFiles" folder.
    The first resulting .asim (if any) is converted to an .aproj file saved
    in the AprojFiles directory with a "Marquez" suffix.
    """
    runMarquez(folders, model, optSet, projMan)
    sourceDir = folders.animatlab_rootFolder + "FinalTwitchModel/"
    asimFileNamesList = findList_asimFiles(sourceDir)
    # Only convert when runMarquez actually produced an .asim file.
    if asimFileNamesList != []:
        asimFileName = folders.animatlab_rootFolder +\
            "FinalTwitchModel/" + asimFileNamesList[0]
        name = os.path.splitext(aprojFicName)[0]
        ext = os.path.splitext(aprojFicName)[1]
        ficname = name + "Marquez" + ext
        aprojFileName = aprojSaveDir + ficname
        actualiseSaveAprojFromAsimFile(asimFileName, aprojFileName)
# ############################# Loeb procedures ###############################
def initializeLoeb():
    """
    Initializes Loeb procedures before starting a new series.
    Resets the global run counter (essai) to 0 and erases the six state
    files that record the last Loeb process, so the next run starts clean.
    """
    global essai
    essai = 0
    dirName = folders.animatlab_rootFolder + "ResultFiles/"
    # The six files below hold the state of the previous Loeb run; remove
    # whichever of them exist (DRY replacement of six copy-pasted blocks).
    stateFiles = ["stimbestfits.txt",
                  "stimbestfitsCoact.txt",
                  "stimcoeff.txt",
                  "synbestfits.txt",
                  "synbestfitsCoact.txt",
                  "syncoeff.txt"]
    for name in stateFiles:
        filename = dirName + name
        if os.path.exists(filename):
            os.remove(filename)
def findLastSavedFile(directory, ficname, ext):
    """
    For folders (path given in directory) that contain files named with an
    incremented number appended to ficname ("name-0.ext", "name-1.ext",
    ...), returns the complete path of the last (highest-numbered) existing
    file.
    When no numbered file exists -- or the directory itself is missing --
    the returned path carries the number -2 and does not exist on disk,
    which callers detect with os.path.exists(). (The original raised
    UnboundLocalError for a missing directory; it now returns the same
    non-existing sentinel path as the empty-directory case.)
    """
    number = -1
    if os.path.exists(directory):
        # Walk the increments until the first missing file.
        complete_name = os.path.join(directory, ficname + "-0" + ext)
        while os.path.exists(complete_name):
            number = number + 1
            complete_name = os.path.join(directory,
                                         ficname + "-" + str(number) + ext)
    # Step back to the last file actually found (-2 when none was found).
    number = number - 1
    name = ficname + "-" + str(number) + ext
    complete_name = os.path.join(directory, name)
    return complete_name
def eraseLastSavedFile(directory, ficname, ext):
    """
    For folders (path given in directory) that contain files named with an
    incremented number appended to ficname, deletes the last (highest
    numbered) file when present.
    Returns the complete path of that file (whether or not it existed).
    """
    last_path = findLastSavedFile(directory, ficname, ext)
    if os.path.exists(last_path):
        os.remove(last_path)
        # print last_path + " has been erased"
    return last_path
def readtwolastlines(directory, ficname, ext):
    """
    Reads a text file (name is given by ficname + ext; in directory) and
    returns its two last lines as [previous_line, last_line]; when the file
    holds a single line, that line is returned twice.
    NOTE(review): if the file is missing or empty, 'res' is never assigned
    and the return raises UnboundLocalError; the file handle is also never
    closed explicitly -- confirm whether callers rely on this behavior.
    """
    complete_name = os.path.join(directory, ficname + ext)
    table = []
    if os.path.exists(complete_name):
        line = 1
        f = open(complete_name, 'r')
        txt = ""
        # Accumulate every line; readline() returns '' at end of file.
        while 1:
            line = line + 1
            txt = f.readline()
            # print txt
            if txt == '':
                break
            else:
                table.append(txt)
        if len(table) >= 2:
            res = [table[len(table)-2], table[len(table)-1]]
        elif len(table) >= 1:
            # Single-line file: duplicate the only line.
            res = [table[len(table)-1], table[len(table)-1]]
    print res[0], res[1]
    return res
def eraseLastLine(directory, ficname, ext):
    """
    Erases the last line of a text file (name is given by ficname + ext; in
    directory). The file is read fully, rewritten without its last line to
    a "temp.txt" file, and the original is then replaced by the temp file.
    NOTE(review): the rewrite appends one extra newline at the end and the
    read handle is never closed before the rename -- confirm acceptable.
    """
    complete_name = os.path.join(directory, ficname + ext)
    table = []
    if os.path.exists(complete_name):
        line = 1
        f = open(complete_name, 'r')
        txt = ""
        # Read every line; txt2 lags one line behind so the last line can
        # be printed when end of file is reached.
        while 1:
            line = line + 1
            txt2 = txt
            txt = f.readline()
            # print txt
            if txt == '':
                print txt2
                break
            else:
                table.append(txt)
        # Rewrite all but the last line into a temp file, then swap it in.
        f = open(os.path.join(directory, "temp.txt"), 'w')
        for line in range(len(table)-1):
            f.write(table[line])
        f.write("\n")
        f.close()
        os.remove(complete_name)
        os.rename(os.path.join(directory, "temp.txt"), complete_name)
    None
def execLoeb():
    """
    Executes Loeb optimization procedures. When optimization is finished,
    it copies the bestfit .asim file in the LoebLastSimFiles folder and
    saves the bestfit .aproj file in the AprojFiles folder.
    Reads and updates the global run counter 'essai'.
    """
    global essai
    # If the previous run's state files are missing/inconsistent, runLoeb
    # raises: reset the series (initializeLoeb) and retry once.
    # NOTE(review): bare except also swallows unrelated errors -- confirm.
    try:
        essaiNb = runLoeb(folders, model, optSet, projMan, essai)
    except:
        initializeLoeb()
        essaiNb = runLoeb(folders, model, optSet, projMan, essai)
    essai = essaiNb
    # ---------------------------------------------------------
    # Copies the bestfit .asim file in LoebLastSimFiles folder
    destdir = folders.animatlab_rootFolder + "LoebLastSimFiles/"
    sourcedir = folders.animatlab_commonFiles_dir
    simFileName = os.path.splitext(os.path.split(model.asimFile)[-1])[0]
    filesource = simFileName + ".asim"
    filedest = simFileName + ".asim"
    comment = ""
    # copyRenameFile returns the increment number used for the copy.
    numero = copyRenameFile(sourcedir, filesource,
                            destdir, filedest, comment,
                            replace=0)
    # Record the copied .asim file name as last column of the log header.
    comment = simFileName + '-{0:d}.asim'.format(numero)
    titles = ["trial", "eval", "mse", "coactpenality", "coact", comment]
    writeBestResSuite(folders, "LoebFitCourse.txt", titles, 0)
    # ---------------------------------------------------------
    # saves the bestfit .aproj file in AprojFiles folder
    name = os.path.splitext(aprojFicName)[0]
    ext = os.path.splitext(aprojFicName)[1]
    ficname = name + "Loeb" + ext
    asimFileName = model.asimFile
    aprojFileName = aprojSaveDir + ficname
    complete_name = actualiseSaveAprojFromAsimFile(asimFileName, aprojFileName)
    lastname = os.path.split(complete_name)[-1]
    # Append this run's parameter values to the "<subdir>Loeb.par" log.
    saveparams(folders.subdir + "Loeb.par", lastname)
def initializeExecLoeb():
    """
    Start a brand-new Loeb optimization series.

    Resets the Loeb state (the essai counter and the state files left over
    from the previous Loeb process), then launches a fresh Loeb optimization
    run.
    """
    # Wipe the previous series' state first, then run the optimization.
    initializeLoeb()
    execLoeb()
def continueLoeb(nbepoch=1):
"""
Prolonges the Loeb optimization process. Gets the last essai value (number
of AnimatLab runs in the previous Loeb process). This number is written in
the two last lines of the "LoebFitCourse.txt" file.
The name of the last asim file is also contained in the last line of the
"LoebFitCourse.txt" file. The name of this last asim file is incremental.
The process uses this number to check no error occurred before deleting
the last file in "LoebLastSimFiles" and "AprojFiles" folders.
THe last lines in "LoebFitCourse.txt" and "ArmSpike36Loeb.par" files are
also deleted. Finally, the Loeb optimization procedure is run for the
number of epoch indicated in the parameter "nbepoch", the value of which is
previously saved in optSet.nbepoch
"""
global essai
directory = folders.animatlab_result_dir
res = readtwolastlines(directory, "LoebFitCourse", ".txt")
line1 = res[0]
nb = line1.split('\t')[0]
try:
essai = int(nb)
except:
essai = 0
print "ERROR in LoebFitCourse.txt!!!!"
line2 = res[1]
last_string = line2.split('\t')[len(line2.split('\t'))-1]
asimfile = last_string.split('\n')[0]
print "last asim file : ", asimfile
asimfilename = os.path.splitext(asimfile)[0]
lastfilenumbertxt = asimfilename.split("-")[1]
directory = os.path.join(folders.animatlab_rootFolder, "LoebLastSimFiles")
ficname = os.path.splitext(os.path.split(model.asimFile)[-1])[0]
lastcomplete_asim_name = findLastSavedFile(directory, ficname, ".asim")
lastasim = os.path.split(lastcomplete_asim_name)[1]
lastasimname = os.path.splitext(lastasim)[0]
lastasimnamenumbertxt = lastasimname.split("-")[1]
if lastasimnamenumbertxt == lastfilenumbertxt:
completeasim_name = eraseLastSavedFile(directory, ficname, ".asim")
print completeasim_name, " has been erased"
directory = os.path.join(folders.animatlab_rootFolder, "AprojFiles")
name = os.path.splitext(aprojFicName)[0]
ficname = name + "Loeb"
lastcomplete_aproj_name = findLastSavedFile(directory, ficname, ".aproj")
lastaproj = os.path.split(lastcomplete_aproj_name)[1]
lastaprojname = os.path.splitext(lastaproj)[0]
lastaprojnamenumbertxt = lastaprojname.split("-")[1]
if lastaprojnamenumbertxt == lastfilenumbertxt:
completeaproj_name = eraseLastSavedFile(directory, ficname, ".aproj")
print completeaproj_name, " has been erased"
directory = folders.animatlab_result_dir
eraseLastLine(directory, "LoebFitCourse", ".txt")
eraseLastLine(directory, "ArmSpike36Loeb", ".par")
optSet.nbepoch = nbepoch
execLoeb()
# =============================================================================
# ########################### CMAe procedures #############################
def FinalModelfromCMAeMinAsimFiles(model, cmaeNb):
    """
    Promote the cmaeNb-th .asim file of the CMAeMinAsimFiles folder to the
    final-model folder: the file is copied under its base name (the increment
    suffix is dropped) and overwrites the previous .asim file there.
    """
    baseName = os.path.splitext(os.path.split(model.asimFile)[-1])[0]
    # --------------------------------------------------------------------
    # Copy the incremented sim file into the common (final model) folder
    srcDir = folders.animatlab_rootFolder + "CMAeMinAsimFiles/"
    dstDir = folders.animatlab_commonFiles_dir
    srcFile = "%s-%s.asim" % (baseName, cmaeNb)
    dstFile = baseName + ".asim"
    # replace=1 -> the destination file is overwritten if it already exists
    copyRenameFile(srcDir, srcFile, dstDir, dstFile, "", replace=1)
def FinalModelfromCMAeLastSimFiles(model, cmaeNb):
    """
    Copies the cmaeNb-th .asim file from the CMAeLastSimFiles folder and
    saves it in the FinalModel folder after changing its name (removes the
    increment number).
    This file replaces the previous asim File of this folder.
    """
    simFileName = os.path.splitext(os.path.split(model.asimFile)[-1])[0]
    comment = ""
    # --------------------------------------------------------------------
    # Copies sim file from "CMAeLastSimFiles" to "FinalModel" folder
    destdir = folders.animatlab_commonFiles_dir
    sourcedir = os.path.join(folders.animatlab_rootFolder, "CMAeLastSimFiles/")
    filesource = simFileName + "-" + str(cmaeNb) + ".asim"
    filedest = simFileName + ".asim"
    # Replaces the previous .asim File
    copyRenameFile(sourcedir, filesource, destdir, filedest, comment,
                   replace=1)
def execCMAe(nbevals):
    """
    Executes a CMAe optimization process for a number of runs indicated in
    "nbevals".
    After the CMAe optimization process is finished, it saves the best
    asim file in the "CMAeLastSimFiles" folder (with incremental number).
    It saves the bestfit .aproj file in the AprojFiles folder.
    It saves the parameter set used to run CMAe and the name of the aproj
    file in the "*CMAe.par" file that is in the "ResultFiles" folder.
    It also copies the outcmaes*.dat files produced by the CMAes procedure
    and located in the working directory, and saves these files into the
    "CMAeData" folder in a subfolder named CMAeData-xx (xx being incremental).
    """
    [res, simSet] = runCMAe(folders, model, optSet, projMan, nbevals)
    # =============== Creates and Saves the new asim file ================
    projMan.make_asims(simSet)  # saves the asim in "simFiles" folder
    # this is the best asim file (even if MSE > seuilMSEsave)
    # --------------------------------------------------------------------
    # Copies asim file from "SimFiles" to "CMAeLastSimFiles" folder
    simFileName = os.path.splitext(os.path.split(model.asimFile)[-1])[0]
    destdir = folders.animatlab_rootFolder + "CMAeLastSimFiles/"
    sourcedir = folders.animatlab_simFiles_dir
    filesource = simFileName + "-1.asim"
    filedest = simFileName + ".asim"
    # Add the .asim file with increment number; numero is the increment used
    numero = copyRenameFile(sourcedir, filesource,
                            destdir, filedest, "",
                            replace=0)
    # --------------------------------------------------------------------
    # saves the bestfit .aproj file in AprojFiles folder
    name = os.path.splitext(aprojFicName)[0]
    ext = os.path.splitext(aprojFicName)[1]
    ficName = name + "CMAeLast" + ext
    asimFileName = sourcedir + filesource
    aprojFileName = aprojSaveDir + ficName
    complete_name = actualiseSaveAprojFromAsimFile(asimFileName, aprojFileName)
    lastname = os.path.split(complete_name)[-1]
    # Persist the parameter set together with the saved .aproj name.
    saveparams(folders.subdir + "CMAe.par", lastname)
    # Archive the outcmaes*.dat files from the working directory into an
    # incremented CMAeData-xx subfolder.
    cwd = os.getcwd()
    CMAeDataSourceDir = cwd
    CMAeDataDestDir = folders.animatlab_rootFolder + "CMAeData/"
    CMAeDataSubDir = "CMAeData"
    destDir = createSubDirIncrem(CMAeDataDestDir, CMAeDataSubDir)
    dirname = os.path.basename(os.path.split(destDir)[0])
    copyFileWithExt(CMAeDataSourceDir, destDir, ".dat")
    # --------------------------------------------------------------------
    # add two last lines in "CMAeFitCourse.txt" file
    comment = simFileName + '-{0:d}.asim'.format(numero)
    comment = comment + "; " + dirname
    titles = ["trial", "eval", "mse", "coactpenality", "coact", comment]
    writeBestResSuite(folders, "CMAeFitCourse.txt", titles, 0)
# =============================================================================
def initialise():
    """
    (Re)creates the global model and optSet objects from the common files
    directory, switches motor stimuli off, reloads the optimization
    parameters from "paramOpt.pkl" when present, and prints the summary
    tables (motors, neurons, connexions, external stimuli).
    """
    global model, optSet
    model = AnimatLabModel.AnimatLabModel(folders.animatlab_commonFiles_dir)
    optSet = OptimizeSimSettings(folders=folders, model=model,
                                 projMan=projMan, sims=sims)
    setMotorStimsOff(model, optSet.motorStimuli)
    # Looks for a parameter file in the chosen directory
    fileName = 'paramOpt.pkl'
    if loadParams(folders.animatlab_result_dir + fileName, optSet):
        # optSet was updated from "paramOpt.pkl"
        # we use then optSet to implement the needed variables
        optSet.actualizeparamLoeb()
        optSet.actualizeparamMarquez()
    else:
        print "paramOpt.pkl MISSING !!, run 'GUI_animatlabOptimization.py'"
        print
    # Build the summary tables (last argument 1 presumably enables printing
    # inside the affich* helpers -- confirm against their definitions).
    optSet.tab_motors = affichMotor(model, optSet.motorStimuli, 1)
    optSet.tab_neurons = affichNeurons(optSet, optSet.Neurons, 1)
    optSet.tab_neuronsFR = affichNeuronsFR(optSet, optSet.NeuronsFR, 1)
    checknonzeroSyn(optSet)
    optSet.tab_connexions = affichConnexions(model, optSet,
                                             optSet.Connexions, 1)
    checknonzeroSynFR(optSet)
    optSet.tab_connexionsFR = affichConnexionsFR(model, optSet,
                                                 optSet.SynapsesFR, 1)
    checknonzeroExtStimuli(optSet)
    optSet.tab_stims = affichExtStim(optSet, optSet.ExternalStimuli, 1)
# ============================================================================
# MAIN PROGRAM
# ============================================================================
if __name__ == '__main__':
    """
    """
    # NOTE(review): "global" at module level is a no-op; essai is already a
    # module-level name.
    global essai
    animatsimdir = readAnimatLabDir()
    animatLabV2ProgDir = readAnimatLabV2ProgDir()
    if animatsimdir != "":
        # Build the folder organisation from the chosen AnimatLab directory.
        subdir = os.path.split(animatsimdir)[-1]
        print subdir
        rootname = os.path.dirname(animatsimdir)
        rootname += "/"
        folders = FolderOrg(animatlab_rootFolder=rootname,
                            python27_source_dir=animatLabV2ProgDir,
                            subdir=subdir)
        folders.affectDirectories()
        # Mirror the source directory into AprojFiles and CMAeMinAprojFiles.
        aprojSaveDir = folders.animatlab_rootFolder + "AprojFiles/"
        if not os.path.exists(aprojSaveDir):
            os.makedirs(aprojSaveDir)
        copyFileDir(animatsimdir,
                    aprojSaveDir,
                    copy_dir=0)
        aprojCMAeDir = folders.animatlab_rootFolder + "CMAeMinAprojFiles/"
        if not os.path.exists(aprojCMAeDir):
            os.makedirs(aprojCMAeDir)
        copyFileDir(animatsimdir,
                    aprojCMAeDir,
                    copy_dir=0)
    else:
        print "No selected directory run GUI_AnimatLabOptimization.py"
        # NOTE(review): bare "quit" is a name reference, not a call -- it
        # does NOT stop the script; should presumably be quit() or sys.exit().
        quit
    if animatsimdir != "":
        sims = AnimatLabSimRunner.AnimatLabSimulationRunner("Test Sims",
            rootFolder = folders.animatlab_rootFolder,
            commonFiles = folders.animatlab_commonFiles_dir,
            sourceFiles = folders.python27_source_dir,
            simFiles = folders.animatlab_simFiles_dir,
            resultFiles = folders.animatlab_result_dir)
    model = AnimatLabModel.AnimatLabModel(folders.animatlab_commonFiles_dir)
    projMan = ProjectManager.ProjectManager('Test Project')
    aprojFicName = os.path.split(model.aprojFile)[-1]
    optSet = OptimizeSimSettings(folders=folders, model=model,
                                 projMan=projMan, sims=sims)
    #
    listparNameOpt = optSet.paramLoebName
    setPlaybackControlMode(model, mode=0)  # 0: fastest Possible;
    # 1: match physics
    setMotorStimsOff(model, optSet.motorStimuli)
    # Looks for a parameter file in the chosen directory
    fileName = 'paramOpt.pkl'
    if loadParams(folders.animatlab_result_dir + fileName, optSet):
        # optSet was updated from "paramOpt.pkl"
        # we use then optSet to implement the needed variables
        listparNameOpt = optSet.paramLoebName
        listparValOpt = optSet.paramLoebValue
        listparTypeOpt = optSet.paramLoebType
        listparCoulOpt = optSet.paramLoebCoul
        optSet.actualizeparamLoeb()
        listparNameMarquez = optSet.paramMarquezName
        listparValMarquez = optSet.paramMarquezValue
        listparTypeMarquez = optSet.paramMarquezType
        listparCoulMarquez = optSet.paramMarquezCoul
        optSet.actualizeparamMarquez()
    else:
        print "paramOpt.pkl MISSING !!, run 'GUI_animatlabOptimization.py'"
        print
    # Summary tables of the model elements under optimization.
    optSet.tab_motors = affichMotor(model, optSet.motorStimuli, 1)
    # optSet.tab_chartcolumns = affichChartColumn(optSet.ChartColumns, 1)
    optSet.tab_neurons = affichNeurons(optSet, optSet.Neurons, 1)
    optSet.tab_neuronsFR = affichNeuronsFR(optSet, optSet.NeuronsFR, 1)
    checknonzeroSyn(optSet)
    optSet.tab_connexions = affichConnexions(model, optSet,
                                             optSet.Connexions, 1)
    checknonzeroSynFR(optSet)
    optSet.tab_connexionsFR = affichConnexionsFR(model, optSet,
                                                 optSet.SynapsesFR, 1)
    checknonzeroExtStimuli(optSet)
    optSet.tab_stims = affichExtStim(optSet, optSet.ExternalStimuli, 1)
    # --- summary output + one-shot utility snippets (inert docstrings) ---
    print
    # ###################################################################
    model.saveXML(overwrite=True)
    # ###################################################################
    writeTitres(folders, 'stim', optSet.allPhasesStim,
                optSet.tab_stims, optSet.seriesStimParam)
    writeTitres(folders, 'syn', optSet.allPhasesSyn,
                optSet.tab_connexions, optSet.seriesSynParam)
    writeTitres(folders, 'synFR', optSet.allPhasesSynFR,
                optSet.tab_connexionsFR, optSet.seriesSynFRParam)
    print "fourchetteStim:", optSet.fourchetteStim
    print "fourchetteSyn", optSet.fourchetteSyn
    print "cmaes_sigma", optSet.cmaes_sigma
    print "seuilMSEsave", optSet.seuilMSEsave
    # ###################################################################
    # setMusclesandSpindles()
    # ###################################################################
    # ###################################################################
    # execMarquez()
    # ###################################################################
    # ###################################################################
    # initializeExecLoeb()
    # continueLoeb(nbepoch = 2)
    # ###################################################################
    """
    optSet.fourchetteStim = 20
    optSet.fourchetteSyn = 20
    optSet.cmaes_sigma = 0.1
    optSet.seuilMSEsave = 100
    """
    # ###################################################################
    # execCMAe(nbevals=500)
    # FinalModelfromCMAeMinAsimFiles(model, cmaeNb=25)
    # ###################################################################
    """
    # FinalModelfromCMAeLastSimFiles(model, cmaeNb)
    # initialise()
    CMAeLastSimFilesToAprojFiles()
    CMAeMinAsimFilesToCMAeMinAprojFiles()
    asimFileName = folders.animatlab_rootFolder + "CMAeMinAsimFiles/" +\
        "ArmNS06_Standalone-3.asim"
    aprojFileName = folders.animatlab_rootFolder + "CMAeMinAprojFiles/" +\
        "ArmNS06CMAeMin-3.aproj"
    actualiseSaveAprojFromAsimFile(asimFileName,
                                   aprojFileName,
                                   overwrite=1)
    """
    # ###################################################################
    # ###################################################################
    # UTILITIES
    # ###################################################################
    #
    # ------------ to change the value of an Externalstimuli ------------
    """
    paramType = "CurrentOn"
    listparamName = ("1FlxGamma_St2", "1ExtGamma_St2", "1FlxAlpha_St1",
                     "1ExtGamma_St1", "1FlxGamma_St1", "1ExtAlpha_St1")
    paramName = "1FlxGamma_St1"
    paramName = "1FlxGamma_St2"
    paramName = "1ExtGamma_St1"
    paramName = "1ExtGamma_St2"
    paramName = "1FlxAlpha_St1"
    paramName = "1ExtAlpha_St1"
    value = '1e-011'
    for paramName in listparamName:
        print paramName
        changeparamvalue(paramName, paramType, value)
    checknonzeroExtStimuli(optSet)
    model.saveXML(overwrite=True)
    """
    #
    #
    # ----------------- to change the value of a synapse ----------------
    """
    paramType = "G"
    listparamName = ("1FlxGamma*1FlxPotGam",
                     "1ExtGamma*1ExtPotGam")
    value = '0.005'
    for paramName in listparamName:
        print paramName
        changeparamvalue(paramName, paramType, value)
    checknonzeroSyn(optSet)
    model.saveXML(overwrite=True)
    """
    #
    #
    # ------------------- Distance in parameter space -------------------
    """
    courseFileName = "CMAeFitCourse.txt"
    pathCourseFileName = os.path.join(folders.animatlab_rootFolder,
                                      "ResultFiles/")
    simFileDir = os.path.join(folders.animatlab_rootFolder,
                              "CMAeMinAsimFiles/")
    # tabdistances is a table containing distances but no column names
    [tabdistances, tabnames, tabnbs] = analyzeDistance(pathCourseFileName,
                                                       courseFileName,
                                                       simFileDir)
    # tabdist is a printable table with coloum names and row names
    tabdist = createtabdistances(tabdistances, tabnbs)
    comment = "CMAeFitCourse.txt" + "; seuil:" + str(optSet.seuilMSEsave)
    directory = os.path.join(folders.animatlab_result_dir,
                             "DistanceTables/")
    savefileincrem("Tabdistances",
                   directory,
                   tabdist, comment)
    #
    #
    """
|
neuRowsATL/animatLabSimulationAPI
|
mainOpt.py
|
Python
|
gpl-2.0
| 59,314
|
[
"NEURON"
] |
6766123a571f86774b0f86faac3d33d3ad11004ee780d70a40a1cc96faffcb59
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
import random
import wx.lib.pubsub as ps
import vtk
import constants as const
import project as prj
import session as ses
# Human-readable (translated) labels for the two measure types and the four
# viewer locations, keyed by the corresponding constants.
TYPE = {const.LINEAR: _(u"Linear"),
        const.ANGULAR: _(u"Angular"),
        }
LOCATION = {const.SURFACE: _(u"3D"),
            const.AXIAL: _(u"Axial"),
            const.CORONAL: _(u"Coronal"),
            const.SAGITAL: _(u"Sagittal")
            }
class MeasurementManager(object):
    """
    A class to manage the use (Addition, remotion and visibility) from
    measures. Driven entirely by pubsub messages; keeps the in-progress
    measure in self.current as a (Measurement, LinearMeasure|AngularMeasure)
    pair and the completed ones in self.measures.
    """
    def __init__(self):
        # current: (model, renderer-side measure) pair being built, or None.
        self.current = None
        # measures: list of completed (model, measure) pairs.
        self.measures = []
        self._bind_events()
    def _bind_events(self):
        # Subscribe the handlers to the pubsub topics they serve.
        ps.Publisher().subscribe(self._add_point, "Add measurement point")
        ps.Publisher().subscribe(self._change_name, "Change measurement name")
        ps.Publisher().subscribe(self._remove_measurements, "Remove measurements")
        ps.Publisher().subscribe(self._set_visibility, "Show measurement")
        ps.Publisher().subscribe(self._load_measurements, "Load measurement dict")
    def _load_measurements(self, pubsub_evt):
        # Rebuild measures from a persisted dict of Measurement objects.
        # NOTE(review): the local name "dict" shadows the builtin.
        dict = pubsub_evt.data
        for i in dict:
            m = dict[i]
            if m.type == const.LINEAR:
                mr = LinearMeasure(m.colour)
            else:
                mr = AngularMeasure(m.colour)
            self.current = (m, mr)
            self.measures.append(self.current)
            # Replay the stored points so the actors get re-created.
            for point in m.points:
                x, y, z = point
                actors = mr.AddPoint(x, y, z)
                ps.Publisher().sendMessage(("Add actors", m.location),
                                           (actors, m.slice_number))
            self.current = None
            if not m.is_shown:
                mr.SetVisibility(False)
                if m.location == const.SURFACE:
                    ps.Publisher().sendMessage('Render volume viewer')
                else:
                    ps.Publisher().sendMessage('Update slice viewer')
    def _add_point(self, pubsub_evt):
        # Add one point to the current measure, creating a new measure when
        # there is none or when location/slice changed since the last point.
        position = pubsub_evt.data[0]
        type = pubsub_evt.data[1] # Linear or Angular
        location = pubsub_evt.data[2] # 3D, AXIAL, SAGITAL, CORONAL
        try:
            slice_number = pubsub_evt.data[3]
        except IndexError:
            # Message without a slice number (e.g. 3D surface location).
            slice_number = 0
        to_remove = False
        if self.current is None:
            print "To Create"
            to_create = True
        elif self.current[0].location != location:
            print "To Create"
            print "To Remove"
            to_create = True
            to_remove = True
        elif self.current[0].slice_number != slice_number:
            print "To Create"
            print "To Remove"
            to_create = True
            to_remove = True
        else:
            print "To not Create"
            to_create = False
        if to_create:
            m = Measurement()
            m.index = len(self.measures)
            m.location = location
            m.slice_number = slice_number
            m.type = type
            if type == const.LINEAR:
                mr = LinearMeasure(m.colour)
            else:
                mr = AngularMeasure(m.colour)
            if to_remove:
                # Discard the half-built previous measure and its actors.
                print "---To REMOVE"
                actors = self.current[1].GetActors()
                slice_number = self.current[0].slice_number
                ps.Publisher().sendMessage(('Remove actors',
                    self.current[0].location), (actors, slice_number))
                if self.current[0].location == const.SURFACE:
                    ps.Publisher().sendMessage('Render volume viewer')
                else:
                    ps.Publisher().sendMessage('Update slice viewer')
                session = ses.Session()
                session.ChangeProject()
            self.current = (m, mr)
        mr = self.current[1]
        m = self.current[0]
        x, y, z = position
        actors = mr.AddPoint(x, y, z)
        m.points.append(position)
        ps.Publisher().sendMessage(("Add actors", location),
                                   (actors, m.slice_number))
        if mr.IsComplete():
            # Measure finished: register it with the project and tell the GUI.
            index = prj.Project().AddMeasurement(m)
            #m.index = index # already done in proj
            self.measures.append(self.current)
            name = m.name
            colour = m.colour
            m.value = mr.GetValue()
            type_ = TYPE[type]
            location = LOCATION[location]
            if type == const.LINEAR:
                value = u"%.2f mm"% m.value
            else:
                value = u"%.2f°"% m.value
            # NOTE(review): the trailing comma makes msg a 1-tuple -- confirm
            # the subscriber expects a tuple topic here.
            msg = 'Update measurement info in GUI',
            ps.Publisher().sendMessage(msg,
                                       (index, name, colour,
                                        type_, location,
                                        value))
            self.current = None
    def _change_name(self, pubsub_evt):
        # Rename the measure at the given index.
        index, new_name = pubsub_evt.data
        self.measures[index][0].name = new_name
    def _remove_measurements(self, pubsub_evt):
        # Remove the measures at the given indexes, dropping their actors and
        # unregistering them from the project.
        indexes = pubsub_evt.data
        print indexes
        for index in indexes:
            m, mr = self.measures.pop(index)
            actors = mr.GetActors()
            prj.Project().RemoveMeasurement(index)
            ps.Publisher().sendMessage(('Remove actors', m.location),
                                       (actors, m.slice_number))
        ps.Publisher().sendMessage('Update slice viewer')
        ps.Publisher().sendMessage('Render volume viewer')
        session = ses.Session()
        session.ChangeProject()
    def _set_visibility(self, pubsub_evt):
        # Show or hide one measure and refresh the relevant viewer.
        index, visibility = pubsub_evt.data
        m, mr = self.measures[index]
        m.is_shown = visibility
        mr.SetVisibility(visibility)
        if m.location == const.SURFACE:
            ps.Publisher().sendMessage('Render volume viewer')
        else:
            ps.Publisher().sendMessage('Update slice viewer')
class Measurement():
    """
    Plain data holder describing one measure (linear or angular): identity,
    colour, value, location, slice, points and visibility.
    """
    # Class-wide counter used to hand a unique index to every new measure.
    general_index = -1

    def __init__(self):
        Measurement.general_index += 1
        self.index = Measurement.general_index
        self.name = const.MEASURE_NAME_PATTERN % (self.index + 1)
        self.colour = random.choice(const.MEASURE_COLOUR)
        self.value = 0
        self.location = const.SURFACE  # or AXIAL / CORONAL / SAGITTAL
        self.type = const.LINEAR  # or ANGULAR
        self.slice_number = 0
        self.points = []
        self.is_shown = True

    def Load(self, info):
        """Restore this measure's state from a persisted dict."""
        for attribute in ("index", "name", "colour", "value", "location",
                          "type", "slice_number", "points", "is_shown"):
            setattr(self, attribute, info[attribute])
class CirclePointRepresentation(object):
    """
    Draws a picked surface point as a small coloured sphere.
    """
    def __init__(self, colour=(1, 0, 0), radius=1.0):
        """
        colour: RGB tuple used for the sphere actor
        radius: sphere radius in world coordinates
        """
        self.colour = colour
        self.radius = radius

    def GetRepresentation(self, x, y, z):
        """
        Build and return a vtkActor showing a sphere centred at (x, y, z).
        """
        source = vtk.vtkSphereSource()
        source.SetCenter(x, y, z)
        source.SetRadius(self.radius)
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetColor(self.colour)
        return actor
class CrossPointRepresentation(object):
    """
    This class represents a cross that indicate a point in the surface
    """
    def __init__(self, camera, colour=(1, 0, 0), size=1.0):
        """
        colour: the colour of the representation
        size: the size of the representation (half-length of each arm)
        camera: the active camera, to get the orientation to draw the cross
        """
        self.camera = camera
        self.colour = colour
        self.size = size
    def GetRepresentation(self, x, y, z):
        """
        Return a 2D actor drawing a cross centred on the world-space point
        (x, y, z); the arms are built perpendicular to the camera view
        direction so the cross faces the viewer.
        """
        pc = self.camera.GetPosition() # camera position
        pf = self.camera.GetFocalPoint() # focal position
        pp = (x, y, z) # point where the user clicked
        # Vector from camera position to user clicked point
        vcp = [j-i for i,j in zip(pc, pp)]
        # Vector from camera position to camera focal point
        vcf = [j-i for i,j in zip(pc, pf)]
        # the vector where the perpendicular vector will be given
        n = [0,0,0]
        # The cross, or vectorial product, give a vector perpendicular to vcp
        # and vcf, in this case this vector will be in horizontal, this vector
        # will be stored in the variable "n"
        vtk.vtkMath.Cross(vcp, vcf, n)
        # then normalize n to only indicate the direction of this vector
        vtk.vtkMath.Normalize(n)
        # horizontal arm endpoints, "size" away on each side of pp
        p1 = [i*self.size + j for i,j in zip(n, pp)]
        p2 = [i*-self.size + j for i,j in zip(n, pp)]
        sh = vtk.vtkLineSource()
        sh.SetPoint1(p1)
        sh.SetPoint2(p2)
        # second cross product gives the perpendicular (vertical) direction
        n = [0,0,0]
        vcn = [j-i for i,j in zip(p1, pc)]
        vtk.vtkMath.Cross(vcp, vcn, n)
        vtk.vtkMath.Normalize(n)
        # vertical arm endpoints
        p3 = [i*self.size + j for i,j in zip(n, pp)]
        p4 = [i*-self.size +j for i,j in zip(n, pp)]
        sv = vtk.vtkLineSource()
        sv.SetPoint1(p3)
        sv.SetPoint2(p4)
        # merge both arms into one polydata and map it in world coordinates
        cruz = vtk.vtkAppendPolyData()
        cruz.AddInput(sv.GetOutput())
        cruz.AddInput(sh.GetOutput())
        c = vtk.vtkCoordinate()
        c.SetCoordinateSystemToWorld()
        m = vtk.vtkPolyDataMapper2D()
        m.SetInputConnection(cruz.GetOutputPort())
        m.SetTransformCoordinate(c)
        a = vtk.vtkActor2D()
        a.SetMapper(m)
        a.GetProperty().SetColor(self.colour)
        return a
class LinearMeasure(object):
    """
    A two-point distance measure: two point glyphs, a connecting 2D line and
    a text label showing the distance in mm.
    """
    def __init__(self, colour=(1, 0, 0), representation=None):
        # colour: RGB tuple shared by points, line and label foreground.
        # representation: strategy used to draw each point; defaults to a
        # sphere glyph (CirclePointRepresentation).
        self.colour = colour
        self.points = []
        self.point_actor1 = None
        self.point_actor2 = None
        self.line_actor = None
        self.text_actor = None
        if not representation:
            representation = CirclePointRepresentation(colour)
        self.representation = representation
        # NOTE(review): debug print left in -- consider removing.
        print colour
    def IsComplete(self):
        """
        Is this measure complete?
        """
        return not self.point_actor2 is None
    def AddPoint(self, x, y, z):
        # Returns the actors created by this step: (point1,) for the first
        # point, or (point2, line, label) on completion.
        # NOTE(review): implicitly returns None when the measure is already
        # complete -- confirm callers never add a third point.
        if not self.point_actor1:
            self.SetPoint1(x, y, z)
            return (self.point_actor1, )
        elif not self.point_actor2:
            self.SetPoint2(x, y, z)
            return (self.point_actor2, self.line_actor, self.text_actor)
    def SetPoint1(self, x, y, z):
        self.points.append((x, y, z))
        self.point_actor1 = self.representation.GetRepresentation(x, y, z)
    def SetPoint2(self, x, y, z):
        self.points.append((x, y, z))
        self.point_actor2 = self.representation.GetRepresentation(x, y, z)
        self.CreateMeasure()
    def CreateMeasure(self):
        # Both endpoints are known: build the line and the distance label.
        self._draw_line()
        self._draw_text()
    def _draw_line(self):
        # 2D line between the two points, mapped in world coordinates.
        line = vtk.vtkLineSource()
        line.SetPoint1(self.points[0])
        line.SetPoint2(self.points[1])
        c = vtk.vtkCoordinate()
        c.SetCoordinateSystemToWorld()
        m = vtk.vtkPolyDataMapper2D()
        m.SetInputConnection(line.GetOutputPort())
        m.SetTransformCoordinate(c)
        a = vtk.vtkActor2D()
        a.SetMapper(m)
        a.GetProperty().SetColor(self.colour)
        self.line_actor = a
    def _draw_text(self):
        # Distance label positioned at the midpoint of the segment.
        p1, p2 = self.points
        text = ' %.2f mm ' % \
            math.sqrt(vtk.vtkMath.Distance2BetweenPoints(p1, p2))
        x,y,z=[(i+j)/2 for i,j in zip(p1, p2)]
        textsource = vtk.vtkTextSource()
        textsource.SetText(text)
        textsource.SetBackgroundColor((250/255.0, 247/255.0, 218/255.0))
        textsource.SetForegroundColor(self.colour)
        m = vtk.vtkPolyDataMapper2D()
        m.SetInputConnection(textsource.GetOutputPort())
        a = vtk.vtkActor2D()
        a.SetMapper(m)
        a.DragableOn()
        a.GetPositionCoordinate().SetCoordinateSystemToWorld()
        a.GetPositionCoordinate().SetValue(x,y,z)
        a.GetProperty().SetColor((0, 1, 0))
        self.text_actor = a
    def GetNumberOfPoints(self):
        return len(self.points)
    def GetValue(self):
        # Euclidean distance between the two endpoints (mm).
        p1, p2 = self.points
        return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(p1, p2))
    def SetRenderer(self, renderer):
        # Move every existing actor from the previous renderer to the new one.
        # NOTE(review): self.render is only assigned at the end of this
        # method; if actors already exist, the RemoveActor calls below raise
        # AttributeError on the very first SetRenderer call -- confirm the
        # call order callers rely on.
        if self.point_actor1:
            self.render.RemoveActor(self.point_actor1)
            renderer.AddActor(self.point_actor1)
        if self.point_actor2:
            self.render.RemoveActor(self.point_actor2)
            renderer.AddActor(self.point_actor2)
        if self.line_actor:
            self.render.RemoveActor(self.line_actor)
            renderer.AddActor(self.line_actor)
        if self.text_actor:
            self.render.RemoveActor(self.text_actor)
            renderer.AddActor(self.text_actor)
        self.render = renderer
    def SetVisibility(self, v):
        self.point_actor1.SetVisibility(v)
        self.point_actor2.SetVisibility(v)
        self.line_actor.SetVisibility(v)
        self.text_actor.SetVisibility(v)
    def GetActors(self):
        """
        Get the actors already created in this measure.
        """
        actors = []
        if self.point_actor1:
            actors.append(self.point_actor1)
        if self.point_actor2:
            actors.append(self.point_actor2)
        if self.line_actor:
            actors.append(self.line_actor)
        if self.text_actor:
            actors.append(self.text_actor)
        return actors
    def Remove(self):
        # Detach and drop every actor owned by this measure.
        # NOTE(review): relies on self.render being set by SetRenderer first.
        if self.point_actor1:
            self.render.RemoveActor(self.point_actor1)
            del self.point_actor1
        if self.point_actor2:
            self.render.RemoveActor(self.point_actor2)
            del self.point_actor2
        if self.line_actor:
            self.render.RemoveActor(self.line_actor)
            del self.line_actor
        if self.text_actor:
            self.render.RemoveActor(self.text_actor)
            del self.text_actor
    # def __del__(self):
        # self.Remove()
class AngularMeasure(object):
    """
    A three-point angle measure: three point glyphs, two segments joined by
    an arc, and a text label showing the angle in degrees at the vertex
    (the second point).
    """
    def __init__(self, colour=(1, 0, 0), representation=None):
        # colour: RGB tuple shared by points, lines and label foreground.
        # representation: strategy used to draw each point; defaults to a
        # sphere glyph (CirclePointRepresentation).
        self.colour = colour
        self.points = [0, 0, 0]
        self.number_of_points = 0
        self.point_actor1 = None
        self.point_actor2 = None
        self.point_actor3 = None
        self.line_actor = None
        self.text_actor = None
        if not representation:
            representation = CirclePointRepresentation(colour)
        self.representation = representation
        # NOTE(review): debug print left in -- consider removing.
        print colour
    def IsComplete(self):
        # Complete once the third point (and hence the angle) exists.
        return not self.point_actor3 is None
    def AddPoint(self, x, y, z):
        # Returns the actors created by this step; (point3, lines, label)
        # on completion. Implicitly returns None when already complete.
        if not self.point_actor1:
            self.SetPoint1(x, y, z)
            return (self.point_actor1,)
        elif not self.point_actor2:
            self.SetPoint2(x, y, z)
            return (self.point_actor2,)
        elif not self.point_actor3:
            self.SetPoint3(x, y, z)
            return (self.point_actor3, self.line_actor, self.text_actor)
    def SetPoint1(self, x, y, z):
        self.points[0] = (x, y, z)
        self.number_of_points = 1
        self.point_actor1 = self.representation.GetRepresentation(x, y, z)
    def SetPoint2(self, x, y, z):
        self.number_of_points = 2
        self.points[1] = (x, y, z)
        self.point_actor2 = self.representation.GetRepresentation(x, y, z)
    def SetPoint3(self, x, y, z):
        self.number_of_points = 3
        self.points[2] = (x, y, z)
        self.point_actor3 = self.representation.GetRepresentation(x, y, z)
        self.CreateMeasure()
    def CreateMeasure(self):
        # All three points are known: build the segments, arc and label.
        self._draw_line()
        self._draw_text()
    def _draw_line(self):
        # Two segments (p0-p1, p1-p2) plus the arc, merged into one actor.
        line1 = vtk.vtkLineSource()
        line1.SetPoint1(self.points[0])
        line1.SetPoint2(self.points[1])
        line2 = vtk.vtkLineSource()
        line2.SetPoint1(self.points[1])
        line2.SetPoint2(self.points[2])
        arc = self.DrawArc()
        line = vtk.vtkAppendPolyData()
        line.AddInput(line1.GetOutput())
        line.AddInput(line2.GetOutput())
        line.AddInput(arc.GetOutput())
        c = vtk.vtkCoordinate()
        c.SetCoordinateSystemToWorld()
        m = vtk.vtkPolyDataMapper2D()
        m.SetInputConnection(line.GetOutputPort())
        m.SetTransformCoordinate(c)
        a = vtk.vtkActor2D()
        a.SetMapper(m)
        a.GetProperty().SetColor(self.colour)
        self.line_actor = a
    def DrawArc(self):
        """
        Build the vtkArcSource marking the angle at the vertex. The arc
        radius is the distance from the vertex to the nearer of the two end
        points; its second endpoint is placed along the other arm at the
        same distance.
        """
        d1 = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(self.points[0],
                                                          self.points[1]))
        d2 = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(self.points[2],
                                                          self.points[1]))
        if d1 < d2:
            d = d1
            p1 = self.points[0]
            a,b,c = [j-i for i,j in zip(self.points[1], self.points[2])]
        else:
            d = d2
            p1 = self.points[2]
            a,b,c = [j-i for i,j in zip(self.points[1], self.points[0])]
        # Point at distance d from the vertex along the other arm.
        t = (d / math.sqrt(a**2 + b**2 + c**2))
        x = self.points[1][0] + a*t
        y = self.points[1][1] + b*t
        z = self.points[1][2] + c*t
        p2 = (x, y, z)
        arc = vtk.vtkArcSource()
        arc.SetPoint1(p1)
        arc.SetPoint2(p2)
        arc.SetCenter(self.points[1])
        arc.SetResolution(20)
        return arc
    def _draw_text(self):
        # Angle label positioned at the vertex point.
        text = u' %.2f ' % \
            self.CalculateAngle()
        x,y,z= self.points[1]
        textsource = vtk.vtkTextSource()
        textsource.SetText(text)
        textsource.SetBackgroundColor((250/255.0, 247/255.0, 218/255.0))
        textsource.SetForegroundColor(self.colour)
        m = vtk.vtkPolyDataMapper2D()
        m.SetInputConnection(textsource.GetOutputPort())
        a = vtk.vtkActor2D()
        a.SetMapper(m)
        a.DragableOn()
        a.GetPositionCoordinate().SetCoordinateSystemToWorld()
        a.GetPositionCoordinate().SetValue(x,y,z)
        self.text_actor = a
    def GetNumberOfPoints(self):
        return self.number_of_points
    def GetValue(self):
        # The measure's value is the vertex angle in degrees.
        return self.CalculateAngle()
    def SetVisibility(self, v):
        self.point_actor1.SetVisibility(v)
        self.point_actor2.SetVisibility(v)
        self.point_actor3.SetVisibility(v)
        self.line_actor.SetVisibility(v)
        self.text_actor.SetVisibility(v)
    def GetActors(self):
        """
        Get the actors already created in this measure.
        """
        actors = []
        if self.point_actor1:
            actors.append(self.point_actor1)
        if self.point_actor2:
            actors.append(self.point_actor2)
        if self.point_actor3:
            actors.append(self.point_actor3)
        if self.line_actor:
            actors.append(self.line_actor)
        if self.text_actor:
            actors.append(self.text_actor)
        return actors
    def CalculateAngle(self):
        """
        Calculate the angle between 2 vectors in 3D space. It is based on law of
        cosines for vector.
        The Alpha Cosine is equal the dot product from two vector divided for
        product between the magnitude from that vectors. Then the angle is inverse
        cosine.
        """
        v1 = [j-i for i,j in zip(self.points[0], self.points[1])]
        v2 = [j-i for i,j in zip(self.points[2], self.points[1])]
        #print vtk.vtkMath.Normalize(v1)
        #print vtk.vtkMath.Normalize(v2)
        # NOTE(review): floating-point rounding can push |cos| slightly above
        # 1 for (anti)parallel arms, making acos raise ValueError -- confirm.
        cos = vtk.vtkMath.Dot(v1, v2)/(vtk.vtkMath.Norm(v1)*vtk.vtkMath.Norm(v2))
        angle = math.degrees(math.acos(cos))
        return angle
    def Remove(self):
        # Detach and drop every actor owned by this measure.
        # NOTE(review): relies on self.render being set by SetRenderer first.
        if self.point_actor1:
            self.render.RemoveActor(self.point_actor1)
            del self.point_actor1
        if self.point_actor2:
            self.render.RemoveActor(self.point_actor2)
            del self.point_actor2
        if self.point_actor3:
            self.render.RemoveActor(self.point_actor3)
            del self.point_actor3
        if self.line_actor:
            self.render.RemoveActor(self.line_actor)
            del self.line_actor
        if self.text_actor:
            self.render.RemoveActor(self.text_actor)
            del self.text_actor
    def SetRenderer(self, renderer):
        # Move every existing actor from the previous renderer to the new one.
        # NOTE(review): same first-call AttributeError hazard as LinearMeasure:
        # self.render is only assigned at the end of this method.
        if self.point_actor1:
            self.render.RemoveActor(self.point_actor1)
            renderer.AddActor(self.point_actor1)
        if self.point_actor2:
            self.render.RemoveActor(self.point_actor2)
            renderer.AddActor(self.point_actor2)
        if self.point_actor3:
            self.render.RemoveActor(self.point_actor3)
            renderer.AddActor(self.point_actor3)
        if self.line_actor:
            self.render.RemoveActor(self.line_actor)
            renderer.AddActor(self.line_actor)
        if self.text_actor:
            self.render.RemoveActor(self.text_actor)
            renderer.AddActor(self.text_actor)
        self.render = renderer
    # def __del__(self):
        # self.Remove()
|
tatiana/invesalius
|
invesalius/data/measures.py
|
Python
|
gpl-2.0
| 21,435
|
[
"VTK"
] |
5b42538fb8cc1f663bc25be402f67d9f2e03614ee13aab5012508a250c891c87
|
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .. import kern
from ..core import GP, Param
from ..likelihoods import Gaussian
class GPLVM(GP):
    """
    Gaussian Process Latent Variable Model
    """
    def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, name="gplvm", Y_metadata=None, normalizer=False):
        """
        :param Y: observed data
        :type Y: np.ndarray
        :param input_dim: latent dimensionality
        :type input_dim: int
        :param init: initialisation method for the latent space
        :type init: 'PCA'|'random'
        :param X: optional fixed initial latent locations; when None they are
            inferred from Y via the `init` method
        :param kernel: covariance function; defaults to RBF(ARD) + Bias
        :param name: name of the model
        :param Y_metadata: optional metadata passed through to the likelihood
        :param normalizer:
            normalize the outputs Y.
            If normalizer is True, we will normalize using Standardize.
            If normalizer is False (the default), no normalization will be done.
        :type normalizer: bool
        """
        if X is None:
            from ..util.initialization import initialize_latent
            X, fracs = initialize_latent(init, input_dim, Y)
        else:
            fracs = np.ones(input_dim)
        if kernel is None:
            kernel = kern.RBF(input_dim, lengthscale=fracs, ARD=input_dim > 1) + kern.Bias(input_dim, np.exp(-2))
        likelihood = Gaussian()
        # Bug fix: honour the caller-supplied `name` instead of the
        # previously hard-coded 'GPLVM', which silently ignored the argument.
        super(GPLVM, self).__init__(X, Y, kernel, likelihood, name=name, Y_metadata=Y_metadata, normalizer=normalizer)
        # Register the latent locations as an optimizable parameter, placed
        # first in the parameter hierarchy.
        self.X = Param('latent_mean', X)
        self.link_parameter(self.X, index=0)
    def parameters_changed(self):
        """Propagate gradients: dL/dX on top of the standard GP updates."""
        super(GPLVM, self).parameters_changed()
        self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
|
SheffieldML/GPy
|
GPy/models/gplvm.py
|
Python
|
bsd-3-clause
| 1,680
|
[
"Gaussian"
] |
f5f00aefd6cbabea61f451a699e09777207fdefa36e970ee44e7ce4d15b1a70d
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # Data migration: classify each Clinic as 'primary' or 'general'.
    # Clinics whose name ends with 'PHC' become 'primary'; all others 'general'.

    def forwards(self, orm):
        "Write your forwards methods here."
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        for clinic in orm.Clinic.objects.all():
            clinic.type = ('primary' if clinic.name.endswith('PHC') else 'general')
            clinic.save()

    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: the pre-migration 'type' values are not
        # recoverable, so this migration cannot be reversed meaningfully.

    # Frozen ORM model definitions South uses to build the `orm` object above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'clinics.clinic': {
            'Meta': {'object_name': 'Clinic'},
            'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lga': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.LGA']", 'null': 'True'}),
            'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'primary'", 'max_length': '16', 'null': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'clinics.clinicscore': {
            'Meta': {'object_name': 'ClinicScore'},
            'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'quality': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'start_date': ('django.db.models.fields.DateField', [], {})
        },
        u'clinics.clinicstaff': {
            'Meta': {'object_name': 'ClinicStaff'},
            'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
        },
        u'clinics.genericfeedback': {
            'Meta': {'object_name': 'GenericFeedback'},
            'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
            'display_on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'display_on_summary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'report_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'clinics.lga': {
            'Meta': {'object_name': 'LGA'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'state': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.State']"})
        },
        u'clinics.patient': {
            'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
            'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'clinics.region': {
            'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
            'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'external_id': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
        },
        u'clinics.service': {
            'Meta': {'object_name': 'Service'},
            'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'clinics.state': {
            'Meta': {'object_name': 'State'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'clinics.visit': {
            'Meta': {'object_name': 'Visit'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
            'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
            'satisfied': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
            'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
            'survey_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'survey_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'clinics.visitregistrationerror': {
            'Meta': {'object_name': 'VisitRegistrationError'},
            'error_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'clinics.visitregistrationerrorlog': {
            'Meta': {'object_name': 'VisitRegistrationErrorLog'},
            'error_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
            'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'rapidsms.contact': {
            'Meta': {'object_name': 'Contact'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        }
    }

    complete_apps = ['clinics']
    symmetrical = True
|
myvoice-nigeria/myvoice
|
myvoice/clinics/migrations/0046_set_clinic_type.py
|
Python
|
bsd-2-clause
| 13,561
|
[
"VisIt"
] |
ed6600ab968433d483f17cf910403bd3cfc2cc165296c646f4d7d6ff82648429
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Extract a distribution for the soma radii of the population (list) of neurons.
'''
import argparse
import neurom as nm
from neurom import stats as st
def parse_args():
    '''Build the CLI parser and return the parsed argument namespace.'''
    arg_parser = argparse.ArgumentParser(
        description='Morphology fit distribution extractor',
        epilog='Note: Prints the optimal distribution and corresponding parameters.')
    # Single positional argument: where to find the morphology data.
    arg_parser.add_argument('datapath',
                            help='Path to morphology data file or directory')
    return arg_parser.parse_args()
def test_multiple_distr(filepath):
    '''Runs the distribution fit for multiple distributions and returns
    the optimal distribution along with the corresponding parameters.
    '''
    # Candidate (basic) distributions to evaluate against the data.
    candidates = ('norm', 'expon', 'uniform')
    # Load the population of neurons (e.g. from SWC files).
    population = nm.load_neurons(filepath)
    # Collect the soma radii feature over the whole population.
    radii = nm.get('soma_radii', population)
    # Return the best-fitting distribution and its parameters.
    return st.optimal_distribution(radii, candidates)
if __name__ == '__main__':
    # Parse the CLI arguments, run the fit, and report the winner.
    cli_args = parse_args()
    fit_result = test_multiple_distr(cli_args.datapath)
    print("Optimal distribution fit for soma radius is: %s with parameters %s" %
          (fit_result.type, fit_result.params))
|
eleftherioszisis/NeuroM
|
examples/soma_radius_fit.py
|
Python
|
bsd-3-clause
| 3,138
|
[
"NEURON"
] |
3f3fac13f5230011e6feb016fdbb9c40244fc4355416b90962ac60679ebef067
|
"""
Test helper functions and base classes.
"""
import functools
import inspect
import json
import operator
import os
import pprint
import unittest
import urlparse
from contextlib import contextmanager
from datetime import datetime
from unittest import TestCase
import requests
from bok_choy.javascript import js_defined
from bok_choy.page_object import XSS_INJECTION
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import CourseLocator
from path import Path as path
from pymongo import ASCENDING, MongoClient
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common import BASE_URL
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from openedx.core.lib.tests.assertions.events import EventMatchTolerates, assert_event_matches, is_matching_event
from openedx.core.release import RELEASE_LINE, doc_version
from xmodule.partitions.partitions import UserPartition
# Cap on how many collected events get printed when an event assertion fails.
MAX_EVENTS_IN_FAILURE_OUTPUT = 20
def skip_if_browser(browser):
    """
    Method decorator that skips a test if browser is `browser`
    Args:
        browser (str): name of internet browser
    Returns:
        Decorated function
    """
    def decorator(test_function):
        @functools.wraps(test_function)
        def wrapper(self, *args, **kwargs):
            # Compare against the browser the test run is using; skip on match.
            if browser == self.browser.name:
                raise unittest.SkipTest('Skipping as this test will not work with {}'.format(browser))
            test_function(self, *args, **kwargs)
        return wrapper
    return decorator
def is_youtube_available():
    """
    Check if the required youtube urls are available.
    If a URL in `youtube_api_urls` is not reachable then subsequent URLs will not be checked.
    Returns:
        bool:
    """
    # TODO: Design and implement a better solution that is reliable and repeatable,
    # reflects how the application works in production, and limits the third-party
    # network traffic (e.g. repeatedly retrieving the js from youtube from the browser).
    youtube_api_urls = {
        'main': 'https://www.youtube.com/',
        'player': 'https://www.youtube.com/iframe_api',
        # For transcripts, you need to check an actual video, so we will
        # just specify our default video and see if that one is available.
        'transcript': 'http://video.google.com/timedtext?lang=en&v=3_yD_cEKoCk',
    }
    for api_url in youtube_api_urls.itervalues():
        try:
            reply = requests.get(api_url, allow_redirects=False)
        except requests.exceptions.ConnectionError:
            return False
        # Redirects and errors (>= 300) both count as "not available".
        if reply.status_code >= 300:
            return False
    return True
def is_focused_on_element(browser, selector):
    """
    Check if the focus is on the element that matches the selector.
    """
    # Ask jQuery (in the page) whether the matched element has focus.
    script = "return $('{}').is(':focus')".format(selector)
    return browser.execute_script(script)
def load_data_str(rel_path):
    """
    Load a file from the "data" directory as a string.
    `rel_path` is the path relative to the data directory.
    """
    data_dir = path(__file__).abspath().dirname() / "data"
    with open(data_dir / rel_path) as data_file:
        return data_file.read()
def remove_file(filename):
    """
    Remove a file if it exists
    """
    # Missing files are silently ignored.
    if not os.path.exists(filename):
        return
    os.remove(filename)
def disable_animations(page):
    """
    Disable jQuery and CSS3 animations.
    """
    # Turn off both animation mechanisms the frontend uses.
    for switch_off in (disable_jquery_animations, disable_css_animations):
        switch_off(page)
def enable_animations(page):
    """
    Enable jQuery and CSS3 animations.
    """
    # Re-enable both animation mechanisms the frontend uses.
    for switch_on in (enable_jquery_animations, enable_css_animations):
        switch_on(page)
@js_defined('window.jQuery')
def disable_jquery_animations(page):
    """
    Disable jQuery animations.
    """
    # fx.off makes all jQuery effects complete immediately.
    script = "jQuery.fx.off = true;"
    page.browser.execute_script(script)
@js_defined('window.jQuery')
def enable_jquery_animations(page):
    """
    Enable jQuery animations.
    """
    # Restore normal jQuery effect timing.
    script = "jQuery.fx.off = false;"
    page.browser.execute_script(script)
def disable_css_animations(page):
    """
    Disable CSS3 animations, transitions, transforms.
    """
    # Injects a one-time <style> element (id 'no-transitions') that force-
    # disables transitions/transforms/animations across all vendor prefixes.
    # Idempotent: a second call finds the element and returns early.
    page.browser.execute_script("""
        var id = 'no-transitions';
        // if styles were already added, just do nothing.
        if (document.getElementById(id)) {
            return;
        }
        var css = [
            '* {',
            '-webkit-transition: none !important;',
            '-moz-transition: none !important;',
            '-o-transition: none !important;',
            '-ms-transition: none !important;',
            'transition: none !important;',
            '-webkit-transition-property: none !important;',
            '-moz-transition-property: none !important;',
            '-o-transition-property: none !important;',
            '-ms-transition-property: none !important;',
            'transition-property: none !important;',
            '-webkit-transform: none !important;',
            '-moz-transform: none !important;',
            '-o-transform: none !important;',
            '-ms-transform: none !important;',
            'transform: none !important;',
            '-webkit-animation: none !important;',
            '-moz-animation: none !important;',
            '-o-animation: none !important;',
            '-ms-animation: none !important;',
            'animation: none !important;',
            '}'
        ].join(''),
        head = document.head || document.getElementsByTagName('head')[0],
        styles = document.createElement('style');
        styles.id = id;
        styles.type = 'text/css';
        if (styles.styleSheet){
            styles.styleSheet.cssText = css;
        } else {
            styles.appendChild(document.createTextNode(css));
        }
        head.appendChild(styles);
    """)
def enable_css_animations(page):
    """
    Enable CSS3 animations, transitions, transforms.
    """
    # Removes the style override injected by disable_css_animations.
    # NOTE(review): assumes the 'no-transitions' element exists — calling this
    # without a prior disable_css_animations would throw in the browser.
    page.browser.execute_script("""
        var styles = document.getElementById('no-transitions'),
            head = document.head || document.getElementsByTagName('head')[0];
        head.removeChild(styles)
    """)
def select_option_by_text(select_browser_query, option_text, focus_out=False):
    """
    Chooses an option within a select by text (helper method for Select's select_by_visible_text method).
    Wrap this in a Promise to prevent a StaleElementReferenceException
    from being raised while the DOM is still being rewritten
    """
    def select_option(query, value):
        """ Get the first select element that matches the query and select the desired value. """
        try:
            element = query.first.results[0]
            Select(element).select_by_visible_text(value)
            if focus_out:
                element.send_keys(Keys.TAB)
        except StaleElementReferenceException:
            # The DOM changed underneath us; report failure so the
            # promise retries.
            return False
        return True
    msg = 'Selected option {}'.format(option_text)
    EmptyPromise(lambda: select_option(select_browser_query, option_text), msg).fulfill()
def get_selected_option_text(select_browser_query):
    """
    Returns the text value for the first selected option within a select.
    Wrap this in a Promise to prevent a StaleElementReferenceException
    from being raised while the DOM is still being rewritten
    """
    def get_option(query):
        """ Get the first select element that matches the query and return its value. """
        try:
            selected = Select(query.first.results[0]).first_selected_option
            return (True, selected.text)
        except StaleElementReferenceException:
            # DOM was rewritten mid-read; signal the promise to retry.
            return (False, None)
    return Promise(lambda: get_option(select_browser_query), 'Retrieved selected option text').fulfill()
def get_options(select_browser_query):
    """
    Returns all the options for the given select.
    """
    select_element = select_browser_query.first.results[0]
    return Select(select_element).options
def generate_course_key(org, number, run):
    """
    Makes a CourseLocator from org, number and run
    """
    # The 'draft' (old mongo) store uses deprecated-style course keys.
    is_deprecated = os.environ.get('DEFAULT_STORE', 'draft') == 'draft'
    return CourseLocator(org, number, run, deprecated=is_deprecated)
def select_option_by_value(browser_query, value, focus_out=False):
    """
    Selects a html select element by matching value attribute
    """
    select = Select(browser_query.first.results[0])
    select.select_by_value(value)

    def options_selected():
        """
        Returns True if all options in select element where value attribute
        matches `value`. if any option is not selected then returns False
        and select it. if value is not an option choice then it returns False.
        """
        value_found = False
        fully_selected = True
        for option in select.options:
            if option.get_attribute('value') != value:
                continue
            value_found = True
            if not option.is_selected():
                # Click any matching option that is not yet selected and retry.
                fully_selected = False
                option.click()
        if not value_found:
            # `value` is not one of the select's choices.
            fully_selected = False
        if focus_out:
            browser_query.first.results[0].send_keys(Keys.TAB)
        return fully_selected

    # Make sure specified option is actually selected
    EmptyPromise(options_selected, "Option is selected").fulfill()
def is_option_value_selected(browser_query, value):
    """
    return true if given value is selected in html select element, else return false.
    """
    selected = Select(browser_query.first.results[0]).first_selected_option
    return selected.get_attribute('value') == value
def element_has_text(page, css_selector, text):
    """
    Return true if the given text is present in the list.
    """
    # Membership alone implies the match list is non-empty, so the explicit
    # length check from the long-hand version is unnecessary.
    return text in page.q(css=css_selector).text
def get_modal_alert(browser):
    """
    Returns instance of modal alert box shown in browser after waiting
    for 6 seconds
    """
    # Block (up to 6 seconds) until the alert actually exists.
    waiter = WebDriverWait(browser, 6)
    waiter.until(EC.alert_is_present())
    return browser.switch_to.alert
def get_element_padding(page, selector):
    """
    Get Padding of the element with given selector,
    :returns a dict object with the following keys.
        1 - padding-top
        2 - padding-right
        3 - padding-bottom
        4 - padding-left
    Example Use:
        progress_page.get_element_padding('.wrapper-msg.wrapper-auto-cert')
    """
    # Build a jQuery snippet that reads the four padding values (with the
    # 'px' suffix stripped) and return the resulting object to Python.
    js_script = """
        var $element = $('%(selector)s');
        element_padding = {
            'padding-top': $element.css('padding-top').replace("px", ""),
            'padding-right': $element.css('padding-right').replace("px", ""),
            'padding-bottom': $element.css('padding-bottom').replace("px", ""),
            'padding-left': $element.css('padding-left').replace("px", "")
        };
        return element_padding;
    """ % {'selector': selector}
    return page.browser.execute_script(js_script)
def is_404_page(browser):
    """ Check if page is 404 """
    heading = browser.find_element_by_tag_name('h1').text
    return 'Page not found (404)' in heading
def create_multiple_choice_xml(correct_choice=2, num_choices=4):
    """
    Return the Multiple Choice Problem XML, given the name of the problem.
    """
    # all choices are incorrect except for correct_choice
    choices = [False] * num_choices
    choices[correct_choice] = True
    choice_names = ['choice_{}'.format(index) for index in range(num_choices)]
    question_text = 'The correct answer is Choice {}'.format(correct_choice)
    return MultipleChoiceResponseXMLFactory().build_xml(
        question_text=question_text,
        choices=choices,
        choice_names=choice_names,
    )
def create_multiple_choice_problem(problem_name):
    """
    Return the Multiple Choice Problem Descriptor, given the name of the problem.
    """
    # Build the problem XML with the default choice layout, then wrap it in
    # an XBlock fixture that always re-randomizes.
    return XBlockFixtureDesc(
        'problem',
        problem_name,
        data=create_multiple_choice_xml(),
        metadata={'rerandomize': 'always'}
    )
def auto_auth(browser, username, email, staff, course_id):
    """
    Logout and login with given credentials.
    """
    # AutoAuthPage performs the session reset and login as part of visit().
    AutoAuthPage(browser, username=username, email=email, course_id=course_id, staff=staff).visit()
def assert_link(test, expected_link, actual_link):
    """
    Assert that 'href' and text inside help DOM element are correct.
    Arguments:
        test: Test on which links are being tested.
        expected_link (dict): The expected link attributes.
        actual_link (dict): The actual link attribute on page.
    """
    # Pair each expected attribute with the value observed on the page.
    checks = (
        (expected_link['href'], actual_link.get_attribute('href')),
        (expected_link['text'], actual_link.text),
    )
    for expected, actual in checks:
        test.assertEqual(expected, actual)
def assert_opened_help_link_is_correct(test, url):
    """
    Asserts that url of browser when help link is clicked is correct.
    Arguments:
        test (AcceptanceTest): test calling this method.
        url (str): url to verify.
    """
    # The help link opens in a new window/tab; switch to the newest one.
    newest_window = test.browser.window_handles[-1]
    test.browser.switch_to_window(newest_window)
    WebDriverWait(test.browser, 10).until(lambda driver: driver.current_url == url)
    # Check that the URL loads. Can't do this in the browser because it might
    # be loading a "Maze Found" missing content page.
    response = requests.get(url)
    failure_msg = "URL {!r} returned {}".format(url, response.status_code)
    test.assertEqual(response.status_code, 200, failure_msg)
# ReadTheDocs book slugs used on the master (edx.org) release line.
EDX_BOOKS = {
    'course_author': 'edx-partner-course-staff',
    'learner': 'edx-guide-for-students',
}

# ReadTheDocs book slugs used on named Open edX release lines.
OPEN_BOOKS = {
    'course_author': 'open-edx-building-and-running-a-course',
    'learner': 'open-edx-learner-guide',
}
def url_for_help(book_slug, path_component):
    """
    Create a full help URL given a book slug and a path component.
    """
    # Emulate the switch between books that happens in envs/bokchoy.py
    if RELEASE_LINE == "master":
        books = EDX_BOOKS
    else:
        books = OPEN_BOOKS
    return 'https://edx.readthedocs.io/projects/{}/en/{}{}'.format(
        books[book_slug], doc_version(), path_component)
class EventsTestMixin(TestCase):
"""
Helpers and setup for running tests that evaluate events emitted
"""
def setUp(self):
super(EventsTestMixin, self).setUp()
mongo_host = 'edx.devstack.mongo' if 'BOK_CHOY_HOSTNAME' in os.environ else 'localhost'
self.event_collection = MongoClient(mongo_host)["test"]["events"]
self.start_time = datetime.now()
def reset_event_tracking(self):
"""Drop any events that have been collected thus far and start collecting again from scratch."""
self.event_collection.drop()
self.start_time = datetime.now()
@contextmanager
def capture_events(self, event_filter=None, number_of_matches=1, captured_events=None):
"""
Context manager that captures all events emitted while executing a particular block.
All captured events are stored in the list referenced by `captured_events`. Note that this list is appended to
*in place*. The events will be appended to the list in the order they are emitted.
The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
match that provided expectation.
`number_of_matches` tells this context manager when enough events have been found and it can move on. The
context manager will not exit until this many events have passed the filter. If not enough events are found
before a timeout expires, then this will raise a `BrokenPromise` error. Note that this simply states that
*at least* this many events have been emitted, so `number_of_matches` is simply a lower bound for the size of
`captured_events`.
"""
start_time = datetime.utcnow()
yield
events = self.wait_for_events(
start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches)
if captured_events is not None and hasattr(captured_events, 'append') and callable(captured_events.append):
for event in events:
captured_events.append(event)
@contextmanager
def assert_events_match_during(self, event_filter=None, expected_events=None, in_order=True):
"""
Context manager that ensures that events matching the `event_filter` and `expected_events` are emitted.
This context manager will filter out the event stream using the `event_filter` and wait for
`len(expected_events)` to match the filter.
It will then compare the events in order with their counterpart in `expected_events` to ensure they match the
more detailed assertion.
Typically `event_filter` will be an `event_type` filter and the `expected_events` list will contain more
detailed assertions.
"""
captured_events = []
with self.capture_events(event_filter, len(expected_events), captured_events):
yield
self.assert_events_match(expected_events, captured_events, in_order=in_order)
def wait_for_events(self, start_time=None, event_filter=None, number_of_matches=1, timeout=None):
    """
    Wait for `number_of_matches` events to pass the `event_filter`.

    By default, this will look at all events that have been emitted since the beginning of the setup of this
    mixin. A custom `start_time` can be specified which will limit the events searched to only those emitted
    after that time.

    The `event_filter` is expected to be a callable that allows you to filter the event stream and select
    particular events of interest. A dictionary `event_filter` is also supported, which simply indicates that
    the event should match that provided expectation.

    `number_of_matches` lets us know when enough events have been found and it can move on. The function will
    not return until this many events have passed the filter. If not enough events are found before a timeout
    expires, then this will raise a `BrokenPromise` error. Note that this simply states that *at least* this
    many events have been emitted, so `number_of_matches` is simply a lower bound for the number of matches.

    Specifying a custom `timeout` can allow you to extend the default 30 second timeout if necessary.
    """
    if start_time is None:
        # Default to the moment this mixin's setup began.
        start_time = self.start_time
    if timeout is None:
        timeout = 30

    def check_for_matching_events():
        """Gather any events that have been emitted since `start_time`"""
        # Returns the (success, events) tuple shape that Promise expects
        # from its check function.
        return self.matching_events_were_emitted(
            start_time=start_time,
            event_filter=event_filter,
            number_of_matches=number_of_matches
        )

    return Promise(
        check_for_matching_events,
        # This is a bit of a hack, Promise calls str(description), so I set the description to an object with a
        # custom __str__ and have it do some intelligent stuff to generate a helpful error message.
        CollectedEventsDescription(
            'Waiting for {number_of_matches} events to match the filter:\n{event_filter}'.format(
                number_of_matches=number_of_matches,
                event_filter=self.event_filter_to_descriptive_string(event_filter),
            ),
            # Deliberately dumps ALL events since `start_time` (empty-dict
            # filter) so the failure output shows what was actually emitted.
            functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
        ),
        timeout=timeout
    ).fulfill()
def matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
    """
    Check whether at least `number_of_matches` events passing `event_filter`
    were emitted since `start_time`.

    Returns:
        tuple: ``(success, matching_events)`` where ``success`` is True when
        enough events matched. NOTE: despite the name this returns a 2-tuple,
        matching the (done, result) shape consumed by the Promise check in
        `wait_for_events`; callers wanting just the boolean must unpack it
        first (a non-empty tuple is always truthy).
    """
    matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)
    return len(matching_events) >= number_of_matches, matching_events
def get_matching_events_from_time(self, start_time=None, event_filter=None):
    """
    Return a list of events that pass the `event_filter` and were emitted after `start_time`.

    This function is used internally by most of the other assertions and convenience methods in this class.

    Args:
        start_time (datetime): earliest emission time to consider; defaults to
            ``self.start_time`` (the beginning of this mixin's setup).
        event_filter (dict, callable, or None): a dict is matched against each
            event via `is_matching_event`; a callable is invoked with the event
            and may return True (or None) to accept it, and may use ``assert``
            to reject it; None accepts every event.

    Raises:
        ValueError: if `event_filter` is neither None, a dict, nor a callable.
    """
    if start_time is None:
        start_time = self.start_time
    # BUG FIX: previously a None filter fell through to the callable check and
    # raised ValueError, even though the signature defaults to None and the
    # loop below explicitly guards `if event_filter is not None`.  None now
    # means "accept every event".
    if event_filter is not None:
        if isinstance(event_filter, dict):
            event_filter = functools.partial(is_matching_event, event_filter)
        elif not callable(event_filter):
            raise ValueError(
                'event_filter must either be a dict or a callable function with as single "event" parameter that '
                'returns a boolean value.'
            )
    matching_events = []
    cursor = self.event_collection.find(
        {
            "time": {
                "$gte": start_time
            }
        }
    ).sort("time", ASCENDING)
    for event in cursor:
        # With no filter every event matches; otherwise assume no match until
        # the filter says otherwise.
        matches = event_filter is None
        try:
            # Mongo automatically assigns an _id to all events inserted into it. We strip it out here, since
            # we don't care about it.
            del event['_id']
            if event_filter is not None:
                # Typically we will be grabbing all events of a particular type, however, you can use arbitrary
                # logic to identify the events that are of interest.
                matches = event_filter(event)
        except AssertionError:
            # allow the filters to use "assert" to filter out events
            continue
        else:
            if matches is None or matches:
                matching_events.append(event)
    return matching_events
def assert_matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
    """Assert that at least `number_of_matches` events have passed the filter since `start_time`."""
    description = CollectedEventsDescription(
        'Not enough events match the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
        functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
    )
    # BUG FIX: matching_events_were_emitted returns a (success, events) tuple.
    # Passing the tuple straight to assertTrue made this assertion vacuous,
    # because any non-empty tuple is truthy.  Unpack the boolean first.
    emitted_enough, _matching_events = self.matching_events_were_emitted(
        start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches
    )
    self.assertTrue(emitted_enough, description)
def assert_no_matching_events_were_emitted(self, event_filter, start_time=None):
    """Assert that no events have passed the filter since `start_time`."""
    matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)
    # Grammar fix in the failure message: "unexpected matched" -> "unexpectedly matched".
    description = CollectedEventsDescription(
        'Events unexpectedly matched the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
        lambda: matching_events
    )
    # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
    self.assertEqual(len(matching_events), 0, description)
def assert_events_match(self, expected_events, actual_events, in_order=True):
    """Assert that each expected event matches one of the actual events.

    Args:
        expected_events (List): a list of dicts representing the expected events.
        actual_events (List): a list of dicts that were actually recorded.
        in_order (bool): if True then the events must be in the same order (defaults to True).
    """
    if in_order:
        # NOTE(review): zip truncates to the shorter list, so surplus actual
        # events (or a short actual list) pass silently — preserved behavior;
        # confirm this leniency is intentional before tightening.
        for expected_event, actual_event in zip(expected_events, actual_events):
            assert_event_matches(
                expected_event,
                actual_event,
                tolerate=EventMatchTolerates.lenient()
            )
    else:
        for expected_event in expected_events:
            # BUG FIX: `next` without a default raised an opaque StopIteration
            # when no actual event matched, which also made the `or {}`
            # fallback below unreachable.  With a default of None the fallback
            # engages and assert_event_matches reports a readable failure
            # against the empty dict instead.
            actual_event = next(
                (event for event in actual_events if is_matching_event(expected_event, event)),
                None
            )
            assert_event_matches(
                expected_event,
                actual_event or {},
                tolerate=EventMatchTolerates.lenient()
            )
def relative_path_to_absolute_uri(self, relative_path):
    """Return an absolute URI for `relative_path`, resolved against the test BASE_URL."""
    return urlparse.urljoin(BASE_URL, relative_path)
def event_filter_to_descriptive_string(self, event_filter):
    """
    Render `event_filter` for failure messages.

    For a callable whose source is available, show its file, line number and
    source text; otherwise fall back to pretty-printing the filter itself.
    """
    divider = '-' * 20
    message = ''
    if callable(event_filter):
        file_name = '(unknown)'
        try:
            file_name = inspect.getsourcefile(event_filter)
        except TypeError:
            # Builtins and C-level callables have no source file.
            pass
        try:
            list_of_source_lines, line_no = inspect.getsourcelines(event_filter)
        # BUG FIX: getsourcelines raises TypeError (not just IOError) for
        # builtins/C callables; previously that escaped and crashed the
        # error-reporting path itself.
        except (IOError, TypeError):
            pass
        else:
            message = '{file_name}:{line_no}\n{hr}\n{event_filter}\n{hr}'.format(
                event_filter=''.join(list_of_source_lines).rstrip(),
                file_name=file_name,
                line_no=line_no,
                hr=divider,
            )
    if not message:
        message = '{hr}\n{event_filter}\n{hr}'.format(
            event_filter=pprint.pformat(event_filter),
            hr=divider,
        )
    return message
class CollectedEventsDescription(object):
    """
    Lazily render collected events to produce a clear failure message.

    When converted to a string this calls the provided `get_events_func`,
    sorts the returned events newest-first, and pretty prints them,
    truncating overly long lists.
    """

    def __init__(self, description, get_events_func):
        self.description = description
        self.get_events_func = get_events_func

    def __str__(self):
        sections = [self.description, 'Events:']
        events = self.get_events_func()
        events.sort(key=operator.itemgetter('time'), reverse=True)
        sections.extend(pprint.pformat(event) for event in events[:MAX_EVENTS_IN_FAILURE_OUTPUT])
        if len(events) > MAX_EVENTS_IN_FAILURE_OUTPUT:
            sections.append(
                'Too many events to display, the remaining events were omitted. Run locally to diagnose.')
        return '\n\n'.join(sections)
class AcceptanceTest(WebAppTest):
    """
    The base class of all acceptance tests.
    """

    def __init__(self, *args, **kwargs):
        super(AcceptanceTest, self).__init__(*args, **kwargs)
        # Use long messages so that failures show actual and expected values
        self.longMessage = True  # pylint: disable=invalid-name

    def tearDown(self):
        # Load a page from whichever service responds (LMS first, CMS as a
        # fallback) so the captured browser console log can be read back from
        # that origin's localStorage.
        try:
            self.browser.get('http://{}:{}'.format(
                os.environ.get('BOK_CHOY_HOSTNAME', '127.0.0.1'),
                os.environ.get('BOK_CHOY_LMS_PORT', 8003),
            ))
        except:  # pylint: disable=bare-except
            self.browser.get('http://{}:{}'.format(
                os.environ.get('BOK_CHOY_HOSTNAME', '127.0.0.1'),
                os.environ.get('BOK_CHOY_CMS_PORT', 8031),
            ))
        logs = self.browser.execute_script("return window.localStorage.getItem('console_log_capture');")
        if not logs:
            return
        logs = json.loads(logs)
        log_dir = path('test_root') / 'log'
        # BUG FIX: the membership test used lowercase 'shard' while the lookup
        # used uppercase 'SHARD', so a set SHARD variable was silently ignored
        # and a set 'shard' variable raised KeyError.  Use 'SHARD' for both.
        if 'SHARD' in os.environ:
            log_dir /= "shard_{}".format(os.environ["SHARD"])
        log_dir.mkdir_p()
        # One browser-console log file per test, keyed by the test id.
        with (log_dir / '{}.browser.log'.format(self.id()[:60])).open('w') as browser_log:
            for (message, url, line_no, col_no, stack) in logs:
                browser_log.write(u"{}:{}:{}: {}\n {}\n".format(
                    url,
                    line_no,
                    col_no,
                    message,
                    (stack or "").replace('\n', '\n ')
                ))
        super(AcceptanceTest, self).tearDown()
class UniqueCourseTest(AcceptanceTest):
    """
    Test that provides a unique course ID.
    """

    def setUp(self):
        super(UniqueCourseTest, self).setUp()
        # `unique_id` makes the course number (and display name) unique per
        # test run; XSS_INJECTION is embedded in the display name so any page
        # rendering it implicitly checks output escaping.
        self.course_info = {
            'org': 'test_org',
            'number': self.unique_id,
            'run': 'test_run',
            'display_name': 'Test Course' + XSS_INJECTION + self.unique_id
        }

    @property
    def course_id(self):
        """
        Returns the serialized course_key for the test
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        default_store = os.environ.get('DEFAULT_STORE', 'draft')
        # The 'draft' (Old Mongo) store uses deprecated-style course keys.
        course_key = CourseLocator(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            deprecated=(default_store == 'draft')
        )
        return unicode(course_key)
class YouTubeConfigError(Exception):
    """Raised when the YouTube stub server cannot be configured or reset."""
    pass
class YouTubeStubConfig(object):
    """
    Configure YouTube Stub Server.
    """

    # Host/port where the stub server listens; hostname can be overridden for
    # containerized test runs.
    YOUTUBE_HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', '127.0.0.1')
    PORT = 9080
    URL = 'http://{}:{}/'.format(YOUTUBE_HOSTNAME, PORT)

    @classmethod
    def configure(cls, config):
        """
        Allow callers to configure the stub server using the /set_config URL.

        Arguments:
            config (dict): Configuration dictionary.

        Raises:
            YouTubeConfigError
        """
        youtube_stub_config_url = cls.URL + 'set_config'
        # The stub expects each value JSON-encoded individually.
        config_data = {param: json.dumps(value) for param, value in config.items()}
        response = requests.put(youtube_stub_config_url, data=config_data)
        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
                    youtube_stub_config_url, config, response.status_code))

    @classmethod
    def reset(cls):
        """
        Reset YouTube Stub Server Configurations using the /del_config URL.

        Raises:
            YouTubeConfigError
        """
        youtube_stub_config_url = cls.URL + 'del_config'
        response = requests.delete(youtube_stub_config_url)
        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
                    youtube_stub_config_url, response.status_code))

    @classmethod
    def get_configuration(cls):
        """
        Allow callers to get current stub server configuration.

        Returns:
            dict: the stub server's current configuration, or an empty dict if
            the request fails (best-effort; no exception is raised here).
        """
        youtube_stub_config_url = cls.URL + 'get_config'
        response = requests.get(youtube_stub_config_url)
        if response.ok:
            return json.loads(response.content)
        else:
            return {}
def click_and_wait_for_window(page, element):
    """
    To avoid a race condition, click an element that launches a new window, and
    wait for that window to launch.
    To check this, make sure the number of window_handles increases by one.

    Arguments:
        page (PageObject): Page object to perform method on
        element (WebElement): Clickable element that triggers the new window to open
    """
    # Snapshot the handle count before clicking so the new window is detectable.
    num_windows = len(page.browser.window_handles)
    element.click()
    # Block (up to 10 seconds) until the browser reports an extra window handle.
    WebDriverWait(page.browser, 10).until(
        lambda driver: len(driver.window_handles) > num_windows
    )
def create_user_partition_json(partition_id, name, description, groups, scheme="random"):
    """
    Helper method to create user partition JSON. If scheme is not supplied, "random" is used.
    """
    # Only the scheme's name is persisted, so a stub carrying the right name
    # is all UserPartition needs here.
    class MockScheme(object):
        name = scheme

    partition = UserPartition(partition_id, name, description, groups, MockScheme())
    return partition.to_json()
def assert_nav_help_link(test, page, href, signed_in=True, close_window=True):
    """
    Asserts that help link in navigation bar is correct.

    It first checks the url inside anchor DOM element and
    then clicks to ensure that help opens correctly.

    Arguments:
        test (AcceptanceTest): Test object
        page (PageObject): Page object to perform tests on.
        href (str): The help link which we expect to see when it is opened.
        signed_in (bool): Specifies whether user is logged in or not. (It affects the css)
        close_window(bool): Close the newly-opened help window before continuing
    """
    expected_link = {
        'href': href,
        'text': 'Help'
    }
    # Get actual anchor help element from the page.
    actual_link = page.get_nav_help_element_and_click_help(signed_in)
    # Assert that 'href' and text are the same as expected.
    assert_link(test, expected_link, actual_link)
    # Assert that opened link is correct
    assert_opened_help_link_is_correct(test, href)
    # Close the help window if not kept open intentionally
    if close_window:
        close_help_window(page)
def assert_side_bar_help_link(test, page, href, help_text, as_list_item=False, index=-1, close_window=True):
    """
    Asserts that help link in side bar is correct.

    It first checks the url inside anchor DOM element and
    then clicks to ensure that help opens correctly.

    Arguments:
        test (AcceptanceTest): Test object
        page (PageObject): Page object to perform tests on.
        href (str): The help link which we expect to see when it is opened.
        help_text (str): The text expected on the help anchor element.
        as_list_item (bool): Specifies whether help element is in one of the
            'li' inside a sidebar list DOM element.
        index (int): The index of element in case there are more than
            one matching elements.
        close_window(bool): Close the newly-opened help window before continuing
    """
    expected_link = {
        'href': href,
        'text': help_text
    }
    # Get actual anchor help element from the page.
    actual_link = page.get_side_bar_help_element_and_click_help(as_list_item=as_list_item, index=index)
    # Assert that 'href' and text are the same as expected.
    assert_link(test, expected_link, actual_link)
    # Assert that opened link is correct
    assert_opened_help_link_is_correct(test, href)
    # Close the help window if not kept open intentionally
    if close_window:
        close_help_window(page)
def close_help_window(page):
    """
    Closes the help window if the browser is currently on the edX docs site.

    Args:
        page (PageObject): Page object to perform tests on.
    """
    browser_url = page.browser.current_url
    # str.startswith accepts a tuple of prefixes; one call replaces the
    # hand-written `or` chain.
    if browser_url.startswith(('https://edx.readthedocs.io', 'http://edx.readthedocs.io')):
        page.browser.close()  # close only the current window
        # Return focus to the first (original) window.
        page.browser.switch_to_window(page.browser.window_handles[0])
class TestWithSearchIndexMixin(object):
    """ Mixin encapsulating search index creation """
    # Backing file used by the stubbed search index.
    TEST_INDEX_FILENAME = "test_root/index_file.dat"

    def _create_search_index(self):
        """ Creates search index backing file """
        # Seed with an empty JSON object so the index starts valid but empty.
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            json.dump({}, index_file)

    def _cleanup_index_file(self):
        """ Removes search index backing file """
        remove_file(self.TEST_INDEX_FILENAME)
|
kmoocdev2/edx-platform
|
common/test/acceptance/tests/helpers.py
|
Python
|
agpl-3.0
| 36,474
|
[
"VisIt"
] |
da89d5bc99aa6211a5a071ced4fcfbb1deaed53c4d7ff9184c22e7887272a48b
|
<<<<<<< HEAD
<<<<<<< HEAD
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse *source* into an AST node.

    Shorthand for ``compile(source, filename, mode, PyCF_ONLY_AST)``.
    """
    return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression.  The string or node provided may only consist of the following
    Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
    sets, booleans, and None.
    """
    # Accept either source text or an already-parsed node/Expression wrapper.
    if isinstance(node_or_string, str):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body

    def _convert(node):
        # Recursively map whitelisted literal AST nodes onto the values they
        # denote; anything else falls through to the ValueError below.
        if isinstance(node, (Str, Bytes)):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Set):
            return set(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, NameConstant):
            # True / False / None
            return node.value
        elif isinstance(node, UnaryOp) and \
                isinstance(node.op, (UAdd, USub)) and \
                isinstance(node.operand, (Num, UnaryOp, BinOp)):
            # Signed numeric literals such as -1 or +2.5 (possibly nested).
            operand = _convert(node.operand)
            if isinstance(node.op, UAdd):
                return + operand
            else:
                return - operand
        elif isinstance(node, BinOp) and \
                isinstance(node.op, (Add, Sub)) and \
                isinstance(node.right, (Num, UnaryOp, BinOp)) and \
                isinstance(node.left, (Num, UnaryOp, BinOp)):
            # Additive expressions of numeric literals, mainly so complex
            # numbers written as e.g. ``1 + 2j`` evaluate.
            left = _convert(node.left)
            right = _convert(node.right)
            if isinstance(node.op, Add):
                return left + right
            else:
                return left - right
        raise ValueError('malformed node or string: ' + repr(node))
    return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*.  This is mainly useful for
    debugging purposes.  The returned string will show the names and the values
    for fields.  This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False.  Attributes such as line
    numbers and column offsets are not dumped by default.  If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node):
        # Render AST nodes as ``Name(field=value, ...)``, lists as ``[...]``,
        # and everything else via repr().
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                ('%s=%s' % field for field in fields)
                if annotate_fields else
                (b for a, b in fields)
            ))
            if include_attributes and node._attributes:
                # Only insert a comma separator when fields were emitted.
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)
def copy_location(new_node, old_node):
    """
    Copy source location (`lineno` and `col_offset` attributes) from
    *old_node* to *new_node* if possible, and return *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        transferable = (
            attr in old_node._attributes
            and attr in new_node._attributes
            and hasattr(old_node, attr)
        )
        if transferable:
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    When you compile a node tree with compile(), the compiler expects lineno and
    col_offset attributes for every node that supports them.  This is rather
    tedious to fill in for generated nodes, so this helper adds these attributes
    recursively where not already set, by setting them to the values of the
    parent node.  It works recursively starting at *node*.
    """
    def _fix(node, lineno, col_offset):
        # A node missing a location inherits the parent's; a node that has
        # one becomes the default for its own children.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    # The root of the tree defaults to line 1, column 0.
    _fix(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line number of each node in the tree starting at *node* by *n*.
    This is useful to "move code" to a different location in a file.
    """
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
        # Generalization: also shift end_lineno on interpreters whose nodes
        # track it (guarded by _attributes, so behavior is unchanged where it
        # does not exist).  Guard against None, which the parser may use.
        if 'end_lineno' in child._attributes:
            end_lineno = getattr(child, 'end_lineno', 0)
            if end_lineno is not None:
                child.end_lineno = end_lineno + n
    return node
def iter_fields(node):
    """
    Yield ``(fieldname, value)`` pairs for every field in ``node._fields``
    that is actually present on *node*.
    """
    for name in node._fields:
        if hasattr(node, name):
            yield name, getattr(node, name)
def iter_child_nodes(node):
    """
    Yield every direct child node of *node*: each field that is itself a
    node, plus each node contained in list-valued fields.
    """
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found.  If the node provided does not have docstrings a TypeError
    will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # A docstring is a bare string expression appearing first in the body.
    if node.body and isinstance(node.body[0], Expr) and \
       isinstance(node.body[0].value, Str):
        if clean:
            # cleandoc normalizes leading indentation the way help() does.
            import inspect
            return inspect.cleandoc(node.body[0].value.s)
        return node.body[0].value.s
def walk(node):
    """
    Yield *node* and all of its descendant nodes (breadth-first, but in no
    guaranteed order).  Useful when modifying nodes in place without caring
    about their context.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Base class for AST walkers that dispatch on node type.

    `visit` looks up a method named ``visit_<ClassName>`` for the node's class
    and calls it, forwarding the return value; if no such method exists,
    `generic_visit` runs instead and simply visits all child nodes.

    Subclass this and add ``visit_*`` methods.  Do not use `NodeVisitor` to
    change nodes while traversing — use `NodeTransformer` for that.
    """

    def visit(self, node):
        """Visit a node."""
        handler = getattr(self, 'visit_' + node.__class__.__name__, self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node.  If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value.  The return value may be the
    original node in which case no replacement takes place.

    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::

        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)

    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.

    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.

    Usually you use the transformer like this::

        node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                # Visit each child: None removes it, a non-AST return value is
                # treated as a list of replacement nodes and spliced in place.
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            continue
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the original list in place so the node keeps it.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    # Returning None deletes the child attribute entirely.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
=======
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse *source* into an AST node.

    Shorthand for ``compile(source, filename, mode, PyCF_ONLY_AST)``.
    """
    return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression.  The string or node provided may only consist of the following
    Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
    sets, booleans, and None.
    """
    # Accept either source text or an already-parsed node/Expression wrapper.
    if isinstance(node_or_string, str):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body

    def _convert(node):
        # Recursively map whitelisted literal AST nodes onto the values they
        # denote; anything else falls through to the ValueError below.
        if isinstance(node, (Str, Bytes)):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Set):
            return set(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, NameConstant):
            # True / False / None
            return node.value
        elif isinstance(node, UnaryOp) and \
                isinstance(node.op, (UAdd, USub)) and \
                isinstance(node.operand, (Num, UnaryOp, BinOp)):
            # Signed numeric literals such as -1 or +2.5 (possibly nested).
            operand = _convert(node.operand)
            if isinstance(node.op, UAdd):
                return + operand
            else:
                return - operand
        elif isinstance(node, BinOp) and \
                isinstance(node.op, (Add, Sub)) and \
                isinstance(node.right, (Num, UnaryOp, BinOp)) and \
                isinstance(node.left, (Num, UnaryOp, BinOp)):
            # Additive expressions of numeric literals, mainly so complex
            # numbers written as e.g. ``1 + 2j`` evaluate.
            left = _convert(node.left)
            right = _convert(node.right)
            if isinstance(node.op, Add):
                return left + right
            else:
                return left - right
        raise ValueError('malformed node or string: ' + repr(node))
    return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*.  This is mainly useful for
    debugging purposes.  The returned string will show the names and the values
    for fields.  This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False.  Attributes such as line
    numbers and column offsets are not dumped by default.  If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node):
        # Render AST nodes as ``Name(field=value, ...)``, lists as ``[...]``,
        # and everything else via repr().
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                ('%s=%s' % field for field in fields)
                if annotate_fields else
                (b for a, b in fields)
            ))
            if include_attributes and node._attributes:
                # Only insert a comma separator when fields were emitted.
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)
def copy_location(new_node, old_node):
    """
    Copy source location (`lineno` and `col_offset` attributes) from
    *old_node* to *new_node* if possible, and return *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        transferable = (
            attr in old_node._attributes
            and attr in new_node._attributes
            and hasattr(old_node, attr)
        )
        if transferable:
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    When you compile a node tree with compile(), the compiler expects lineno and
    col_offset attributes for every node that supports them.  This is rather
    tedious to fill in for generated nodes, so this helper adds these attributes
    recursively where not already set, by setting them to the values of the
    parent node.  It works recursively starting at *node*.
    """
    def _fix(node, lineno, col_offset):
        # A node missing a location inherits the parent's; a node that has
        # one becomes the default for its own children.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    # The root of the tree defaults to line 1, column 0.
    _fix(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line number of each node in the tree starting at *node* by *n*.
    This is useful to "move code" to a different location in a file.
    """
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
        # Generalization: also shift end_lineno on interpreters whose nodes
        # track it (guarded by _attributes, so behavior is unchanged where it
        # does not exist).  Guard against None, which the parser may use.
        if 'end_lineno' in child._attributes:
            end_lineno = getattr(child, 'end_lineno', 0)
            if end_lineno is not None:
                child.end_lineno = end_lineno + n
    return node
def iter_fields(node):
    """
    Yield ``(fieldname, value)`` pairs for every field in ``node._fields``
    that is actually present on *node*.
    """
    for name in node._fields:
        if hasattr(node, name):
            yield name, getattr(node, name)
def iter_child_nodes(node):
    """
    Yield every direct child node of *node*: each field that is itself a
    node, plus each node contained in list-valued fields.
    """
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found.  If the node provided does not have docstrings a TypeError
    will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # A docstring is a bare string expression appearing first in the body.
    if node.body and isinstance(node.body[0], Expr) and \
       isinstance(node.body[0].value, Str):
        if clean:
            # cleandoc normalizes leading indentation the way help() does.
            import inspect
            return inspect.cleandoc(node.body[0].value.s)
        return node.body[0].value.s
def walk(node):
    """
    Yield *node* and all of its descendant nodes (breadth-first, but in no
    guaranteed order).  Useful when modifying nodes in place without caring
    about their context.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Base class for AST walkers that dispatch on node type.

    `visit` looks up a method named ``visit_<ClassName>`` for the node's class
    and calls it, forwarding the return value; if no such method exists,
    `generic_visit` runs instead and simply visits all child nodes.

    Subclass this and add ``visit_*`` methods.  Do not use `NodeVisitor` to
    change nodes while traversing — use `NodeTransformer` for that.
    """

    def visit(self, node):
        """Visit a node."""
        handler = getattr(self, 'visit_' + node.__class__.__name__, self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.
    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node. If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value. The return value may be the
    original node in which case no replacement takes place.
    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::
        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)
    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.
    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.
    Usually you use the transformer like this::
        node = YourTransformer().visit(node)
    """
    def generic_visit(self, node):
        # Visit every child field; the return value of each visit decides
        # whether that child is kept, replaced, spliced in, or removed.
        for field, old_value in iter_fields(node):
            # NOTE(review): this getattr immediately overwrites the value
            # already yielded by iter_fields -- redundant but harmless.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            # Visitor returned None: drop this child.
                            continue
                        elif not isinstance(value, AST):
                            # Visitor returned a list of nodes: splice it in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the list in place so the parent keeps the same object.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    # Removing a non-list child means deleting the attribute.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse *source* into an AST node.

    Shorthand for ``compile(source, filename, mode, PyCF_ONLY_AST)``.
    """
    flags = PyCF_ONLY_AST
    return compile(source, filename, mode, flags)
def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression. The string or node provided may only consist of the following
    Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
    sets, booleans, and None.

    NOTE(review): this implementation predates Python 3.8 -- it dispatches on
    ``Str``/``Bytes``/``Num``/``NameConstant`` nodes, which were later folded
    into ``Constant``.  Under a modern interpreter every literal would hit the
    final ``ValueError``; confirm the targeted Python version.
    """
    # Accept either a source string or an already-parsed node.
    if isinstance(node_or_string, str):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body
    def _convert(node):
        # Recursively map literal AST nodes to the values they denote.
        if isinstance(node, (Str, Bytes)):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Set):
            return set(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, NameConstant):
            # True / False / None
            return node.value
        # Unary +/- is allowed on numbers so that e.g. "-1" parses.
        elif isinstance(node, UnaryOp) and \
             isinstance(node.op, (UAdd, USub)) and \
             isinstance(node.operand, (Num, UnaryOp, BinOp)):
            operand = _convert(node.operand)
            if isinstance(node.op, UAdd):
                return + operand
            else:
                return - operand
        # Binary +/- on numeric subtrees is allowed so that complex
        # literals such as "1+2j" can be evaluated.
        elif isinstance(node, BinOp) and \
             isinstance(node.op, (Add, Sub)) and \
             isinstance(node.right, (Num, UnaryOp, BinOp)) and \
             isinstance(node.left, (Num, UnaryOp, BinOp)):
            left = _convert(node.left)
            right = _convert(node.right)
            if isinstance(node.op, Add):
                return left + right
            else:
                return left - right
        # Anything else is not a literal and is rejected.
        raise ValueError('malformed node or string: ' + repr(node))
    return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a debugging string representation of the tree rooted at *node*.

    Field names are included unless *annotate_fields* is False (shorter
    output, but it no longer shows which value belongs to which field).
    Location attributes such as line numbers and column offsets are only
    shown when *include_attributes* is True.
    """
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)

    def _render(obj):
        if isinstance(obj, list):
            return '[%s]' % ', '.join(_render(item) for item in obj)
        if not isinstance(obj, AST):
            return repr(obj)
        pairs = [(name, _render(value)) for name, value in iter_fields(obj)]
        if annotate_fields:
            args = ', '.join('%s=%s' % pair for pair in pairs)
        else:
            args = ', '.join(text for _name, text in pairs)
        rendered = '%s(%s' % (obj.__class__.__name__, args)
        if include_attributes and obj._attributes:
            # Historical quirk preserved: a lone space separates the
            # attributes when there were no fields at all.
            rendered += pairs and ', ' or ' '
            rendered += ', '.join('%s=%s' % (name, _render(getattr(obj, name)))
                                  for name in obj._attributes)
        return rendered + ')'

    return _render(node)
def copy_location(new_node, old_node):
    """
    Copy the source position (``lineno`` and ``col_offset``) from
    *old_node* onto *new_node* wherever both node types support the
    attribute and *old_node* actually has it set; return *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        supported = attr in old_node._attributes and attr in new_node._attributes
        if supported and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    Recursively fill in missing ``lineno`` / ``col_offset`` attributes on
    generated nodes, inheriting each missing value from the nearest
    ancestor that has one (the root defaults to line 1, column 0).
    Returns *node* so calls can be chained into ``compile()``.
    """
    def _propagate(current, lineno, col_offset):
        if 'lineno' in current._attributes:
            if hasattr(current, 'lineno'):
                # Existing location wins and becomes the new inherited value.
                lineno = current.lineno
            else:
                current.lineno = lineno
        if 'col_offset' in current._attributes:
            if hasattr(current, 'col_offset'):
                col_offset = current.col_offset
            else:
                current.col_offset = col_offset
        for child in iter_child_nodes(current):
            _propagate(child, lineno, col_offset)
    _propagate(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Add *n* to the line number of every node in the tree rooted at *node*
    (treating a missing line number as 0) and return *node*.  Useful for
    "moving" generated code to a different location in a file.
    """
    for descendant in walk(node):
        if 'lineno' not in descendant._attributes:
            continue
        descendant.lineno = getattr(descendant, 'lineno', 0) + n
    return node
def iter_fields(node):
    """
    Yield a ``(fieldname, value)`` tuple for each field declared in
    ``node._fields`` that is present on *node*; fields that are not set
    are skipped without error.
    """
    for fieldname in node._fields:
        try:
            value = getattr(node, fieldname)
        except AttributeError:
            pass
        else:
            yield fieldname, value
def iter_child_nodes(node):
    """
    Yield each direct child node of *node*.  A child is either a field
    whose value is itself a node, or a node item inside a field that is
    a list of nodes.
    """
    for _name, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
            continue
        if not isinstance(value, list):
            continue
        for element in value:
            if isinstance(element, AST):
                yield element
def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found. If the node provided does not have docstrings a TypeError
    will be raised.

    NOTE(review): matches the pre-3.8 ``Str`` node class; under Python 3.8+
    string constants are ``Constant`` nodes, so this would not recognise
    docstrings -- confirm the targeted Python version.
    """
    # Only functions, classes and modules can carry a docstring.
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # A docstring is a bare string expression appearing first in the body.
    if node.body and isinstance(node.body[0], Expr) and \
       isinstance(node.body[0].value, Str):
        if clean:
            # cleandoc normalises the indentation of the docstring text.
            import inspect
            return inspect.cleandoc(node.body[0].value.s)
        return node.body[0].value.s
    # Implicitly returns None when no docstring is present.
def walk(node):
    """
    Generate *node* and every node below it in the tree, in no specified
    order (in practice: breadth-first).  Intended for in-place edits
    where the surrounding context of each node is irrelevant.
    """
    from collections import deque
    queue = deque()
    queue.append(node)
    while queue:
        current = queue.popleft()
        for child in iter_child_nodes(current):
            queue.append(child)
        yield current
class NodeVisitor(object):
    """
    Walks an abstract syntax tree, calling a visitor method for every
    node encountered and forwarding that method's return value.

    Dispatch is by node class name: a ``TryFinally`` node is routed to
    ``visit_TryFinally`` when the subclass provides it, otherwise to
    :meth:`generic_visit` which just descends into the children.
    Override ``visit`` to change the dispatch scheme.

    This class is read-only by design; to rewrite nodes while walking,
    use :class:`NodeTransformer` instead.
    """

    def visit(self, node):
        """Dispatch to visit_<ClassName> or fall back to generic_visit."""
        name = 'visit_' + node.__class__.__name__
        return getattr(self, name, self.generic_visit)(node)

    def generic_visit(self, node):
        """Default visitor: visit every child node of *node*."""
        for _field, value in iter_fields(node):
            if isinstance(value, list):
                for entry in value:
                    if isinstance(entry, AST):
                        self.visit(entry)
            elif isinstance(value, AST):
                self.visit(value)
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.
    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node. If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value. The return value may be the
    original node in which case no replacement takes place.
    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::
        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)
    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.
    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.
    Usually you use the transformer like this::
        node = YourTransformer().visit(node)
    """
    def generic_visit(self, node):
        # Rebuild each child field from the visitors' return values:
        # None drops a child, a list splices multiple nodes in, and a
        # node replaces the original.
        for field, old_value in iter_fields(node):
            # NOTE(review): redundant -- iter_fields already yielded the
            # field value; the extra getattr is harmless.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            # Visitor asked for removal of this child.
                            continue
                        elif not isinstance(value, AST):
                            # Visitor returned several nodes: splice them in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # In-place slice assignment keeps the same list object.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    # A removed scalar child means deleting the attribute.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
ArcherSys/ArcherSys
|
Lib/ast.py
|
Python
|
mit
| 36,242
|
[
"VisIt"
] |
38f384b9a82f2332e968ddf14761cef0e950526c4d7500b39ac88d80e0691c7d
|
from os.path import join
import h5py
import numpy as np
import scipy.stats as ss
def generate_data(path):
    """
    Write ``demo_data.hdf5`` under *path*, containing one HDF5 group per
    probability distribution.  Each group holds an ``x`` dataset (1000
    points on [-1, 1]) and a ``pdf`` dataset with that distribution's
    probability density evaluated at those points.
    """
    distributions = {'Gaussian': {'options': dict(loc=0, scale=0.1),
                                  'name': 'norm'},
                     'Exponential': {'options': dict(loc=-0.5, scale=1),
                                     'name': 'expon'},
                     'Chi Square': {'options': dict(loc=-0.5, df=1),
                                    'name': 'chi2'},
                     'Alpha': {'options': dict(a=3, loc=-0.5),
                               'name': 'alpha'},
                     'Beta': {'options': dict(a=3, b=2, loc=-0.5),
                              'name': 'beta'}
                     }
    x = np.linspace(-1, 1, num=1000)
    out_path = join(path, 'demo_data.hdf5')
    with h5py.File(out_path, 'w') as handle:
        for label, spec in distributions.items():
            # Freeze the scipy.stats distribution with its parameters,
            # then evaluate its density on the shared grid.
            frozen = getattr(ss, spec['name'])(**spec['options'])
            group = handle.create_group(label)
            group.create_dataset("x", data=x)
            group.create_dataset("pdf", data=frozen.pdf(x))
|
ericmjl/bokeh
|
examples/app/simple_hdf5/create_hdf5.py
|
Python
|
bsd-3-clause
| 1,089
|
[
"Gaussian"
] |
ba464cc9fad8f0f007982e80eaf49296489f6eaef53448dc7a0a5f678d54eb1d
|
import numpy as np
import scipy.special
import scipy.linalg
import vbr_utilities
import time
import pdb
import copy
import matplotlib.pyplot as plt
import sys
# TO DO: Check that the fixed Gamma hyper-hyperpriors that I use are appropriately uninformative...
######################################################################################################
#
# This code is mostly based on material contained in the following sources:
#
# 1. Bishop & Tipping, 2000, "Variational Relevance Vector Machines", in "Uncertainty in Artificial
# Intelligence Proceedings 2000", p46-53
# 2. Section 3 of Chapter 10 in Bishop, 2006, "Pattern Recognition and Machine Learning", published
# by Springer, USA.
# 3. Notes by Jan Drugowitsch, 2008 (updated 2010), accessible here:
# http://www.lnc.ens.fr/~jdrugowi/code/bayes_linear_notes-0.1.3.pdf
#
######################################################################################################
def do_linear_regression( vbr_object, convergence=0.00001 ):
    """
    Uses the VB algorithm to perform linear regression. This routine assumes
    that the following object attributes have already been set:
      - phi_train_norm (normalised basis matrix)
      - target_train_norm (normalised target vector)
    The form of the Bayesian model is:
        p( t | Phi(x), w, beta ) = prod( N( t_n | Phi_n*w, beta^(-1) )
    where Phi_m is the mth row of the NxM basis matrix Phi(x), and where:
        p( w | alpha ) = prod( N( w_m | 0, alpha_m ) )
        p( alpha ) = prod( Gam( alpha_m | a, b_m ) )
        p( beta ) = Gam( beta | c, d )
    Note that this notation is more consistent with that used by Bishop &
    Tipping 2000 as opposed to Drugowitsch; the main difference is the notation
    of (a,b) and (c,d) for the hyperparameters on alpha and beta, which are
    swapped compared to Drugowitsch (which can be annoyingly confusing).
    Output is generated in the form of the following object attributes:
      **model_weights_means - means of the posterior distributions inferred
      for the linear basis weights
      **model_weights_stdvs - standard deviations of the posterior distributions
      for the linear basis weights
      **model_an, model_bn - parameters for the Gamma posteriors on the inferred
      alpha values, which themselves are the expected values of the Gaussian
      posteriors on each of the linear weights; model_an is constant and the same
      for all linear weights, but model_bn is a vector learned from the data with
      each entry corresponding to one of the linear weights
      **model_cn, model_dn - scalar parameters of the Gamma posterior on the white
      noise term beta; model_cn is constant, but model_dn is learned from the
      data
      **model_alpha - vector containing the inferred precisions for the Gaussian
      posteriors on each of the linear weights, as learned from the data
      **model_beta - the expected value of the Gamma distribution for the white
      noise precision; this will be inferred as part of the VB algorithm unless
      it has been explicitly fixed to some value which wil be indicated by the
      model_beta_fixed object attribute
      **model_smatrix - MxM matrix where M is the number of basis functions in the
      model, used to calculate various quantities; denoted by SN in the Bishop
      paper+textbook and denoted as V in the Drugowitsch notes
    """
    # Get the final number of basis functions that are to be used for the
    # regression (includes constant offset as a basis function):
    # NOTE(review): the bare excepts below silently map *any* failure
    # (including typos / unexpected errors) to 0 -- consider catching the
    # specific AttributeError/IndexError/TypeError instead.
    try:
        vbr_object.n_basis_funcs = np.shape( vbr_object.phi_train_norm )[1]
    except:
        vbr_object.n_basis_funcs = 0
    try:
        vbr_object.n_appendages = len( vbr_object.phi_ixs_appendages_postnorm )
    except:
        vbr_object.n_appendages = 0
    # Implement the VB algorithm over all unknown quantities until convergence
    # is reached:
    # NOTE(review): Python 2 print statements -- this module is not Python 3
    # compatible as written.
    print '\nPerforming VB iterative algorithm:'
    t1=time.time()
    # The appropriate algorithm is selected depending on how the white noise
    # term is to be treated:
    if vbr_object.model_beta_fixed==True:
        iterate_betafixed( vbr_object, convergence=convergence )
    else:
        iterate_betafree( vbr_object, convergence=convergence )
    t2 = time.time()
    delt = (t2-t1)/60.
    print '    Time taken for VB convergence = %.3f minutes' % delt
    # Update the VBR object to store the results of the regression:
    vbr_object.regression_run_status = 'Yes'
    vbr_object.disentangle_status = 'No'
    print '\n  Finished: VBR object has been updated with output from linear regression.'
    return None
def iterate_betafixed( vbr_object, convergence=None ):
    """
    The VB algorithm with the white noise fixed ahead of time. Shrinkage priors on each
    of the linear weights.
    This has been adapted from the Bishop & Tipping 2000 and Drugowitsch 2008 papers
    that have beta as a free parameter + shrinkage priors on each of the alpha terms,
    combined with the VB linear regression derivation given in the Bishop textbook
    where beta is held fixed and a single shrinkage prior is placed on all of the alpha
    terms.
    Note that we have maintained the Bishop (as opposed to Drugowitsch) notation for
    the hyperparameters on alpha and beta, i.e. (a0,b0) for beta and (c0,d0) for alpha.
    This can be a bit confusing but is done to maintain consistency of notation throughout
    the code.
    """
    # NOTE: It is most natural to work with matrices rather than numpy arrays when
    # implementing the VB algorithm. I don't actually think it makes a significant
    # difference in speed, if any, but it definitely makes things syntactically
    # more simple, eg. a*b instead of np.multiply(a,b).
    # Rename a few quantities to make things more concise below:
    n_data = vbr_object.n_data_train
    n_basis_funcs = vbr_object.n_basis_funcs
    beta = vbr_object.model_beta_mean_norm # scalar
    # And put the data in matrix format for this routine only:
    phi = np.matrix(vbr_object.phi_train_norm)
    target = np.matrix(vbr_object.target_train_norm).T
    # Initialise the variational lower bound L(q) and set the maximum number
    # of iterations:
    lq_last = -sys.float_info.max
    max_iter = 500
    first_pass = True
    # Initialise an ignorant Gamma priors over the alpha parameters (TODO think about
    # whether or not the priors as I've defined them here are actually appropriate!!!):
    a0 = 1e-4#1e-2
    b0 = 1e-4
    # Compute a couple of matrix products that will be used later:
    phi_corr = phi.T * phi
    phitarget_corr = phi.T * target
    # Calculate the updated first-parameter values for the Gamma-distributions
    # over the alpha parameters - these can be defined up here because they
    # remain unchanged during the iterative algorithm:
    an = a0 + 1./2.
    # Calculate the expected values of the alpha parameter priors:
    exp_alpha = np.matrix(np.ones(n_basis_funcs) * a0 / b0).T
    # NOTE(review): the loop variable 'iter' shadows the builtin of the
    # same name for the rest of this function.
    for iter in range(max_iter):
        # Calculate the s matrix and associated quantities:
        inv_s = np.matrix( np.diag( np.array( exp_alpha )[:,0] ) ) + beta * phi_corr
        s = np.matrix(scipy.linalg.inv(inv_s))
        # NOTE: To get the log determinant of s, we need to take the *negative* log
        # determinant of the *inverse* of s:
        logdet_s = -vbr_utilities.logdet(inv_s)
        # Calculate updated expectation values for the linear weights:
        exp_w = beta * np.dot( s, phitarget_corr )[:,0]
        # Evaluated the updated expected values for the Gamma distributions over each
        # of the alpha values:
        bn = b0 + 0.5 * ( ( np.array( exp_w )[:,0]**2 ) + np.diag(s) )
        exp_alpha = np.matrix( an / bn ).T
        # Calculate the variational lower bound, but ignore the terms depending on the fixed
        # constants (a0,b0,c0,d0,n_data,D) because these are unchanging/irrelevant when comparing
        # successive lower bound values:
        term1 = + beta * exp_w.T * phitarget_corr
        term2 = - 0.5 * beta * np.trace( phi_corr * ( exp_w * exp_w.T + s ) )
        term3 = - ( an/2. ) * np.sum( np.array( np.diag( exp_w * exp_w.T + s ) ) / bn )
        term4 = - ( a0+0.5 ) * np.sum( np.log(bn) )
        term5 = - b0 * an * np.sum( 1./bn )
        term6 = + 0.5 * logdet_s
        lq = term1+term2+term3+term4+term5+term6
        # It can be proven that the variational bound *must* grow. So...
        if lq_last > lq:
            # ... otherwise, something has gone wrong or the problem is misbehaved/unstable!
            # NOTE(review): 'file' shadows the builtin; also the 'return' after
            # 'raise' is unreachable dead code.
            file = open('ERROR_LOG','w')
            file.write('Previous bound = %6.6f --> DECREASED to current bound = %6.6f' % (lq_last, lq))
            file.close()
            raise Exception('Variational bound should not reduce - see ERROR_LOG')
            return
        # Stop if change in variation bound is < 0.001%, i.e. arbitrary definition of convergence:
        if abs(lq_last - lq) < abs(convergence * lq):
            break
        if first_pass==True:
            first_pass=False
        else:
            delq = abs(100*(lq-lq_last)/float(lq_last))
            print '    Step %d. Delta_L(q) = %g%%' % (iter, delq)
        lq_last = lq
    # NOTE(review): since range(max_iter) never yields max_iter itself, the
    # 'iter==max_iter' branches below are unreachable, and 'warnings' is not
    # imported in this module so the warn() call would raise NameError anyway.
    if iter<max_iter:
        print '\n  Convergence reached! \n'
    if iter==max_iter:
        print '\n  Maximum number of iterations reached; breaking loop \n'
    print
    if iter == max_iter:
        warnings.warn('Bayes:maxIter ... Bayesian linear regression reached maximum number of iterations.')
    # Record the final value reached by the variational lower bound:
    vbr_object.lq = lq
    # Parameters controlling the widths of the normal priors over the linear weights:
    vbr_object.model_alpha = np.array(exp_alpha).flatten() # array
    # Hyperparameters controlling the Gamma hyperpriors over the alpha
    # parameters, which are the widths of the priors on the linear weights:
    vbr_object.model_an = an # scalar
    vbr_object.model_bn = np.array(bn) # array; one entry for each linear weight
    # The s matrix, which is used to calculate the covariance of the joint
    # posterior distribution over the linear weights:
    vbr_object.model_smatrix = np.array(s) # matrix
    # Marginalised posterior distributions over the linear weights:
    vbr_object.model_weights_means = np.array(exp_w).flatten() # array
    vbr_object.model_weights_stdvs = np.sqrt( np.diag(s) ) # array
    return None
def iterate_betafree( vbr_object, convergence=None ):
    """
    The VB algorithm with the white noise inferred by training the model on the data.
    Shrinkage priors on each of the linear weights.
    Taken from Bishop & Tipping 2000; a similar derivation is provided in Section 10.3
    of the Bishop textbook, but without shrinkage priors on each linear weight. Note
    that we have maintained the Bishop (as opposed to Drugowitsch) notation for the
    hyperparameters on alpha and beta, i.e. (a0,b0) for beta and (c0,d0) for alpha. This
    can be a bit confusing but is done to maintain consistency of notation throughout
    the code.
    """
    # NOTE: It is most natural to work with matrices rather than numpy arrays when
    # implementing the VB algorithm. I don't actually think it makes a significant
    # difference in speed, if any, but it definitely makes things syntactically
    # more simple, eg. a*b instead of np.multiply(a,b).
    # Rename a few quantities to make things more concise below:
    n_data = vbr_object.n_data_train
    n_basis_funcs = vbr_object.n_basis_funcs
    # And put the data in matrix format for this routine only:
    phi = np.matrix( vbr_object.phi_train_norm )
    target = np.matrix( vbr_object.target_train_norm ).T
    # Initialise the lower bound L(q) and set the maximum number of iterations:
    lq_last = -sys.float_info.max
    max_iter = 500
    first_pass = True
    # Initialise an ignorant Gamma hyperprior over the alpha parameters:
    a0 = 1e-6#1e-2
    b0 = 1e-6
    # Do the same for the the beta parameter:
    c0 = 1e-6#1e-2
    d0 = 1e-6
    # Compute a couple of matrix products that will be used later:
    phi_corr = phi.T * phi
    phitarget_corr = phi.T * target
    # Calculate the updated first-parameter values for the Gamma-distributions
    # over the alpha and beta parameters - these can be defined up here because
    # they remain unchanged during the iterative algorithm:
    an = a0 + 1 / 2. #alpha
    cn = c0 + n_data / 2. #beta
    # Calculate the expected values of the alpha parameter priors:
    exp_alpha = np.matrix( np.ones( n_basis_funcs ) * a0 / b0).T
    # Calculate values for a few more quantities that will be used later:
    n_basis_gammaln_an = n_basis_funcs * scipy.special.gammaln(an)
    gammaln_cn = scipy.special.gammaln(cn)
    # NOTE(review): the loop variable 'iter' shadows the builtin of the
    # same name for the rest of this function.
    for iter in range(max_iter):
        # Calculate the s matrix and associated quantities - this is an important
        # part of the covariance matrix for the normal-part of the normal-gamma
        # posterior over the linear weights and beta:
        inv_s = np.matrix( np.diag( np.array(exp_alpha)[:,0] ) ) + phi_corr
        s = np.matrix( scipy.linalg.inv( inv_s ) )
        # NOTE: To get the log determinant of v, we need to take the *negative* log
        # determinant of the *inverse* of v:
        logdet_s = -vbr_utilities.logdet( inv_s )
        # Calculate updated expectation values for the linear weights:
        exp_w = np.dot( s, phitarget_corr )[:,0]
        # Calculate the updated second-parameter value in the Gamma distribution
        # over the beta term:
        sse = np.sum( np.power( phi * exp_w - target, 2 ), axis=0 )
        if np.imag( sse )==0:
            sse = np.real( sse )
        else:
            # Drops into the debugger if the sum of squared errors comes out
            # complex (should never happen for real-valued inputs).
            pdb.set_trace()
        dn = float( d0 + 0.5 * (sse + np.sum( ( np.array(exp_w)[:,0]**2 ) * np.array(exp_alpha)[:,0], axis=0 ) ) )
        # Evaluate the updated expected value for the Gamma distribution over the
        # beta parameter:
        exp_beta = cn / dn
        # Evaluated the updated expected values for the Gamma distributions over each
        # of the alpha values:
        bn = b0 + 0.5 * ( exp_beta * ( np.array(exp_w)[:,0]**2 ) + np.diag( s ) )
        exp_alpha = np.matrix(an / bn).T
        # Calculate the variational lower bound, but ignore the terms depending on the
        # constants (a0,b0,c0,d0,n_data,D) because these are irrelevant when comparing successive
        # lower bound values:
        term1 = - 0.5 * ( exp_beta*sse + np.sum( np.multiply( phi,phi*s ) ) )
        term2 = + 0.5 * logdet_s
        term3 = - d0 * exp_beta
        term4 = + gammaln_cn - cn * np.log(dn) + cn + n_basis_gammaln_an - an * np.sum(np.log(bn))
        lq = term1+term2+term3+term4
        # It can be proven that the variational bound *must* grow. So...
        if lq_last > lq:
            # ... otherwise, something has gone wrong or the problem is misbehaved/unstable!
            # NOTE(review): 'file' shadows the builtin; the 'return' after
            # 'raise' is unreachable dead code.
            file = open('ERROR_LOG','w')
            file.write('Previous bound = %6.6f --> DECREASED to current bound = %6.6f' % (lq_last, lq))
            file.close()
            raise Exception('Variational bound should not reduce - see ERROR_LOG')
            return
        # Stop if change in variation bound is < 0.001%, i.e. arbitrary definition of convergence:
        if abs(lq_last - lq) < abs(convergence * lq):
            break
        if first_pass==True:
            first_pass=False
        else:
            delq = abs(100*(lq-lq_last)/float(lq_last))
            print '    Step %d. Delta_L(q) = %g%%' % (iter, delq)
        lq_last = lq
    # NOTE(review): range(max_iter) never yields max_iter itself, so the
    # 'iter==max_iter' branches below are unreachable, and 'warnings' is not
    # imported in this module so the warn() call would raise NameError anyway.
    if iter<max_iter:
        print '\n  Convergence reached! \n'
    if iter==max_iter:
        print '\n  Maximum number of iterations reached; breaking loop \n'
    print
    if iter == max_iter:
        warnings.warn('Bayes:maxIter ... Bayesian linear regression reached maximum number of iterations.')
    # Record the final value reached by the variational lower bound:
    vbr_object.lq = lq
    # Parameters controlling the widths of the normal priors over the linear weights:
    vbr_object.model_alpha = np.array(exp_alpha).flatten() # array
    # Hyperparameters controlling the Gamma hyperpriors over the alpha
    # parameters, which are the widths of the priors on the linear weights:
    vbr_object.model_an = an # scalar
    vbr_object.model_bn = np.array(bn) # array; one entry for each linear weight
    # Parameters controlling the Gamma posterior over the white noise:
    vbr_object.model_cn = cn # scalar
    vbr_object.model_dn = dn # scalar
    # The expected value of the Gamma posterior over the white noise:
    vbr_object.model_beta_mean_norm = exp_beta # scalar
    vbr_object.model_beta_stdv_norm = np.sqrt(cn)/dn # scalar
    # The s matrix, which is used to calculate the covariance of the joint
    # posterior distribution over the linear weights:
    vbr_object.model_smatrix = np.array(s) # matrix
    # Marginalised posterior distributions over the linear weights:
    vbr_object.model_weights_means = np.array(exp_w).flatten() # array
    vbr_object.model_weights_stdvs = np.sqrt((dn/cn)*np.diag(s)) # array
    # NOTE: The marginalised posterior distributions for the weights are
    # strictly speaking the product of a normal distribution and a gamma
    # distribution (see Eq 34 of Drugowitsch); however, we make a point
    # approximation for the white noise parameter (i.e. np.sqrt(an/bn)),
    # which is analogous to what Bishop & Tipping 2000 do in their Eq 36.
    return None
def get_predictive_distribution( vbr_object ):
"""
Takes the output from the linear VB algorithm and calculates the predictive
distribution at the specified locations in input space. Requires that the
do_linear_regression() task has already been run, which is checked using the
regression_run_status object attribute.
In addition, the model_basis_inputs_pred object attribute must be specified,
specifying where in input space the predictive distributions is to be evaluated.
If this is not set explicitly, by default it will be set to same locations as
the training data.
Output is generated in the form of the following object attributes:
**phi_pred_norm - the normalised basis matrix used to calculate the predictive
distribution
**model_pred_means_unnorm, model_pred_stdvs_unnorm - means and standard
deviations of the Student's t, or very-nearly-Gaussian, predictive
distributions, in the same units as the target training data
**model_whitenoise_mean_unnorm - estimated or fixed white noise value in the
same units as the target training data, taken from the inferred posterior
distribution on the beta precision parameter
If the target_log_units object attribute is set to True, the predictive distribution
will be returned to its linear base by the vbr_utilities.unnormalise_model_outputs()
routine. The output will be stored in object attributes with the same name as those
above, but with an '_unlogified' suffix appended. For example, suppose that we're
fitting to the log(flux) (i.e. magnitudes) but we want to express the output in flux,
then the log(flux) predictive means will be contained in model_pred_means_unnorm
and the equivalent flux values will be contained in model_pred_means_unnorm_unlogified.
"""
print '\nComputing predictive distribution:'
# First, make sure that the predictive inputs have been provided
# in a list format; otherwise, put them in a list format:
if np.rank( vbr_object.model_basis_inputs_pred[0] )==0:
vbr_object.model_basis_inputs_pred = [ vbr_object.model_basis_inputs_pred ]
if vbr_object.model_basis_inputs_pred==vbr_object.model_basis_inputs_train:
pred_matrix_norm = vbr_object.phi_train_norm
# Often, we will want to evaluate the predictive distribution at the locations
# of the training data, in which case we can save time by setting the predictive
# basis matrices equal to the training basis matrices, rather than calculating
# them all over again:
if vbr_object.model_basis_inputs_pred==vbr_object.model_basis_inputs_train:
vbr_object.phi_pred_unnorm = vbr_object.phi_train_unnorm
vbr_object.phi_pred_norm = vbr_object.phi_train_norm
vbr_object.n_data_pred = vbr_object.n_data_train
else:
# Otherwise, we will need to construct a completely new predictive basis
# matrix. To do this, we need to start off with the standard basis functions:
pred_matrix_unnorm = vbr_utilities.construct_basis_matrix( vbr_object, whichtype='pred' )
# The previous line will have generated a normalised matrix, but without any
# extra appendages that might have also been added to the training basis matrix:
pred_matrix_norm = vbr_object.phi_pred_norm
# So we need to add any appendages separately:
if vbr_object.phi_appendages_postnorm!=None:
pred_matrix_norm = np.column_stack( [ vbr_object.phi_appendages_postnorm, pred_matrix_norm ] )
# And at last, we update the VBR object, because at this point we have our
# finished predictive basis matrix:
vbr_object.phi_pred_norm = pred_matrix_norm
# Now we calculate the means and stdvs of the predictive distribution:
wm = np.matrix( vbr_object.model_weights_means.flatten() ).T
phi = np.matrix( vbr_object.phi_pred_norm )
smatrix = np.matrix( vbr_object.model_smatrix )
# Determine whether or not the white noise term was fixed:
if vbr_object.model_beta_fixed==True:
cn = None
dn = None
beta_fixed = vbr_object.model_beta_mean_norm
else:
cn = vbr_object.model_cn
dn = vbr_object.model_dn
beta_fixed = None
# This routine does the actual calculations:
means_pred_norm, stdvs_pred_norm = vbr_utilities.calc_predictive_dist( wm, phi, smatrix, beta_fixed=beta_fixed, cn=cn, dn=dn )
# We need to unnormalise the values for the predictive distribution to get them back
# in the same units for comparison with the original input data; if we were fitting in
# log units, this step will also produce output in 'unlogified' format:
vbr_utilities.unnormalise_model_outputs( vbr_object, means_pred_norm, stdvs_pred_norm )
vbr_object.predictive_distribution_status = 'Yes'
# Print the inferred white noise value to screen:
if vbr_object.model_beta_fixed==True:
print ' Fixed whitenoise = %.6f' % (vbr_object.model_whitenoise_mean_unnorm)
else:
print ' Inferred whitenoise = %.6f' % (vbr_object.model_whitenoise_mean_unnorm)
# Finally, check to see if the predictive distribution is evaluated at the same
# locations as the training data, and if so, print the 1 and 2 sigma information:
if vbr_object.model_basis_inputs_pred==vbr_object.model_basis_inputs_train:
vbr_utilities.check_sigmalimits( vbr_object )
vbr_object.pred_at_train_locations_status = 'Yes'
else:
vbr_object.pred_at_train_locations_status = 'No'
return None
def disentangle_basis_contributions(vbr_object, make_plots=True, unlogify_plots=True, abcissa_values=None, abcissa_label=None, ordinate_label=None, y_zoom=False):
"""
This routine takes the inferred parameter distributions and uses them to separate contributions
from subsets of basis functions within the overall basis model. Specifically, it will separate
the contributions from each group of basis functions as they're organised within the variables:
** model_basis_group_names, model_appendage_names
** model_basis_types, model_basis_kwargs,
** model_basis_inputs_train, model_basis_inputs_pred
** phi_ixs_basis_groups, phi_ixs_appendages_postnorm
The following output variables are generated as object attributes:
** basis_groups_pred_means_unnorm, basis_groups_pred_stdvs_unnorm
** basis_appendages_pred_means_unnorm, basis_appendages_pred_stdvs_unnorm
Each of these variables contains a list, with the ith entry corresponding to the contribution
from ith basis function group or the ith appendage.
In addition to these outputs, a 'mirror' attribute is generated with a '_complements' suffix.
Specifically, these complement variables are the complementary contribution from all other basis
functions and appendages.
If the target_log_units attribute is set to True, 'unlogified' versions of each variable will
also be generated.
Plots of each basis contribution can be made depending on how the optional arguments
of this routine are specified; they are fairly self-explanatory.
"""
# First do the standard basis groups:
try:
# Work out the number of groups:
n_basis_groups = len(vbr_object.phi_ixs_basis_groups)
if vbr_object.disentangle_status=='Yes':
print '\nBasis group contributions have already been disentangled.'
else:
# Set up lists to hold the output arrays:
group_names = []
group_means = []
group_stdvs = []
# Also keep track of the complement distributions:
group_complement_means = []
group_complement_stdvs = []
# If the target data is specified as being in log units, set up additional lists to hold
# the output arrays that have been converted from log to linear units:
if vbr_object.target_log_units==True:
group_means_unlogified = []
group_stdvs_unlogified = []
group_complement_means_unlogified = []
group_complement_stdvs_unlogified = []
# Disentangle the basis contributions, one at a time:
for i in range(n_basis_groups):
print '\n Disentangling basis group %i of %i...' % (i+1,n_basis_groups)
ixs_group = vbr_object.phi_ixs_basis_groups[i]
subset_unnorm, complement_unnorm = vbr_utilities.split_basis_model(vbr_object, ixs_group)
# Record the marginalised distribution for the group subset:
group_names = group_names+[vbr_object.model_basis_group_names[i]]
group_means = group_means+[subset_unnorm[0]]
group_stdvs = group_stdvs+[subset_unnorm[1]]
# Also record the means of the complement, for the purposes of plotting below:
group_complement_means = group_complement_means+[complement_unnorm[0]]
group_complement_stdvs = group_complement_stdvs+[complement_unnorm[1]]
# If we have log units, also record the results in linear units:
if vbr_object.target_log_units==True:
# Work out the arrays for the current step:
group_means_unlogified_i, group_stdvs_unlogified_i \
= vbr_utilities.unlogify_distribution(group_means[i], group_stdvs[i])
group_complement_means_unlogified_i, group_complement_stdvs_unlogified_i \
= vbr_utilities.unlogify_distribution(group_complement_means[i], group_complement_stdvs[i])
#group_complement_means_unlogified_i = np.exp(group_complement_means[i])
# Add the arrays for the current step to the list:
group_means_unlogified = group_means_unlogified+[group_means_unlogified_i]
group_stdvs_unlogified = group_stdvs_unlogified+[group_stdvs_unlogified_i]
group_complement_means_unlogified = group_complement_means_unlogified+ \
[group_complement_means_unlogified_i]
group_complement_stdvs_unlogified = group_complement_stdvs_unlogified+ \
[group_complement_stdvs_unlogified_i]
# Install the output in the vbr object:
vbr_object.basis_groups_pred_means_unnorm = group_means
vbr_object.basis_groups_pred_stdvs_unnorm = group_stdvs
vbr_object.basis_groups_pred_means_unnorm_complements = group_complement_means
vbr_object.basis_groups_pred_stdvs_unnorm_complements = group_complement_stdvs
if vbr_object.target_log_units==True:
vbr_object.basis_groups_pred_means_unnorm_unlogified = group_means_unlogified
vbr_object.basis_groups_pred_stdvs_unnorm_unlogified = group_stdvs_unlogified
vbr_object.basis_groups_pred_means_unnorm_unlogified_complements = group_complement_means_unlogified
vbr_object.basis_groups_pred_stdvs_unnorm_unlogified_complements = group_complement_stdvs_unlogified
except:
n_basis_groups = 0
print ' \nWARNING: No basis groups found for model!\n'
vbr_object.basis_groups_pred_means_unnorm = None
vbr_object.basis_groups_pred_means_unnorm_complement = None
vbr_object.basis_groups_pred_stdvs_unnorm = None
# Now do the same as above for any special appendages that have been included in the model:
try:
n_appendages = len(vbr_object.phi_ixs_appendages_postnorm)
if vbr_object.disentangle_status=='No':
appendage_names = []
appendage_means = []
appendage_stdvs = []
appendage_complement_means = []
if vbr_object.target_log_units==True:
appendage_means_unlogified = []
appendage_stdvs_unlogified = []
appendage_complement_means_unlogified = []
for i in range(n_appendages):
print '\n Disentangling appendage %i of %i...' % (i+1,n_appendages)
ixs_appendage = vbr_object.phi_ixs_appendages_postnorm[i]
subset_unnorm, complement_unnorm = vbr_utilities.split_basis_model(vbr_object, ixs_appendage)
appendage_names = appendage_names+[vbr_object.model_appendage_names]
appendage_means = appendage_means+[subset_unnorm[0]]
appendage_stdvs = appendage_stdvs+[subset_unnorm[1]]
appendage_complement_means = appendage_complement_means+[complement_unnorm[0]]
if vbr_object.target_log_units==True:
# Work out the arrays for the current step:
appendage_means_unlogified_i, appendage_stdvs_unlogified_i \
= vbr_utilities.unlogify_distribution(appendage_means[i], appendage_stdvs[i])
appendage_complement_means_unlogified_i = np.exp(appendage_complement_means[i])
# Add the arrays for the current step to the list:
appendage_means_unlogified = appendage_means_unlogified+[appendage_means_unlogified_i]
appendage_stdvs_unlogified = appendage_stdvs_unlogified+[appendage_stdvs_unlogified_i]
appendage_complement_means_unlogified = appendage_complement_means_unlogified+ \
[appendage_complement_means_unlogified_i]
vbr_object.basis_appendages_pred_means_unnorm = appendage_means
vbr_object.basis_appendages_pred_means_unnorm_complements = appendage_complement_means
vbr_object.basis_appendages_pred_stdvs_unnorm = appendage_stdvs
if vbr_object.target_log_units==True:
vbr_object.basis_appendages_pred_means_unnorm_unlogified = appendage_means_unlogified
vbr_object.basis_appendages_pred_means_unnorm_unlogified_complements = appendage_complement_means_unlogified
vbr_object.basis_appendages_pred_stdvs_unnorm_unlogified = appendage_stdvs_unlogified
except:
n_appendages = 0
print ' \n No appendages found for model\n'
vbr_object.basis_appendages_pred_means_unnorm = None
vbr_object.basis_appendages_pred_means_unnorm_complements = None
vbr_object.basis_appendages_pred_stdvs_unnorm = None
vbr_object.disentangle_status = 'Yes'
# Plot the results if requested:
if make_plots==True:
print 'Plotting separate basis group contributions...'
# Prepare the data for plotting:
pred_means = np.zeros([vbr_object.n_data_pred, n_basis_groups+n_appendages])
pred_stdvs = np.zeros([vbr_object.n_data_pred, n_basis_groups+n_appendages])
corrected_data = np.zeros([vbr_object.n_data_pred, n_basis_groups+n_appendages])
axes_titles = []
if n_basis_groups>0:
for i in range(n_basis_groups):
if (vbr_object.target_log_units==True)*(unlogify_plots==True):
pred_means[:,i] = vbr_object.basis_groups_pred_means_unnorm_unlogified[i]
pred_stdvs[:,i] = vbr_object.basis_groups_pred_stdvs_unnorm_unlogified[i]
corrected_data[:,i] = np.exp(vbr_object.target_train_unnorm - \
vbr_object.basis_groups_pred_means_unnorm_complements[i])
else:
pred_means[:,i] = vbr_object.basis_groups_pred_means_unnorm[i]
pred_stdvs[:,i] = vbr_object.basis_groups_pred_stdvs_unnorm[i]
corrected_data[:,i] = vbr_object.target_train_unnorm- \
vbr_object.basis_groups_pred_means_unnorm_complements[i]
axes_titles = axes_titles+[vbr_object.model_basis_group_names[i]]
if n_appendages>0:
for i in range(n_appendages):
if (vbr_object.target_log_units==True)*(unlogify_plots==True):
pred_means[:,i+n_basis_groups] = vbr_object.basis_appendages_pred_means_unnorm_unlogified[i]
pred_stdvs[:,i+n_basis_groups] = vbr_object.basis_appendages_pred_stdvs_unnorm_unlogified[i]
corrected_data[:,i+n_basis_groups] = np.exp(vbr_object.target_train_unnorm_unlogified - \
vbr_object.basis_appendages_pred_means_unnorm_unlogified_complements[i])
else:
pred_means[:,i+n_basis_groups] = vbr_object.basis_appendages_pred_means_unnorm[i]
pred_stdvs[:,i+n_basis_groups] = vbr_object.basis_appendages_pred_stdvs_unnorm[i]
corrected_data[:,i+n_basis_groups] = vbr_object.target_train_unnorm-vbr_object.basis_appendages_pred_means_unnorm_complements[i]
axes_titles = axes_titles+[vbr_object.model_appendage_names[i]]
# Now set up the plotting:
shade = [0.5,0.5,0.5]
if abcissa_values==None:
x = np.arange(vbr_object.n_data_pred)
else:
x = abcissa_values
# Work out how the subplots will be divided between figures:
n_axes_perfig = 4
n_axes_total = n_basis_groups+n_appendages
n_figs = np.ceil(n_axes_total/float(n_axes_perfig))
# Get the minimum and maximum extents of the data:
x_low = x.min()
x_upp = x.max()
y_min = np.max([(pred_means-pred_stdvs).min(),corrected_data.min()])
y_max = np.max([(pred_means+pred_stdvs).max(),corrected_data.max()])
# Specify the axes properties (vertical position specified below):
edge_buffer = 0.08
ax_xlow = 1.5*edge_buffer
ax_width = 1-2*edge_buffer
ax_height = (1-2*edge_buffer)/float(n_axes_perfig)
axes_counter = 0
fig_counter = 0
for i in range(n_axes_total):
# Specify the vertical position for the current axis:
ax_ylow = 1-(axes_counter%n_axes_perfig+1)*ax_height-edge_buffer
if axes_counter%n_axes_perfig==0:
fig = plt.figure()
fig_counter += 1
fig.suptitle('Basis Contributions (%i of %i)' % (fig_counter,n_figs))
ax0 = fig.add_axes([ax_xlow,ax_ylow,ax_width,ax_height])
else:
ax = fig.add_axes([ax_xlow,ax_ylow,ax_width,ax_height], sharex=ax0)
cax = plt.gca()
cax.plot(x,corrected_data[:,i],'.g',ms=10,alpha=0.7)
cax.fill_between(x,pred_means[:,i]-pred_stdvs[:,i],pred_means[:,i]+pred_stdvs[:,i],color=shade)
cax.plot(x,pred_means[:,i],'-r',lw=2)
cax.set_xlim([x_low,x_upp])
# Define the minimum and maximum extents of the current subplot axes:
if y_zoom==True:
y_low = corrected_data[:,i].min()
y_upp = corrected_data[:,i].max()
else:
y_low = y_min-0.1*(y_max-y_min)
y_upp = y_max+0.1*(y_max-y_min)
cax.set_ylim([y_low,y_upp])
# Specify where to locate the text within the subplot axis:
x_text = x_low+0.05*(x_upp-x_low)
y_text = y_upp-0.1*(y_upp-y_low)
cax.text(x_text,y_text,axes_titles[i],fontsize=12)
if axes_counter%n_axes_perfig!=n_axes_perfig-1:
plt.setp(cax.xaxis.get_ticklabels(), visible = False)
else:
if abcissa_label!=None:
cax.set_xlabel(abcissa_label)
axes_counter += 1
plt.draw()
print '\n'
return None
def basis_subset_contribution( vbr_object, make_plots=True ):
    """
    Placeholder for extracting the contribution of an arbitrary subset of
    basis functions (cf. disentangle_basis_contributions, which only handles
    the predefined groups). Not implemented yet; prints a notice and returns.
    """
    # Planned implementation would delegate to something like
    # vbr_utilities.divide_basis_model(...).
    print('\n\nNot implemented yet!\n\n')
    return None
def get_marginalised_loglikelihood( vbr_object ):
    """
    Evaluates the loglikelihood of the data given the model, marginalised over
    all of the model parameters; i.e. the joint pdf of the predictive
    distribution evaluated at the locations of the input training data. The
    result is stored in the 'logp' attribute of vbr_object.

    Strictly speaking, when the white noise beta parameter is free this joint
    pdf is a product of Student's t distributions (Eq 31 of the 2008 report by
    Drugowitsch), one per data point; here it is approximated as a multivariate
    Gaussian with diagonal covariance. This is a good approximation whenever the
    t distributions have many degrees of freedom, i.e. the parameter cn is large.
    """
    # Work on a deep copy so the predictive locations can be forced onto the
    # training locations without disturbing the caller's object:
    obj = copy.deepcopy( vbr_object )
    # Re-evaluate the predictive distribution at the training locations if that
    # has not been done already:
    if obj.pred_at_train_locations_status=='No':
        obj.model_basis_inputs_pred = obj.model_basis_inputs_train
    if obj.regression_run_status=='No':
        obj.do_linear_regression()
    # Number of training points (kept for reference):
    ndata = obj.n_data_train
    # Observed targets plus the mean/stdv vectors of the (approximately
    # Gaussian) predictive distribution:
    data = obj.target_train_unnorm
    mu = obj.model_pred_means_unnorm
    sig = obj.model_pred_stdvs_unnorm
    # Log pdf of the diagonal multivariate normal, installed on the ORIGINAL
    # object (the deep copy is discarded):
    vbr_object.logp = np.sum( np.log( 1./sig ) ) + \
                      np.sum( -( ( data-mu )**2. ) / 2. / ( sig**2. ) )
    return None
def plot_beta( vbr_object ):
    """
    Plots the posterior distribution over beta, the precision of the white
    noise (beta = 1/sigma**2), on a new matplotlib figure. If beta was held
    fixed during the regression there is nothing to plot.
    """
    if vbr_object.model_beta_fixed==True:
        print '\n\nBeta parameter was fixed (i.e. delta distribution)\n\n'
    else:
        # Shape and rate parameters of the inferred Gamma posterior over beta:
        shape = vbr_object.model_cn
        rate = vbr_object.model_dn
        # Sigma scale corresponding to the posterior mean precision:
        sig0 = 1. / np.sqrt( vbr_object.model_beta_mean_norm )
        # Grid of 1000 sigma values spanning (0, 3*sig0] (complex step gives
        # the number of samples to np.r_), mapped back to precision values:
        sig_x = np.r_[ 1e-6:3*sig0:1j*1000 ]
        beta_x = 1./(sig_x**2.)
        # Log of the Gamma pdf, assembled term by term (np.log(1.) is 0 and
        # appears only for symmetry with the normalisation constant):
        term1 = np.log(1.) - scipy.special.gammaln(shape)
        term2 = shape * np.log( rate )
        term3 = (shape-1) * np.log( beta_x )
        term4 = -rate*beta_x
        beta_logpdf = term1 + term2 + term3 + term4
        # NOTE(review): this plots p(beta) against the normalised sigma axis
        # without a change-of-variables Jacobian -- confirm that is intended.
        plt.figure()
        plt.plot( sig_x/sig0, np.exp(beta_logpdf), '-g', lw=2 )
        plt.xlabel('Normalised white noise value')
        plt.ylabel('Probability')
        plt.title('Inferred White Noise Distribution')
    return None
|
tomevans/linvb
|
linvb/vbr_routines.py
|
Python
|
gpl-2.0
| 40,963
|
[
"Gaussian"
] |
ce15500d7735075c960be060b3c0d16d12bbd38aa29fa162ba53b9f0d8e72779
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/cidfonts.py
#$Header $
__version__=''' $Id$ '''
__doc__="""CID (Asian multi-byte) font support.
This defines classes to represent CID fonts. They know how to calculate
their own width and how to write themselves into PDF files."""
import os
from types import ListType, TupleType, DictType
from string import find, split, strip
import marshal
import time
try:
from hashlib import md5
except ImportError:
from md5 import md5
import reportlab
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase._cidfontdata import allowedTypeFaces, allowedEncodings, CIDFontInfo, \
defaultUnicodeEncodings, widthsByUnichar
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase.pdfutils import _escape
from reportlab.rl_config import CMapSearchPath
#quick hackery for 2.0 release. Now we always do unicode, and have built in
#the CMAP data, any code to load CMap files is not needed.
DISABLE_CMAP = True
def findCMapFile(name):
    """Return the full path to the CMAP file *name*.

    Each directory in CMapSearchPath is tried in order; an IOError is raised
    if the file is found in none of them.
    """
    for dirname in CMapSearchPath:
        cmapfile = dirname + os.sep + name
        if os.path.isfile(cmapfile):
            #print "found", cmapfile
            return cmapfile
    # Python 3 compatible raise (was the Python 2 'raise IOError, msg' form,
    # which is a syntax error under Python 3); also 'encodings' -> 'encoding'.
    raise IOError('CMAP file for encoding "%s" not found!' % name)
def structToPDF(structure):
    """Converts deeply nested structure to PDFdoc dictionary/array objects.

    Dicts become pdfdoc.PDFDictionary, lists/tuples become pdfdoc.PDFArray
    (recursively); anything else is returned unchanged.
    """
    # isinstance replaces the Python-2-only 'type(x) is types.DictType' tests:
    # it also accepts dict/list/tuple subclasses, and the types constants were
    # removed in Python 3.
    if isinstance(structure, dict):
        newDict = {}
        for k, v in structure.items():
            newDict[k] = structToPDF(v)
        return pdfdoc.PDFDictionary(newDict)
    elif isinstance(structure, (list, tuple)):
        newList = []
        for elem in structure:
            newList.append(structToPDF(elem))
        return pdfdoc.PDFArray(newList)
    else:
        # Scalars (numbers, strings, pre-built PDF objects) pass through.
        return structure
class CIDEncoding(pdfmetrics.Encoding):
    """Multi-byte encoding. These are loaded from CMAP files.
    A CMAP file is like a mini-codec. It defines the correspondence
    between code points in the (multi-byte) input data and Character
    IDs. """
    # aims to do similar things to Brian Hooper's CMap class,
    # but I could not get it working and had to rewrite.
    # also, we should really rearrange our current encoding
    # into a SingleByteEncoding since many of its methods
    # should not apply here.
    def __init__(self, name, useCache=1):
        # name: CMAP name (e.g. '90ms-RKSJ-H'); also the cache filename stem.
        self.name = name
        # md5 digest of the raw CMAP file; change-detection fingerprint.
        self._mapFileHash = None
        # List of (start, end) ranges of valid input code points.
        self._codeSpaceRanges = []
        # List of (start, end, value) ranges mapping undefined codes to a
        # 'notdef' CID.
        self._notDefRanges = []
        # code point -> CID lookup table.
        self._cmap = {}
        # Provenance string: fastmap path or 'CMAP: <name>'.
        self.source = None
        # With DISABLE_CMAP set (the default since the 2.0 unicode work) no
        # parsing happens at all and the tables above stay empty.
        if not DISABLE_CMAP:
            if useCache:
                from reportlab.lib.utils import get_rl_tempdir
                fontmapdir = get_rl_tempdir('FastCMAPS')
                # Prefer the marshalled '.fastmap' cache; fall back to parsing
                # the raw CMAP file and writing a cache for next time.
                if os.path.isfile(fontmapdir + os.sep + name + '.fastmap'):
                    self.fastLoad(fontmapdir)
                    self.source = fontmapdir + os.sep + name + '.fastmap'
                else:
                    self.parseCMAPFile(name)
                    self.source = 'CMAP: ' + name
                    self.fastSave(fontmapdir)
            else:
                self.parseCMAPFile(name)
    def _hash(self, text):
        # md5 of the raw file contents; used only as a fingerprint, not for
        # security.
        hasher = md5()
        hasher.update(text)
        return hasher.digest()
    def parseCMAPFile(self, name):
        """This is a tricky one as CMAP files are Postscript
        ones.  Some refer to others with a 'usecmap'
        command"""
        # NOTE: 'find' and 'split' here are the Python 2 string-module
        # functions imported at the top of this file, not str methods.
        #started = time.clock()
        cmapfile = findCMapFile(name)
        # this will CRAWL with the unicode encodings...
        rawdata = open(cmapfile, 'r').read()
        self._mapFileHash = self._hash(rawdata)
        #if it contains the token 'usecmap', parse the other
        #cmap file first....
        usecmap_pos = find(rawdata, 'usecmap')
        if usecmap_pos > -1:
            #they tell us to look in another file
            #for the code space ranges. The one
            # to use will be the previous word.
            chunk = rawdata[0:usecmap_pos]
            words = split(chunk)
            otherCMAPName = words[-1]
            #print 'referred to another CMAP %s' % otherCMAPName
            self.parseCMAPFile(otherCMAPName)
            # now continue parsing this, as it may
            # override some settings
        # Simple token scanner over the whitespace-split Postscript source:
        # each begin*/end* section fills in one of the three tables.
        words = split(rawdata)
        while words != []:
            if words[0] == 'begincodespacerange':
                words = words[1:]
                # Hex range bounds come wrapped in angle brackets, e.g. <8140>.
                while words[0] != 'endcodespacerange':
                    strStart, strEnd, words = words[0], words[1], words[2:]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    self._codeSpaceRanges.append((start, end),)
            elif words[0] == 'beginnotdefrange':
                words = words[1:]
                while words[0] != 'endnotdefrange':
                    strStart, strEnd, strValue = words[0:3]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    value = int(strValue)
                    self._notDefRanges.append((start, end, value),)
                    words = words[3:]
            elif words[0] == 'begincidrange':
                words = words[1:]
                while words[0] != 'endcidrange':
                    strStart, strEnd, strValue = words[0:3]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    value = int(strValue)
                    # this means that 'start' corresponds to 'value',
                    # start+1 corresponds to value+1 and so on up
                    # to end
                    offset = 0
                    while start + offset <= end:
                        self._cmap[start + offset] = value + offset
                        offset = offset + 1
                    words = words[3:]
            else:
                # Unrecognised token: advance by one and keep scanning.
                words = words[1:]
        #finished = time.clock()
        #print 'parsed CMAP %s in %0.4f seconds' % (self.name, finished - started)
    def translate(self, text):
        "Convert a string into a list of CIDs"
        # Accumulates one-byte codes directly; pairs up consecutive bytes into
        # two-byte codes whenever a pending lead byte (lastChar) is held over.
        output = []
        cmap = self._cmap
        lastChar = ''
        for char in text:
            if lastChar != '':
                #print 'convert character pair "%s"' % (lastChar + char)
                num = ord(lastChar) * 256 + ord(char)
            else:
                #print 'convert character "%s"' % char
                num = ord(char)
            lastChar = char
            found = 0
            for low, high in self._codeSpaceRanges:
                # NOTE(review): strict '<' excludes the range endpoints --
                # confirm the CMAP spec intends inclusive bounds here.
                if low < num < high:
                    try:
                        cid = cmap[num]
                        #print '%d -> %d' % (num, cid)
                    except KeyError:
                        #not defined.  Try to find the appropriate
                        # notdef character, or failing that return
                        # zero
                        cid = 0
                        for low2, high2, notdef in self._notDefRanges:
                            if low2 < num < high2:
                                cid = notdef
                                break
                    output.append(cid)
                    found = 1
                    break
            if found:
                # Code consumed: clear the pending lead byte.
                lastChar = ''
            else:
                # Hold this byte over as the lead byte of a two-byte code.
                lastChar = char
        return output
    def fastSave(self, directory):
        # Marshal the four parsed tables into '<name>.fastmap' for quick reload.
        f = open(os.path.join(directory, self.name + '.fastmap'), 'wb')
        marshal.dump(self._mapFileHash, f)
        marshal.dump(self._codeSpaceRanges, f)
        marshal.dump(self._notDefRanges, f)
        marshal.dump(self._cmap, f)
        f.close()
    def fastLoad(self, directory):
        # Inverse of fastSave: fields must be read back in the same order.
        # NOTE(review): time.clock() was removed in Python 3.8 -- would need
        # time.perf_counter() in a py3 port.
        started = time.clock()
        f = open(os.path.join(directory, self.name + '.fastmap'), 'rb')
        self._mapFileHash = marshal.load(f)
        self._codeSpaceRanges = marshal.load(f)
        self._notDefRanges = marshal.load(f)
        self._cmap = marshal.load(f)
        f.close()
        finished = time.clock()
        #print 'loaded %s in %0.4f seconds' % (self.name, finished - started)
    def getData(self):
        """Simple persistence helper.  Return a dict with all that matters."""
        return {
            'mapFileHash': self._mapFileHash,
            'codeSpaceRanges': self._codeSpaceRanges,
            'notDefRanges': self._notDefRanges,
            'cmap': self._cmap,
            }
class CIDTypeFace(pdfmetrics.TypeFace):
    """Multi-byte type face.

    Conceptually similar to a single byte typeface,
    but the glyphs are identified by a numeric Character
    ID (CID) and not a glyph name."""
    def __init__(self, name):
        """Initialised from one of the canned dictionaries in allowedEncodings
        Or rather, it will be shortly..."""
        pdfmetrics.TypeFace.__init__(self, name)
        self._extractDictInfo(name)
    def _extractDictInfo(self, name):
        # Pull ascent/descent and the width tables for this face out of the
        # canned CIDFontInfo table.
        try:
            fontDict = CIDFontInfo[name]
        except KeyError:
            # Python 3 compatible raise (was the Python 2 'raise KeyError, ...'
            # statement form, a syntax error under Python 3).
            raise KeyError("Unable to find information on CID typeface '%s'" % name +
                           "Only the following font names work:" + repr(allowedTypeFaces)
                           )
        descFont = fontDict['DescendantFonts'][0]
        self.ascent = descFont['FontDescriptor']['Ascent']
        self.descent = descFont['FontDescriptor']['Descent']
        # DW is the face's default glyph width; W is Adobe's compact width array.
        self._defaultWidth = descFont['DW']
        self._explicitWidths = self._expandWidths(descFont['W'])
        # should really support self.glyphWidths, self.glyphNames
        # but not done yet.
    def _expandWidths(self, compactWidthArray):
        """Expands Adobe nested list structure to get a dictionary of widths.
        Here is an example of such a structure.::
            (
            # starting at character ID 1, next n characters have the widths given.
            1,  (277,305,500,668,668,906,727,305,445,445,508,668,305,379,305,539),
            # all Characters from ID 17 to 26 are 668 em units wide
            17, 26, 668,
            27, (305, 305, 668, 668, 668, 566, 871, 727, 637, 652, 699, 574, 555,
            676, 687, 242, 492, 664, 582, 789, 707, 734, 582, 734, 605, 605,
            641, 668, 727, 945, 609, 609, 574, 445, 668, 445, 668, 668, 590,
            555, 609, 547, 602, 574, 391, 609, 582, 234, 277, 539, 234, 895,
            582, 605, 602, 602, 387, 508, 441, 582, 562, 781, 531, 570, 555,
            449, 246, 449, 668),
            # these must be half width katakana and the like.
            231, 632, 500
            )
        """
        data = compactWidthArray[:]
        widths = {}
        while data:
            start, data = data[0], data[1:]
            # isinstance replaces the Python-2-only ListType/TupleType test
            # (the types constants were removed in Python 3) and also accepts
            # list/tuple subclasses.
            if isinstance(data[0], (list, tuple)):
                # Form 1: start, (w0, w1, ...) -- consecutive explicit widths.
                items, data = data[0], data[1:]
                for offset in range(len(items)):
                    widths[start + offset] = items[offset]
            else:
                # Form 2: start, end, width -- an inclusive run of one width.
                end, width, data = data[0], data[1], data[2:]
                for idx in range(start, end+1):
                    widths[idx] = width
        return widths
    def getCharWidth(self, characterId):
        # Fall back to the face default for CIDs without an explicit entry.
        return self._explicitWidths.get(characterId, self._defaultWidth)
class CIDFont(pdfmetrics.Font):
    """Represents a built-in multi-byte font."""
    _multiByte = 1

    def __init__(self, face, encoding):
        assert face in allowedTypeFaces, "TypeFace '%s' not supported! Use any of these instead: %s" % (face, allowedTypeFaces)
        self.faceName = face
        # Type face metadata (ascent/descent, width tables).
        #should cache in registry...
        self.face = CIDTypeFace(face)
        assert encoding in allowedEncodings, "Encoding '%s' not supported! Use any of these instead: %s" % (encoding, allowedEncodings)
        self.encodingName = encoding
        self.encoding = CIDEncoding(encoding)
        # Legacy naming convention: '<face>-<encoding>'.
        self.name = self.fontName = '-'.join([self.faceName, self.encodingName])
        # Encoding names ending in 'V' denote the vertical writing variants.
        self.isVertical = self.encodingName.endswith('V')
        # No substitution fonts initially.
        self.substitutionFonts = []

    def formatForPdf(self, text):
        # Escape the raw byte string for embedding in PDF output.
        return _escape(text)

    def stringWidth(self, text, size, encoding=None):
        """This presumes non-Unicode input. UnicodeCIDFont wraps it for that context"""
        cids = self.encoding.translate(text)
        if self.isVertical:
            # Not rigorously checked: assume each glyph is 1000 em units high.
            return len(cids) * size
        total = sum([self.face.getCharWidth(cid) for cid in cids])
        return 0.001 * total * size

    def addObjects(self, doc):
        """Register this font's PDF object graph with *doc* and record it in
        the document's font map. The explicit data comes from _cidfontdata."""
        pdfName = 'F%d' % (len(doc.fontMapping) + 1)
        info = CIDFontInfo[self.face.name]
        info['Name'] = '/' + pdfName
        info['Encoding'] = '/' + self.encodingName
        # Convert the nested python structure to PDF dictionary/array objects,
        # link it into the document, and add it to the font map.
        ref = doc.Reference(structToPDF(info), pdfName)
        doc.idToObject['BasicFonts'].dict[pdfName] = ref
        doc.fontMapping[self.name] = '/' + pdfName
class UnicodeCIDFont(CIDFont):
    """Wraps up CIDFont to hide explicit encoding choice;
    encodes text for output as UTF16.
    lang should be one of 'jpn',chs','cht','kor' for now.
    if vertical is set, it will select a different widths array
    and possibly glyphs for some punctuation marks.
    halfWidth is only for Japanese.
    >>> dodgy = UnicodeCIDFont('nonexistent')
    Traceback (most recent call last):
    ...
    KeyError: "don't know anything about CID font nonexistent"
    >>> heisei = UnicodeCIDFont('HeiseiMin-W3')
    >>> heisei.name
    'HeiseiMin-W3'
    >>> heisei.language
    'jpn'
    >>> heisei.encoding.name
    'UniJIS-UCS2-H'
    >>> #This is how PDF data gets encoded.
    >>> print heisei.formatForPdf('hello')
    \\000h\\000e\\000l\\000l\\000o
    >>> tokyo = u'\u6771\u4AEC'
    >>> print heisei.formatForPdf(tokyo)
    gqJ\\354
    """
    def __init__(self, face, isVertical=False, isHalfWidth=False):
        #pass
        try:
            lang, defaultEncoding = defaultUnicodeEncodings[face]
        except KeyError:
            raise KeyError("don't know anything about CID font %s" % face)
        #we know the languages now.
        self.language = lang
        #rebuilt encoding string. They follow rules which work
        #for the 7 fonts provided.
        # e.g. 'UniJIS-UCS2-H' -> base 'UniJIS-UCS2-' plus optional 'HW-'
        # (half width) and a final 'H' (horizontal) or 'V' (vertical).
        enc = defaultEncoding[:-1]
        if isHalfWidth:
            enc = enc + 'HW-'
        if isVertical:
            enc = enc + 'V'
        else:
            enc = enc + 'H'
        #now we can do the more general case
        CIDFont.__init__(self, face, enc)
        #self.encName = 'utf_16_le'
        #it's simpler for unicode, just use the face name
        self.name = self.fontName = face
        self.vertical = isVertical
        self.isHalfWidth = isHalfWidth
        # Per-unichar width table for this face (keyed by unicode character).
        self.unicodeWidths = widthsByUnichar[self.name]
    def formatForPdf(self, text):
        #these ones should be encoded asUTF16 minus the BOM
        from codecs import utf_16_be_encode
        #print 'formatting %s: %s' % (type(text), repr(text))
        # NOTE(review): Python 2 semantics -- byte strings are assumed to be
        # UTF-8 and promoted to unicode before the UTF-16-BE encode.
        if type(text) is not unicode:
            text = text.decode('utf8')
        utfText = utf_16_be_encode(text)[0]
        encoded = _escape(utfText)
        #print '  encoded:',encoded
        return encoded
        #
        #result = _escape(encoded)
        #print '    -> %s' % repr(result)
        #return result
    def stringWidth(self, text, size, encoding=None):
        "Just ensure we do width test on characters, not bytes..."
        # Promote byte strings to unicode (Python 2 semantics, as above).
        if type(text) is type(''):
            text = text.decode('utf8')
        widths = self.unicodeWidths
        # Unknown characters fall back to a 1000-em default width.
        return size * 0.001 * sum([widths.get(uch, 1000) for uch in text])
        #return CIDFont.stringWidth(self, text, size, encoding)
def precalculate(cmapdir):
# crunches through all, making 'fastmap' files
import os
files = os.listdir(cmapdir)
for file in files:
if os.path.isfile(cmapdir + os.sep + self.name + '.fastmap'):
continue
try:
enc = CIDEncoding(file)
except:
print 'cannot parse %s, skipping' % enc
continue
enc.fastSave(cmapdir)
print 'saved %s.fastmap' % file
def test():
    # only works if you have correct encodings on your box!
    c = Canvas('test_japanese.pdf')
    c.setFont('Helvetica', 30)
    c.drawString(100,700, 'Japanese Font Support')
    # Register the two CID typefaces exercised below.
    pdfmetrics.registerFont(CIDFont('HeiseiMin-W3','90ms-RKSJ-H'))
    pdfmetrics.registerFont(CIDFont('HeiseiKakuGo-W5','90ms-RKSJ-H'))
    # the two typefaces
    c.setFont('HeiseiMin-W3-90ms-RKSJ-H', 16)
    # this says "This is HeiseiMincho" in shift-JIS. Not all our readers
    # have a Japanese PC, so I escaped it. On a Japanese-capable
    # system, print the string to see Kanji
    message1 = '\202\261\202\352\202\315\225\275\220\254\226\276\222\251\202\305\202\267\201B'
    c.drawString(100, 675, message1)
    c.save()
    print 'saved test_japanese.pdf'
    # Smoke-test the encoding/metrics machinery: translate the Shift-JIS
    # message through the CMAP and report its string width.
    encName = '90ms-RKSJ-H'
    enc = CIDEncoding(encName)
    print message1, '->', enc.translate(message1)
    f = CIDFont('HeiseiMin-W3','90ms-RKSJ-H')
    print 'width = %0.2f' % f.stringWidth(message1, 10)
if __name__=='__main__':
    # Run the doctests embedded in this module.  The interactive smoke
    # test that writes test_japanese.pdf is available via test().
    import doctest
    import cidfonts
    doctest.testmod(cidfonts)
    #test()
|
olivierdalang/stdm
|
third_party/reportlab/pdfbase/cidfonts.py
|
Python
|
gpl-2.0
| 19,374
|
[
"Brian"
] |
f705a64bebfdb2c34072aab0d62f079f86aa469abaf841a09df1bf458b06eae9
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""
A default Jinja2 environment for the rendering of html in print previews
and others.
The `loader` loads its templates from the camelot/art/templates
folder. As it is a :class:`jinja2.loaders.ChoiceLoader` object, other
loaders can be appended or prepended to its :attr:`loaders` attribute, to
customize the look of the print previews or reuse the existing style.
The `environment` is a :class:`jinja2.environment.Environment` which uses
the `loader` and that can be used with
the :class:`camelot.view.action_steps.print_preview.PrintJinjaTemplate` action
step.
"""
from jinja2.environment import Environment
from jinja2.loaders import ChoiceLoader, PackageLoader
# Template loader searching the camelot/art/templates folder.  Additional
# loaders may be appended or prepended to ``loader.loaders`` to customize
# or override the built-in templates.
loader = ChoiceLoader( [ PackageLoader( 'camelot.art' ) ] )

class DefaultEnvironment( Environment ):
    """Jinja2 environment used to render html in print previews; only
    customizes the repr for friendlier debugging output."""

    def __repr__( self ):
        return '<camelot.core.templates.environment>'

# Shared environment instance bound to the default loader above.
environment = DefaultEnvironment( loader = loader )
|
jeroendierckx/Camelot
|
camelot/core/templates.py
|
Python
|
gpl-2.0
| 1,983
|
[
"VisIt"
] |
6165b00a5e19234a8973a2d4cc39f178a31fce349f82f7cd9389dbda9cc338a6
|
#
# Copyright 2015 Benjamin Kiessling
# 2014 Thomas M. Breuel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.pageseg
~~~~~~~~~~~~~~
Layout analysis methods.
"""
import logging
import numpy as np
from typing import Tuple, List, Callable, Optional, Dict, Any, Union
from scipy.ndimage.filters import (gaussian_filter, uniform_filter,
maximum_filter)
from kraken.lib import morph, sl
from kraken.lib.util import pil2array, is_bitonal, get_im_str
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.segmentation import reading_order, topsort
__all__ = ['segment']
logger = logging.getLogger(__name__)
class record(object):
    """
    Simple dict-like object.

    Keyword arguments become attributes and override the defaults
    (``label``, ``bounds``, ``mask``).
    """
    def __init__(self, **kw):
        # Set defaults first so keyword arguments can override them.
        # Previously the defaults were assigned *after* the update and
        # silently clobbered any label/bounds/mask passed by the caller.
        self.label = 0  # type: int
        self.bounds = []  # type: List
        self.mask = None  # type: np.ndarray
        self.__dict__.update(kw)
def find(condition):
    "Return the indices where ravel(condition) is true"
    # flatnonzero == nonzero(ravel(...))[0], expressed as one call.
    indices = np.flatnonzero(condition)
    return indices
def binary_objects(binary: np.ndarray) -> np.ndarray:
    """
    Labels features in an array and segments them into objects.
    """
    labelled, _ = morph.label(binary)
    # One bounding slice per labelled connected component.
    return morph.find_objects(labelled)
def estimate_scale(binary: np.ndarray) -> float:
    """
    Estimates image scale based on number of connected components.
    """
    scalemap = np.zeros(binary.shape)
    # Paint each component's sqrt(area) into the map, smallest first,
    # skipping regions already covered by an earlier (smaller) component.
    for obj in sorted(binary_objects(binary), key=sl.area):
        if np.amax(scalemap[obj]) > 0:
            continue
        scalemap[obj] = sl.area(obj) ** 0.5
    # Median over plausibly glyph-sized values is the scale estimate.
    return np.median(scalemap[(scalemap > 3) & (scalemap < 100)])
def compute_boxmap(binary: np.ndarray, scale: float,
                   threshold: Tuple[float, int] = (.5, 4),
                   dtype: str = 'i') -> np.ndarray:
    """
    Returns grapheme cluster-like boxes based on connected components.
    """
    lo, hi = threshold
    boxmap = np.zeros(binary.shape, dtype)
    for obj in sorted(binary_objects(binary), key=sl.area):
        side = sl.area(obj) ** .5
        # Keep only components whose size is plausible for a glyph.
        if lo * scale <= side <= hi * scale:
            boxmap[obj] = 1
    return boxmap
def compute_lines(segmentation: np.ndarray, scale: float) -> List[record]:
    """Given a line segmentation map, computes a list
    of tuples consisting of 2D slices and masked images."""
    logger.debug('Convert segmentation to lines')
    lines = []
    for idx, obj in enumerate(morph.find_objects(segmentation)):
        if obj is None:
            continue
        # Reject boxes too small to be a text line at this scale.
        if sl.dim1(obj) < 2 * scale or sl.dim0(obj) < scale:
            continue
        mask = (segmentation[obj] == idx + 1)
        if np.amax(mask) == 0:
            continue
        line = record()
        line.label = idx + 1
        line.bounds = obj
        line.mask = mask
        lines.append(line)
    return lines
def compute_separators_morph(binary: np.ndarray, scale: float, sepwiden: int = 10, maxcolseps: int = 2) -> np.ndarray:
    """Finds vertical black lines corresponding to column separators.

    Args:
        binary: Binary (0/1) page image.
        scale: Estimated typographic scale of the page.
        sepwiden: Amount by which candidate separators are widened.
        maxcolseps: Maximum number of separators to keep.

    Returns:
        Binary image marking the detected vertical separators.
    """
    logger.debug('Finding vertical black column lines')
    d0 = int(max(5, scale/4))
    d1 = int(max(5, scale)) + sepwiden
    # Thicken strokes, then keep only structures connected over a long
    # vertical run (10*scale) -- candidate black column lines.
    thick = morph.r_dilation(binary, (d0, d1))
    vert = morph.rb_opening(thick, (10*scale, 1))
    vert = morph.r_erosion(vert, (d0//2, sepwiden))
    # Prune by width first, then keep the tallest maxcolseps candidates.
    vert = morph.select_regions(vert, sl.dim1, min=3, nbest=2*maxcolseps)
    vert = morph.select_regions(vert, sl.dim0, min=20*scale, nbest=maxcolseps)
    return vert
def compute_colseps_conv(binary: np.ndarray, scale: float = 1.0,
                         minheight: int = 10, maxcolseps: int = 2) -> np.ndarray:
    """
    Find column separators by convolution and thresholding.

    Args:
        binary: Binary (0/1) page image.
        scale: Estimated typographic scale of the page.
        minheight: Minimum separator height, in multiples of scale.
        maxcolseps: Maximum number of separators to return.

    Returns:
        Separators
    """
    logger.debug(f'Finding max {maxcolseps} column separators')
    # find vertical whitespace by thresholding the smoothed ink density
    smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale, 1))
    thresh = (smoothed < np.amax(smoothed)*0.1)
    # find column edges by filtering (horizontal first derivative)
    grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0, 1))
    grad = uniform_filter(grad, (10.0*scale, 1))
    grad = (grad > 0.5*np.amax(grad))
    # combine edges and whitespace: a separator is whitespace adjacent
    # to a strong column edge
    seps = np.minimum(thresh, maximum_filter(grad, (int(scale), int(5*scale))))
    seps = maximum_filter(seps, (int(2*scale), 1))
    # select only the biggest column separators
    seps = morph.select_regions(seps, sl.dim0, min=minheight*scale,
                                nbest=maxcolseps)
    return seps
def compute_black_colseps(binary: np.ndarray, scale: float, maxcolseps: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Computes column separators from vertical black lines.

    Args:
        binary: Numpy array of the binary image
        scale: Estimated typographic scale of the page.
        maxcolseps: Maximum number of column separators to return.

    Returns:
        (colseps, binary): the separator map and the input with black
        separator pixels removed.
    """
    logger.debug('Extract vertical black column separators from lines')
    # Pass maxcolseps by keyword: the third positional parameter of
    # compute_separators_morph is `sepwiden`, not `maxcolseps`, so the
    # previous positional call silently set the wrong parameter.
    seps = compute_separators_morph(binary, scale, maxcolseps=maxcolseps)
    colseps = np.maximum(compute_colseps_conv(binary, scale, maxcolseps=maxcolseps), seps)
    # Blank out the black separators so they are not treated as text.
    binary = np.minimum(binary, 1-seps)
    return colseps, binary
def compute_white_colseps(binary: np.ndarray, scale: float, maxcolseps: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Computes column separators from whitespace.

    Args:
        binary: Numpy array of the binary image
        scale: Estimated typographic scale of the page.
        maxcolseps: Maximum number of separators to return.

    Returns:
        colseps:
    """
    # Thin wrapper: whitespace-only separator detection.
    colseps = compute_colseps_conv(binary, scale, maxcolseps=maxcolseps)
    return colseps
def norm_max(v: np.ndarray) -> np.ndarray:
    """
    Normalizes the input array by its maximum value.
    """
    peak = np.amax(v)
    return v / peak
def compute_gradmaps(binary: np.ndarray, scale: float, gauss: bool = False):
    """
    Use gradient filtering to find baselines

    Args:
        binary: Binary (0/1) page image.
        scale: Estimated typographic scale of the page.
        gauss: Use gaussian instead of uniform filtering

    Returns:
        (bottom, top, boxmap)
    """
    # use gradient filtering to find baselines
    logger.debug('Computing gradient maps')
    # Restrict the analysis to glyph-sized connected components.
    boxmap = compute_boxmap(binary, scale)
    cleaned = boxmap*binary
    if gauss:
        grad = gaussian_filter(1.0*cleaned, (0.3*scale, 6*scale), order=(1, 0))
    else:
        grad = gaussian_filter(1.0*cleaned, (max(4, 0.3*scale),
                                             scale), order=(1, 0))
        grad = uniform_filter(grad, (1, 6*scale))
    # Vertical derivative: negative lobes mark glyph bottoms (baselines),
    # positive lobes mark tops (x-height lines); each normalized to [0,1].
    bottom = norm_max((grad < 0)*(-grad))
    top = norm_max((grad > 0)*grad)
    return bottom, top, boxmap
def compute_line_seeds(binary: np.ndarray, bottom: np.ndarray, top: np.ndarray,
                       colseps: np.ndarray, scale: float, threshold: float = 0.2) -> np.ndarray:
    """
    Base on gradient maps, computes candidates for baselines and xheights.
    Then, it marks the regions between the two as a line seed.
    """
    logger.debug('Finding line seeds')
    vrange = int(scale)
    # Local maxima of the bottom/top gradient maps are baseline and
    # x-height candidates.
    bmarked = maximum_filter(bottom == maximum_filter(bottom, (vrange, 0)),
                             (2, 2))
    # NOTE(review): `threshold` enters twice (threshold*amax*threshold),
    # effectively squaring it; likewise for tmarked below.  This matches
    # the historical behavior -- confirm intent before simplifying.
    bmarked = bmarked * (bottom > threshold*np.amax(bottom)*threshold)*(1-colseps)
    tmarked = maximum_filter(top == maximum_filter(top, (vrange, 0)), (2, 2))
    tmarked = tmarked * (top > threshold*np.amax(top)*threshold/2)*(1-colseps)
    tmarked = maximum_filter(tmarked, (1, 20))
    seeds = np.zeros(binary.shape, 'i')
    delta = max(3, int(scale/2))
    # Walk each column top-down (transitions sorted descending), filling
    # the span between a baseline marker and the next top marker above.
    for x in range(bmarked.shape[1]):
        transitions = sorted([(y, 1) for y in find(bmarked[:, x])] +
                             [(y, 0) for y in find(tmarked[:, x])])[::-1]
        transitions += [(0, 0)]
        for ls in range(len(transitions)-1):
            y0, s0 = transitions[ls]
            if s0 == 0:
                continue
            seeds[y0-delta:y0, x] = 1
            y1, s1 = transitions[ls+1]
            if s1 == 0 and (y0-y1) < 5*scale:
                seeds[y1:y0, x] = 1
    seeds = maximum_filter(seeds, (1, int(1+scale)))
    # Seeds must never cross a column separator.
    seeds = seeds * (1-colseps)
    seeds, _ = morph.label(seeds)
    return seeds
def remove_hlines(binary: np.ndarray, scale: float, maxsize: int = 10) -> np.ndarray:
    """
    Removes horizontal black lines that only interfere with page segmentation.

    Args:
        binary:
        scale:
        maxsize: maximum size of removed lines

    Returns:
        numpy.ndarray containing the filtered image.
    """
    logger.debug('Filtering horizontal lines')
    labels, _ = morph.label(binary)
    for idx, box in enumerate(morph.find_objects(labels)):
        # Zero out components suspiciously wide for this scale
        # (labels[box] is a view, so the assignment edits in place).
        if sl.width(box) > maxsize * scale:
            component = labels[box]
            component[component == idx + 1] = 0
    return np.array(labels != 0, 'B')
def rotate_lines(lines: np.ndarray, angle: float, offset: int) -> np.ndarray:
    """
    Rotates line bounding boxes around the origin and adding and offset.
    """
    logger.debug(f'Rotate line coordinates by {angle} with offset {offset}')
    theta = np.radians(angle)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rot = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    # Treat each box as two (x, y) corner points, rotate, then shift.
    # `2*offset` relies on tuple repetition to duplicate the (x, y)
    # offset for both corners of a box.
    pts = np.array(lines).reshape((-1, 2))
    shift = np.array([2*offset])
    boxes = pts.dot(rot).reshape((-1, 4)).astype(int) + shift
    xs = np.sort(boxes[:, [0, 2]])
    ys = np.sort(boxes[:, [1, 3]])
    return np.column_stack((xs.flatten(), ys.flatten())).reshape(-1, 4)
def segment(im, text_direction: str = 'horizontal-lr',
            scale: Optional[float] = None,
            maxcolseps: float = 2,
            black_colseps: bool = False,
            no_hlines: bool = True,
            pad: Union[int, Tuple[int, int]] = 0,
            mask: Optional[np.ndarray] = None,
            reading_order_fn: Callable = reading_order) -> Dict[str, Any]:
    """
    Segments a page into text lines.

    Segments a page into text lines and returns the absolute coordinates of
    each line in reading order.

    Args:
        im: A bi-level page of mode '1' or 'L'
        text_direction: Principal direction of the text
                        (horizontal-lr/rl/vertical-lr/rl)
        scale: Scale of the image. Will be auto-determined if set to `None`.
        maxcolseps: Maximum number of whitespace column separators
        black_colseps: Whether column separators are assumed to be vertical
                       black lines or not
        no_hlines: Switch for small horizontal line removal.
        pad: Padding to add to line bounding boxes. If int the same padding is
             used both left and right. If a 2-tuple, uses (padding_left,
             padding_right).
        mask: A bi-level mask image of the same size as `im` where 0-valued
              regions are ignored for segmentation purposes. Disables column
              detection.
        reading_order_fn: Function to call to order line output. Callable
                          accepting a list of slices (y, x) and a text
                          direction in (`rl`, `lr`).

    Returns:
        A dictionary containing the text direction and a list of reading order
        sorted bounding boxes under the key 'boxes':

        .. code-block::

            {'text_direction': '$dir', 'boxes': [(x1, y1, x2, y2),...]}

    Raises:
        KrakenInputException: if the input image is not binarized or the text
                              direction is invalid.
    """
    im_str = get_im_str(im)
    logger.info(f'Segmenting {im_str}')
    if im.mode != '1' and not is_bitonal(im):
        logger.error(f'Image {im_str} is not bi-level')
        raise KrakenInputException(f'Image {im_str} is not bi-level')

    # rotate input image for vertical lines so the rest of the pipeline
    # only ever sees horizontal text; boxes are rotated back at the end.
    if text_direction.startswith('horizontal'):
        angle = 0
        offset = (0, 0)
    elif text_direction == 'vertical-lr':
        angle = 270
        offset = (0, im.size[1])
    elif text_direction == 'vertical-rl':
        angle = 90
        offset = (im.size[0], 0)
    else:
        logger.error(f'Invalid text direction \'{text_direction}\'')
        raise KrakenInputException(f'Invalid text direction {text_direction}')

    logger.debug(f'Rotating input image by {angle} degrees')
    im = im.rotate(angle, expand=True)

    a = pil2array(im)
    # Threshold at the midpoint of the value range and invert so that
    # ink pixels are 1.
    binary = np.array(a > 0.5*(np.amin(a) + np.amax(a)), 'i')
    binary = 1 - binary

    # np.dot(*im.size) is just width*height; bail out on pages with an
    # implausible number of connected components (noise, halftones).
    _, ccs = morph.label(1 - binary)
    if ccs > np.dot(*im.size)/(30*30):
        logger.warning(f'Too many connected components for a page image: {ccs}')
        return {'text_direction': text_direction, 'boxes': []}

    if not scale:
        scale = estimate_scale(binary)

    if no_hlines:
        binary = remove_hlines(binary, scale)
    # emptyish images will cause exceptions here.
    try:
        # NOTE(review): `if mask:` relies on object truthiness as a
        # not-None check; `mask` is treated as a PIL image here despite
        # the np.ndarray annotation -- confirm against callers.
        if mask:
            if mask.mode != '1' and not is_bitonal(mask):
                logger.error('Mask is not bitonal')
                raise KrakenInputException('Mask is not bitonal')
            mask = mask.convert('1')
            if mask.size != im.size:
                logger.error(f'Mask size {mask.size} doesn\'t match image size {im.size}')
                raise KrakenInputException(f'Mask size {mask.size} doesn\'t match image size {im.size}')
            logger.info('Masking enabled in segmenter. Disabling column detection.')
            mask = mask.rotate(angle, expand=True)
            colseps = pil2array(mask)
        elif black_colseps:
            colseps, binary = compute_black_colseps(binary, scale, maxcolseps)
        else:
            colseps = compute_white_colseps(binary, scale, maxcolseps)
    except ValueError:
        logger.warning(f'Exception in column finder (probably empty image) for {im_str}')
        return {'text_direction': text_direction, 'boxes': []}

    # Seed line regions from the gradient maps, then grow the seeds over
    # the glyph boxes to obtain a per-line label image.
    bottom, top, boxmap = compute_gradmaps(binary, scale)
    seeds = compute_line_seeds(binary, bottom, top, colseps, scale)
    llabels = morph.propagate_labels(boxmap, seeds, conflict=0)
    spread = morph.spread_labels(seeds, maxdist=scale)
    llabels = np.where(llabels > 0, llabels, spread*binary)
    segmentation = llabels*binary

    lines = compute_lines(segmentation, scale)
    # Sort lines into reading order; text_direction[-2:] is 'lr' or 'rl'.
    order = reading_order_fn([line.bounds for line in lines], text_direction[-2:])
    lsort = topsort(order)
    lines = [lines[i].bounds for i in lsort]
    # Convert (row-slice, col-slice) pairs into (x1, y1, x2, y2) boxes.
    lines = [(s2.start, s1.start, s2.stop, s1.stop) for s1, s2 in lines]

    if isinstance(pad, int):
        pad = (pad, pad)
    lines = [(max(x[0]-pad[0], 0), x[1], min(x[2]+pad[1], im.size[0]), x[3]) for x in lines]

    # Rotate boxes back into the original (unrotated) image frame.
    return {'text_direction': text_direction,
            'boxes': rotate_lines(lines, 360-angle, offset).tolist(),
            'script_detection': False}
|
mittagessen/kraken
|
kraken/pageseg.py
|
Python
|
apache-2.0
| 15,317
|
[
"Gaussian"
] |
52d7a0aed6ec17c42946a3db9297779e6214519bcd948dd9993500c43be0f78f
|
# Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
from astropy.table import Column
import astropy.units as u
from astropy.coordinates import SkyCoord
from ..base import SimulationSequenceElement
from ..math.rotations import axangle2mat
from ..math.utils import norm_vector, h2e, e2h
class PointingModel(SimulationSequenceElement):
    '''A base model for all pointing models

    Conventions:

    - All angles (``ra``, ``dec``, and ``roll``) are given in decimal degrees.
    - x-axis points to sky aimpoint.
    - ``roll = 0`` means: z axis points North (measured N -> E).

    For :math:`\delta \pm 90^{\circ}` the :math:`\alpha` value is
    irrelevant for the pointing direction - any right ascension will
    lead to a pointing on the pole. A value for ``ra`` is still
    required, because it determines the orientation of the detector
    plane. Obviously, for pointing straight at the pole, the simple
    interpretation *z axis points north* is meaningless, but the
    combination of ``ra``, ``dec`` and ``roll`` still uniquely
    determines the position of the coordinate system.
    '''
    def add_dir(self, photons):
        '''Attach an empty homogeneous direction column 'dir' (shape (n, 4)).'''
        linecoords = Column(name='dir', length=len(photons),
                            shape=(4,))
        photons.add_column(linecoords)
        photons['dir'].unit = u.mm
        # Leave everything unset, but chances are I will forget the 4th
        # component. Play safe here.
        photons['dir'][:, 3] = 0

    def process_photons(self, photons):
        '''Add the 'dir' column and return the photon table.'''
        self.add_dir(photons)
        return photons
class FixedPointing(PointingModel):
    r'''Transform spacecraft to fixed sky system.

    This matrix transforms from the spacecraft system to a
    right-handed Cartesian system that is defined in the following
    way: the (x,y) plane is defined by the celestial equator, and
    the x-axis points to :math:`(\alpha, \delta) = (0,0)`.

    Parameters
    ----------
    coords : `astropy.coordinates.SkyCoord`
        Position of the source on the sky.
    roll : `~astropy.units.quantity.Quantity`
        ``roll = 0`` means: z axis points North (measured N -> E).
    reference_transform : np.array of shape (4, 4)
        By default, photons from an on-axis source come in parallel to the x-axis
        of the coordinate system. Their direction points from x=+inf inwards.
        If the simulation uses a different coordinate system (e.g. the optical
        axis is along the z-axis) set ``reference_transform`` to a matrix that
        performs the conversion.

        The optical axis of the telescope is the normal to the surface of its
        entrance aperture. The pointing needs to know this to determine
        the correct direction of the photons.
        Also, sources that do not shine directly onto the telescope aperture but
        hit it at an angle, will see a smaller projected geometric area.
        This is taken into account by reducing the probability of off-axis photons
        accordingly, and thus this object needs to know the orientation (the
        direction of the optical axis and rotation) of the aperture.

    Notes
    -----
    For :math:`\delta \pm 90^{\circ}` the :math:`\alpha` value is
    irrelevant for the pointing direction - any right ascension will
    lead to a pointing on the pole. A value for ``ra`` is still
    required, because it determines the orientation of the detector
    plane. Obviously, for pointing straight at the pole, the simple
    interpretation *z axis points north* is meaningless, but the
    combination of ``ra``, ``dec`` and ``roll`` still uniquely
    determines the position of the coordinate system.
    '''
    def __init__(self, **kwargs):
        self.coords = kwargs.pop('coords')
        if not self.coords.isscalar:
            raise ValueError("Coordinate must be scalar, not array.")
        # Default roll of 0 rad; see class docstring for the convention.
        self.roll = kwargs.pop('roll', 0. * u.rad)
        self.reference_transform = kwargs.pop('reference_transform', np.eye(4))
        super().__init__(**kwargs)

    @property
    def offset_coos(self):
        '''Return `~astropy.coordinates.SkyOffsetFrame`'''
        return self.coords.skyoffset_frame(rotation=self.roll)

    def photons_dir(self, coos, time):
        '''Calculate direction of photons in homogeneous coordinates.

        Parameters
        ----------
        coos : `astropy.coordinates.SkyCoord`
            Origin of each photon on the sky
        time : np.array
            Time for each photons in sec

        Returns
        -------
        photons_dir : np.array of shape (n, 4)
            Homogeneous direction vector for each photon
        '''
        photondir = coos.transform_to(self.offset_coos)
        # Minus sign here because photons start at +inf and move towards origin
        photonsdir = norm_vector(-photondir.cartesian.xyz.T)
        return np.einsum('...ij,...j->...i', self.reference_transform, e2h(photonsdir, 0))

    def photons_pol(self, photonsdir, polangle, time):
        '''Calculate a polarization vector for linearly polarized light.

        The current definition cannot handle photons coming exactly from either
        the North pole or the South Pole of the sphere, because the polangle
        definition "North through east" is not well-defined in these positions.

        Parameters
        ----------
        photonsdir : np.array of shape (n, 4)
            Direction of photons
        polangle : np.array
            Polarization angle measured N through E. If polangle has no
            units, it is assumed to be specified in radian.
        time : np.array
            Time for each photons in sec
        '''
        if hasattr(polangle, "unit") and (polangle.unit is not None):
            polangle = polangle.to(u.rad)
        north = SkyCoord(0., 90., unit='deg', frame=self.coords)
        northdir = e2h(north.transform_to(self.offset_coos).cartesian.xyz.T, 0)
        northdir = np.dot(self.reference_transform, northdir)
        # Project "north" into the plane perpendicular to each photon
        # direction; "east" completes the right-handed basis.
        n_inskyplane = norm_vector(northdir - photonsdir * np.dot(northdir, photonsdir.T)[:, None])
        e_inskyplane = e2h(np.cross(photonsdir[:, :3], n_inskyplane[:, :3]), 0)
        return np.cos(polangle)[:, None] * n_inskyplane + np.sin(polangle)[:, None] * e_inskyplane

    def process_photons(self, photons):
        '''Fill 'dir' and 'polarization' and record pointing keywords.

        Parameters
        ----------
        photons : `astropy.table.Table`
        '''
        photons = super().process_photons(photons)
        photons['dir'] = self.photons_dir(SkyCoord(photons['ra'],
                                                   photons['dec'],
                                                   unit='deg'),
                                          photons['time'].data)
        photons['polarization'] = self.photons_pol(photons['dir'].data,
                                                   photons['polangle'].data,
                                                   photons['time'].data)
        photons.meta['RA_PNT'] = (self.coords.ra.degree, '[deg] Pointing RA')
        photons.meta['DEC_PNT'] = (self.coords.dec.degree,
                                   '[deg] Pointing Dec')
        photons.meta['ROLL_PNT'] = (self.roll.to(u.degree).value,
                                    '[deg] Pointing Roll')
        photons.meta['RA_NOM'] = (self.coords.ra.degree,
                                  '[deg] Nominal Pointing RA')
        photons.meta['DEC_NOM'] = (self.coords.dec.degree,
                                   '[deg] Nominal Pointing Dec')
        photons.meta['ROLL_NOM'] = (self.roll.to(u.degree).value,
                                    '[deg] Nominal Pointing Roll')
        return photons
class JitterPointing(FixedPointing):
    '''Transform spacecraft to fixed sky system.

    This extends `marxs.source.FixedPointing` by adding a random
    jitter coordinate. In this simple implementation the jitter
    angles applied to two consecutive photons are entirely
    uncorrelated, even if these two photons arrive at the same time.

    This class makes the assumption that jitter is small (no change in
    the projected geometric area of the aperture due to jitter).

    Parameters
    ----------
    jitter : `~astropy.units.quantity.Quantity`
        Gaussian sigma of jitter angle
    '''
    def __init__(self, **kwargs):
        # abs() guards against an accidentally negative sigma.
        self.jitter = np.abs(kwargs.pop('jitter'))
        super().__init__(**kwargs)

    def process_photons(self, photons):
        '''Apply a random per-photon jitter rotation to dir/polarization.'''
        photons = super().process_photons(photons)
        # Get random jitter direction
        n = len(photons)
        randang = np.random.rand(n) * 2. * np.pi
        # Rotation axes lie in the (y, z) plane, perpendicular to the
        # nominal pointing (x) direction.
        ax = np.vstack([np.zeros(n), np.sin(randang), np.cos(randang)]).T
        if self.jitter > 0:
            # For comparison it's often useful to run a model with jitter=0
            # but that would fail np.random.normal(scale=0)
            jitterang = np.random.normal(scale=self.jitter.to(u.radian).value, size=n)
            jitterrot = axangle2mat(ax, jitterang)
            photons['dir'] = e2h(np.einsum('...ij,...i->...j', jitterrot,
                                           h2e(photons['dir'])), 0)
            photons['polarization'] = e2h(np.einsum('...ij,...i->...j', jitterrot,
                                                    h2e(photons['polarization'])), 0)
        return photons
|
Chandra-MARX/marxs
|
marxs/source/pointing.py
|
Python
|
gpl-3.0
| 9,285
|
[
"Gaussian"
] |
89cda99f35abf404be283177448e761d026bbe166222dae6e83b7e3234a98383
|
""" Powerful utility for running a TCP/UDP server that is used to script
Mayavi2 from the network. This uses Twisted. This particular version
has been written for the wxPython, adding support for a Qt4 version
should be trivial.
The key functions exposed are::
serve_tcp(...)
serve_udp(...)
See the function documentation for more information. Here is sample
usage::
from mayavi import mlab
from mayavi.tools import server
mlab.test_plot3d()
server.serve_tcp()
The TCP server will listen on port 8007 by default in the above. Any
data sent to the server is simply exec'd, meaning you can do pretty much
anything you want. The `engine`, `scene`, `camera` and `mlab` are all
available and can be used. For example after running the above you can
do this::
$ telnet localhost 8007
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
scene.camera.azimuth(45)
mlab.clf()
mlab.test_contour3d()
scene.camera.zoom(1.5)
The nice thing about this is that you do not lose any interactivity of
your app and can continue to use its UI as before; any network commands
will be simply run on top of this.
**Warning** while this is very powerful it is also a **huge security
hole** since the remote user can do pretty much anything they want.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2009-2015, Enthought, Inc.
# License: BSD Style.
import sys
import wx
# Install wxreactor; must be done before the reactor is imported below.
from twisted.internet import wxreactor
wxreactor.install()
# The usual twisted imports.
from twisted.internet.protocol import Protocol, DatagramProtocol, Factory
from twisted.internet import reactor
from twisted.python import log
###############################################################################
# `M2UDP` protocol.
###############################################################################
class M2UDP(DatagramProtocol):
    """Implements a brain dead but supremely powerful UDP API. Any data
    coming in is simply exec'd. Meaning you can do pretty much anything
    you want. The `engine`, `scene`, `camera` and `mlab` are all
    available and can be used. For example you can easily send this on
    the network::

        scene.camera.azimuth(45)
        mlab.clf()
        mlab.test_contour3d()
        scene.camera.zoom(1.5)

    And these will run just fine retaining the full interactivity of the
    mayavi app.
    """
    def datagramReceived(self, data, host_port):
        """Given a line of data, simply execs it to do whatever."""
        host, port = host_port
        log.msg("Received: %r from %s:%d" % (data, host, port))
        c = data.strip()
        if len(c) > 0:
            # Convenience names made visible to the exec'd code.
            mlab = self.mlab
            engine = self.engine
            scene = self.scene
            camera = scene.camera
            try:
                # NOTE(review): locals() is passed in the position exec
                # uses as the globals mapping; that is what exposes
                # mlab/engine/scene/camera to the received code.
                # Confirm before reordering the arguments.
                exec(c, locals(), globals())
            except:
                log.err()
            # Redraw so the effect of the command is visible immediately.
            scene.render()
###############################################################################
# `M2TCP` protocol
###############################################################################
class M2TCP(Protocol):
    """Implements a brain dead but supremely powerful TCP API. Any
    data coming in is simply exec'd. Meaning you can do pretty much
    anything you want. The `engine`, `scene`, `camera` and `mlab` are
    all available and can be used. For example you can easily send this
    on the network::

        scene.camera.azimuth(45)
        mlab.clf()
        mlab.test_contour3d()
        scene.camera.zoom(1.5)

    And these will run just fine retaining the full interactivity of the
    mayavi app.
    """
    # Fallback maximum number of concurrent connections; the effective
    # limit is factory.maxConnect when serve_tcp(max_connect=...) set it.
    maxConnect = 1

    def connectionMade(self):
        log.msg('ConnectionMade')
        self.factory.numConnect += 1
        # Honor the limit configured on the factory.  Previously the
        # class attribute (always 1) was consulted, so the max_connect
        # argument of serve_tcp() was silently ignored.
        limit = getattr(self.factory, 'maxConnect', self.maxConnect)
        if self.factory.numConnect > limit:
            self.transport.write("Server already in use, try later\n")
            self.transport.loseConnection()

    def connectionLost(self, reason):
        log.msg('ConnectionLost')
        self.factory.numConnect -= 1

    def dataReceived(self, data):
        """Given a line of data, simply execs it to do whatever."""
        c = data.strip()
        log.msg('Received:', c)
        if len(c) > 0:
            # Convenience names made visible to the exec'd code.
            mlab = self.factory.mlab
            engine = self.factory.engine
            scene = self.factory.scene
            camera = scene.camera
            try:
                exec(c, locals(), globals())
            except:
                log.err()
            # Redraw so the effect of the command is visible immediately.
            scene.render()
###############################################################################
# Utility functions.
###############################################################################
def serve_udp(engine=None, port=9007, logto=sys.stdout):
    """Serve the `M2UDP` protocol using the given `engine` on the
    specified `port` logging messages to given `logto` which is a
    file-like object. This function will block till the service is
    closed. There is no need to call `mlab.show()` after or before
    this. The Mayavi UI will be fully responsive.

    **Parameters**

    :engine: Mayavi engine to use. If this is `None`,
             `mlab.get_engine()` is used to find an appropriate engine.

    :port: int: port to serve on.

    :logto: file : File like object to log messages to. If this is
            `None` it disables logging.

    **Examples**

    Here is a very simple example::

        from mayavi import mlab
        from mayavi.tools import server
        mlab.test_plot3d()
        server.serve_udp()

    Test it like so::

        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.bind(('', 9008))
        s.sendto('camera.azimuth(10)', ('', 9007))

    **Warning**

    Data sent is exec'd so this is a security hole.
    """
    from mayavi import mlab
    e = engine or mlab.get_engine()
    # Setup the protocol with the right attributes.
    proto = M2UDP()
    proto.engine = e
    proto.scene = e.current_scene.scene
    proto.mlab = mlab
    if logto is not None:
        log.startLogging(logto)
    log.msg('Serving Mayavi2 UDP server on port', port)
    log.msg('Using Engine', e)
    # Register the running wxApp so Twisted drives the wx event loop.
    reactor.registerWxApp(wx.GetApp())
    # Listen on the requested port (default 9007) with the protocol above.
    reactor.listenUDP(port, proto)
    # Run the server + app. This will block.
    reactor.run()
def serve_tcp(engine=None, port=8007, logto=sys.stdout, max_connect=1):
    """Serve the `M2TCP` protocol using the given `engine` on the
    specified `port` logging messages to given `logto` which is a
    file-like object. This function will block till the service is
    closed. There is no need to call `mlab.show()` after or before
    this. The Mayavi UI will be fully responsive.

    **Parameters**

    :engine: Mayavi engine to use. If this is `None`,
             `mlab.get_engine()` is used to find an appropriate engine.

    :port: int: port to serve on.

    :logto: file: File like object to log messages to. If this is
            `None` it disables logging.

    :max_connect: int: Maximum number of simultaneous connections to
                  support.

    **Examples**

    Here is a very simple example::

        from mayavi import mlab
        from mayavi.tools import server
        mlab.test_plot3d()
        server.serve_tcp()

    The TCP server will listen on port 8007 by default in the above.
    Any data sent to the server is simply exec'd, meaning you can do
    pretty much anything you want. The `engine`, `scene`, `camera` and
    `mlab` are all available and can be used. For example after running
    the above you can do this::

        $ telnet localhost 8007
        Trying 127.0.0.1...
        Connected to localhost.
        Escape character is '^]'.
        scene.camera.azimuth(45)
        mlab.clf()
        mlab.test_contour3d()
        scene.camera.zoom(1.5)

    **Warning**

    Data sent is exec'd so this is a security hole.
    """
    from mayavi import mlab
    e = engine or mlab.get_engine()
    # Setup the factory with the right attributes; M2TCP instances read
    # these through self.factory.
    factory = Factory()
    factory.protocol = M2TCP
    factory.maxConnect = max_connect
    factory.numConnect = 0
    factory.engine = e
    factory.scene = e.current_scene.scene
    factory.mlab = mlab
    if logto is not None:
        log.startLogging(logto)
    log.msg('Serving Mayavi2 TCP server on port', port)
    log.msg('Using Engine', e)
    # Register the running wxApp so Twisted drives the wx event loop.
    reactor.registerWxApp(wx.GetApp())
    # Listen on the requested port (default 8007) with the factory above.
    reactor.listenTCP(port, factory)
    # Run the server + app. This will block.
    reactor.run()
###############################################################################
# Examples and tests.
###############################################################################
def test_tcp():
    """Simple test for the TCP server: shows a demo plot, then blocks
    serving it on the default TCP port (8007)."""
    from mayavi import mlab
    mlab.test_plot3d()
    serve_tcp()
def test_udp():
    """Simple test for the UDP server: shows a demo plot, then blocks
    serving it on the default UDP port."""
    from mayavi import mlab
    mlab.test_plot3d()
    serve_udp()
if __name__ == '__main__':
    # Run the TCP variant by default when executed as a script.
    test_tcp()
|
dmsurti/mayavi
|
mayavi/tools/server.py
|
Python
|
bsd-3-clause
| 9,319
|
[
"Mayavi"
] |
d0095fc725207ec081503ed4473b394b0c0195a8878f3405d41aae26b970c4d7
|
"""
Serializes a Cython code tree to Cython code. This is primarily useful for
debugging and testing purposes.
The output is in a strict format: no whitespace or comments from the input
are preserved (nor could they be, as they are not present in the code tree).
"""
from __future__ import absolute_import, print_function
from .Compiler.Visitor import TreeVisitor
from .Compiler.ExprNodes import *
class LinesResult(object):
    """Accumulates serialized output as a list of finished lines plus
    the partially-built current line.

    Attributes:
        lines: completed lines, in emission order (no trailing newlines).
        s: text of the line currently being built.
    """

    def __init__(self):
        self.lines = []
        self.s = u""

    def put(self, s):
        """Append *s* to the current line."""
        self.s = self.s + s

    def newline(self):
        """Finish the current line and start an empty one."""
        self.lines.append(self.s)
        self.s = u""

    def putline(self, s):
        """Append *s* and finish the line in one step."""
        self.put(s)
        self.newline()
class DeclarationWriter(TreeVisitor):
    """Serializes Cython declaration nodes back to source text.

    Output accumulates in a :class:`LinesResult`; indentation is tracked
    with a counter multiplied into ``indent_string``.  Subclasses add
    statement/expression serialization (``CodeWriter``) or restrict it
    (``PxdWriter``).

    Fix over the original: ``visit_CArrayDeclaratorNode`` was defined
    twice with identical bodies; the duplicate has been removed (the
    second definition silently shadowed the first).
    """

    indent_string = u"    "

    def __init__(self, result=None):
        super(DeclarationWriter, self).__init__()
        if result is None:
            result = LinesResult()
        self.result = result
        self.numindents = 0       # current indentation depth
        self.tempnames = {}       # temp handle -> printable name (used by CodeWriter)
        self.tempblockindex = 0

    def write(self, tree):
        """Serialize *tree* and return the LinesResult holding the text."""
        self.visit(tree)
        return self.result

    def indent(self):
        self.numindents += 1

    def dedent(self):
        self.numindents -= 1

    def startline(self, s=u""):
        """Begin a new line at the current indent; do not terminate it."""
        self.result.put(self.indent_string * self.numindents + s)

    def put(self, s):
        """Append *s* to the current (already started) line."""
        self.result.put(s)

    def putline(self, s):
        """Emit a complete, indented line."""
        self.result.putline(self.indent_string * self.numindents + s)

    def endline(self, s=u""):
        """Terminate the current line, optionally appending *s* first."""
        self.result.putline(s)

    def line(self, s):
        """Emit *s* as a whole indented line."""
        self.startline(s)
        self.endline()

    def comma_separated_list(self, items, output_rhs=False):
        """Serialize *items* separated by ", ".

        When *output_rhs* is true, items with a non-None ``default``
        get an `` = <default>`` suffix (used for declarators).
        """
        if len(items) > 0:
            for item in items[:-1]:
                self.visit(item)
                if output_rhs and item.default is not None:
                    self.put(u" = ")
                    self.visit(item.default)
                self.put(u", ")
            self.visit(items[-1])

    def visit_Node(self, node):
        # Fallback: any node type without an explicit handler is an error.
        raise AssertionError("Node not handled by serializer: %r" % node)

    def visit_ModuleNode(self, node):
        self.visitchildren(node)

    def visit_StatListNode(self, node):
        self.visitchildren(node)

    def visit_CDefExternNode(self, node):
        # `cdef extern from *` when no include file is given.
        if node.include_file is None:
            file = u'*'
        else:
            file = u'"%s"' % node.include_file
        self.putline(u"cdef extern from %s:" % file)
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CPtrDeclaratorNode(self, node):
        self.put('*')
        self.visit(node.base)

    def visit_CReferenceDeclaratorNode(self, node):
        self.put('&')
        self.visit(node.base)

    def visit_CArrayDeclaratorNode(self, node):
        # NOTE: this method appeared twice in the original source with an
        # identical body; only one definition is kept.
        self.visit(node.base)
        self.put(u'[')
        if node.dimension is not None:
            self.visit(node.dimension)
        self.put(u']')

    def visit_CFuncDeclaratorNode(self, node):
        # TODO: except, gil, etc.
        self.visit(node.base)
        self.put(u'(')
        self.comma_separated_list(node.args)
        self.endline(u')')

    def visit_CNameDeclaratorNode(self, node):
        self.put(node.name)

    def visit_CSimpleBaseTypeNode(self, node):
        # See Parsing.p_sign_and_longness: `signed` indexes into
        # (unsigned, default, signed); longness counts shorts/longs.
        if node.is_basic_c_type:
            self.put(("unsigned ", "", "signed ")[node.signed])
            if node.longness < 0:
                self.put("short " * -node.longness)
            elif node.longness > 0:
                self.put("long " * node.longness)
        self.put(node.name)

    def visit_CComplexBaseTypeNode(self, node):
        self.put(u'(')
        self.visit(node.base_type)
        self.visit(node.declarator)
        self.put(u')')

    def visit_CNestedBaseTypeNode(self, node):
        self.visit(node.base_type)
        self.put(u'.')
        self.put(node.name)

    def visit_TemplatedTypeNode(self, node):
        self.visit(node.base_type_node)
        self.put(u'[')
        self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs)
        self.put(u']')

    def visit_CVarDefNode(self, node):
        self.startline(u"cdef ")
        self.visit(node.base_type)
        self.put(u" ")
        self.comma_separated_list(node.declarators, output_rhs=True)
        self.endline()

    def visit_container_node(self, node, decl, extras, attributes):
        """Shared body for struct/union/cppclass/enum serialization.

        *decl* is the leading keyword text, *extras* optional trailing
        decoration (e.g. template params), *attributes* the members.
        """
        # TODO: visibility
        self.startline(decl)
        if node.name:
            self.put(u' ')
            self.put(node.name)
            if node.cname is not None:
                self.put(u' "%s"' % node.cname)
        if extras:
            self.put(extras)
        self.endline(':')
        self.indent()
        if not attributes:
            # An empty body still needs a statement.
            self.putline('pass')
        else:
            for attribute in attributes:
                self.visit(attribute)
        self.dedent()

    def visit_CStructOrUnionDefNode(self, node):
        if node.typedef_flag:
            decl = u'ctypedef '
        else:
            decl = u'cdef '
        if node.visibility == 'public':
            decl += u'public '
        if node.packed:
            decl += u'packed '
        decl += node.kind
        self.visit_container_node(node, decl, None, node.attributes)

    def visit_CppClassNode(self, node):
        extras = ""
        if node.templates:
            extras = u"[%s]" % ", ".join(node.templates)
        if node.base_classes:
            extras += "(%s)" % ", ".join(node.base_classes)
        self.visit_container_node(node, u"cdef cppclass", extras, node.attributes)

    def visit_CEnumDefNode(self, node):
        self.visit_container_node(node, u"cdef enum", None, node.items)

    def visit_CEnumDefItemNode(self, node):
        self.startline(node.name)
        if node.cname:
            self.put(u' "%s"' % node.cname)
        if node.value:
            self.put(u" = ")
            self.visit(node.value)
        self.endline()

    def visit_CClassDefNode(self, node):
        assert not node.module_name
        if node.decorators:
            for decorator in node.decorators:
                self.visit(decorator)
        self.startline(u"cdef class ")
        self.put(node.class_name)
        if node.base_class_name:
            self.put(u"(")
            if node.base_class_module:
                self.put(node.base_class_module)
                self.put(u".")
            self.put(node.base_class_name)
            self.put(u")")
        self.endline(u":")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CTypeDefNode(self, node):
        self.startline(u"ctypedef ")
        self.visit(node.base_type)
        self.put(u" ")
        self.visit(node.declarator)
        self.endline()

    def visit_FuncDefNode(self, node):
        self.startline(u"def %s(" % node.name)
        self.comma_separated_list(node.args)
        self.endline(u"):")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CArgDeclNode(self, node):
        if node.base_type.name is not None:
            self.visit(node.base_type)
            self.put(u" ")
        self.visit(node.declarator)
        if node.default is not None:
            self.put(u" = ")
            self.visit(node.default)

    def visit_CImportStatNode(self, node):
        self.startline(u"cimport ")
        self.put(node.module_name)
        if node.as_name:
            self.put(u" as ")
            self.put(node.as_name)
        self.endline()

    def visit_FromCImportStatNode(self, node):
        self.startline(u"from ")
        self.put(node.module_name)
        self.put(u" cimport ")
        first = True
        for pos, name, as_name, kind in node.imported_names:
            assert kind is None
            if first:
                first = False
            else:
                self.put(u", ")
            self.put(name)
            if as_name:
                self.put(u" as ")
                self.put(as_name)
        self.endline()

    def visit_NameNode(self, node):
        self.put(node.name)

    def visit_IntNode(self, node):
        self.put(node.value)

    def visit_NoneNode(self, node):
        self.put(u"None")

    def visit_NotNode(self, node):
        self.put(u"(not ")
        self.visit(node.operand)
        self.put(u")")

    def visit_DecoratorNode(self, node):
        self.startline("@")
        self.visit(node.decorator)
        self.endline()

    def visit_BinopNode(self, node):
        self.visit(node.operand1)
        self.put(u" %s " % node.operator)
        self.visit(node.operand2)

    def visit_AttributeNode(self, node):
        self.visit(node.obj)
        self.put(u".%s" % node.attribute)

    def visit_BoolNode(self, node):
        self.put(str(node.value))

    # FIXME: represent string nodes correctly
    def visit_StringNode(self, node):
        value = node.value
        if value.encoding is not None:
            value = value.encode(value.encoding)
        self.put(repr(value))

    def visit_PassStatNode(self, node):
        self.startline(u"pass")
        self.endline()
class CodeWriter(DeclarationWriter):
    """Extends DeclarationWriter with serialization of executable
    statements and expressions, producing full Cython source text."""

    def visit_SingleAssignmentNode(self, node):
        # ``lhs = rhs``
        self.startline()
        self.visit(node.lhs)
        self.put(u" = ")
        self.visit(node.rhs)
        self.endline()

    def visit_CascadedAssignmentNode(self, node):
        # ``a = b = ... = rhs``
        self.startline()
        for lhs in node.lhs_list:
            self.visit(lhs)
            self.put(u" = ")
        self.visit(node.rhs)
        self.endline()

    def visit_PrintStatNode(self, node):
        self.startline(u"print ")
        self.comma_separated_list(node.arg_tuple.args)
        if not node.append_newline:
            # Trailing comma suppresses the newline (Python 2 print syntax).
            self.put(u",")
        self.endline()

    def visit_ForInStatNode(self, node):
        self.startline(u"for ")
        self.visit(node.target)
        self.put(u" in ")
        self.visit(node.iterator.sequence)
        self.endline(u":")
        self.indent()
        self.visit(node.body)
        self.dedent()
        if node.else_clause is not None:
            self.line(u"else:")
            self.indent()
            self.visit(node.else_clause)
            self.dedent()

    def visit_IfStatNode(self, node):
        # The IfClauseNode is handled directly without a separate match
        # for clarity.
        self.startline(u"if ")
        self.visit(node.if_clauses[0].condition)
        self.endline(":")
        self.indent()
        self.visit(node.if_clauses[0].body)
        self.dedent()
        for clause in node.if_clauses[1:]:
            self.startline("elif ")
            self.visit(clause.condition)
            self.endline(":")
            self.indent()
            self.visit(clause.body)
            self.dedent()
        if node.else_clause is not None:
            self.line("else:")
            self.indent()
            self.visit(node.else_clause)
            self.dedent()

    def visit_SequenceNode(self, node):
        self.comma_separated_list(node.args)  # Might need to discover whether we need () around tuples...hmm...

    def visit_SimpleCallNode(self, node):
        self.visit(node.function)
        self.put(u"(")
        self.comma_separated_list(node.args)
        self.put(")")

    def visit_GeneralCallNode(self, node):
        # Call with keyword arguments and/or *args.
        self.visit(node.function)
        self.put(u"(")
        posarg = node.positional_args
        if isinstance(posarg, AsTupleNode):
            self.visit(posarg.arg)
        else:
            self.comma_separated_list(posarg.args)  # TupleNode.args
        # NOTE(review): no separator is emitted between the last positional
        # argument and the first keyword argument — looks like a missing
        # ", "; confirm with a call that has both kinds of argument.
        if node.keyword_args:
            if isinstance(node.keyword_args, DictNode):
                for i, (name, value) in enumerate(node.keyword_args.key_value_pairs):
                    if i > 0:
                        self.put(', ')
                    self.visit(name)
                    self.put('=')
                    self.visit(value)
            else:
                raise Exception("Not implemented yet")
        self.put(u")")

    def visit_ExprStatNode(self, node):
        # A bare expression used as a statement.
        self.startline()
        self.visit(node.expr)
        self.endline()

    def visit_InPlaceAssignmentNode(self, node):
        # ``lhs <op>= rhs``
        self.startline()
        self.visit(node.lhs)
        self.put(u" %s= " % node.operator)
        self.visit(node.rhs)
        self.endline()

    def visit_WithStatNode(self, node):
        self.startline()
        self.put(u"with ")
        self.visit(node.manager)
        if node.target is not None:
            self.put(u" as ")
            self.visit(node.target)
        self.endline(u":")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_TryFinallyStatNode(self, node):
        self.line(u"try:")
        self.indent()
        self.visit(node.body)
        self.dedent()
        self.line(u"finally:")
        self.indent()
        self.visit(node.finally_clause)
        self.dedent()

    def visit_TryExceptStatNode(self, node):
        self.line(u"try:")
        self.indent()
        self.visit(node.body)
        self.dedent()
        for x in node.except_clauses:
            self.visit(x)
        if node.else_clause is not None:
            self.visit(node.else_clause)

    def visit_ExceptClauseNode(self, node):
        # Emits Python-2 style ``except Pattern, target:``.
        self.startline(u"except")
        if node.pattern is not None:
            self.put(u" ")
            self.visit(node.pattern)
        if node.target is not None:
            self.put(u", ")
            self.visit(node.target)
        self.endline(":")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_ReturnStatNode(self, node):
        # NOTE(review): a bare ``return`` (node.value is None) would fail
        # here via visit_Node's assertion — confirm whether that case can
        # reach this writer.
        self.startline("return ")
        self.visit(node.value)
        self.endline()

    def visit_ReraiseStatNode(self, node):
        self.line("raise")

    def visit_ImportNode(self, node):
        # Serialized in a non-Cython pseudo form, presumably for debugging.
        self.put(u"(import %s)" % node.module_name.value)

    def visit_TempsBlockNode(self, node):
        """
        Temporaries are output like $1_1', where the first number is
        an index of the TempsBlockNode and the second number is an index
        of the temporary which that block allocates.
        """
        idx = 0
        for handle in node.temps:
            self.tempnames[handle] = "$%d_%d" % (self.tempblockindex, idx)
            idx += 1
        self.tempblockindex += 1
        self.visit(node.body)

    def visit_TempRefNode(self, node):
        self.put(self.tempnames[node.handle])
class PxdWriter(DeclarationWriter):
    """Serializes declarations only, as they would appear in a .pxd file."""

    def __call__(self, node):
        """Serialize *node*, print the result to stdout, return the node."""
        lines = self.write(node).lines
        print(u'\n'.join(lines))
        return node

    def visit_CFuncDefNode(self, node):
        # Inline functions carry their body and have no .pxd declaration.
        if 'inline' in node.modifiers:
            return
        keyword = u'cpdef ' if node.overridable else u'cdef '
        self.startline(keyword)
        if node.visibility != 'private':
            self.put(node.visibility)
            self.put(u' ')
        if node.api:
            self.put(u'api ')
        self.visit(node.declarator)

    def visit_StatNode(self, node):
        # Executable statements are omitted from declaration files.
        pass
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/Cython/CodeWriter.py
|
Python
|
gpl-3.0
| 15,254
|
[
"VisIt"
] |
d27fac9a70b90de90ed6dfa3f2381a3b516b3eb3621c90d2446153679c23a530
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBiovizbase(RPackage):
    """Basic graphic utilities for visualization of genomic data.

    The biovizBase package is designed to provide a set of utilities, color
    schemes and conventions for genomic data. It serves as the base for
    various high-level packages for biological data visualization. This
    saves development effort and encourages consistency."""

    homepage = "https://bioconductor.org/packages/biovizBase"
    git = "https://git.bioconductor.org/packages/biovizBase.git"

    # Versions are pinned to Bioconductor release commits, newest first.
    version('1.38.0', commit='d0f3362e0ad0e90b4b1d3e47b13ed57907d03403')
    version('1.32.0', commit='de044bf236cdcd71214ae7b77689a8f0ab4f5cc8')
    version('1.30.1', commit='b6776d0470e2920f71127652f185f68ca1fd2c82')
    version('1.28.2', commit='43d09060028665a237b04bfeb9e2575782b08063')
    version('1.26.0', commit='640742f48384f01d117b70dc5c64737e97ae9b4b')
    version('1.24.0', commit='ae9cd2ff665b74a8f45ed9c1d17fc0a778b4af6c')

    # R itself; 1.38.0+ raises the minimum to R 3.5.0.
    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r@3.5.0:', when='@1.38.0:', type=('build', 'run'))
    # CRAN dependencies.
    depends_on('r-scales', type=('build', 'run'))
    depends_on('r-hmisc', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-dichromat', type=('build', 'run'))
    # Bioconductor dependencies, with minimum versions from the DESCRIPTION.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-s4vectors@0.9.25:', type=('build', 'run'))
    depends_on('r-s4vectors@0.23.19:', when='@1.38.0:', type=('build', 'run'))
    depends_on('r-iranges@1.99.28:', type=('build', 'run'))
    depends_on('r-genomeinfodb@1.5.14:', type=('build', 'run'))
    depends_on('r-genomicranges@1.23.21:', type=('build', 'run'))
    depends_on('r-summarizedexperiment', type=('build', 'run'))
    depends_on('r-biostrings@2.33.11:', type=('build', 'run'))
    depends_on('r-rsamtools@1.17.28:', type=('build', 'run'))
    depends_on('r-genomicalignments@1.1.16:', type=('build', 'run'))
    depends_on('r-genomicfeatures@1.21.19:', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-variantannotation@1.11.4:', type=('build', 'run'))
    depends_on('r-ensembldb@1.99.13:', type=('build', 'run'))
    depends_on('r-annotationfilter@0.99.8:', type=('build', 'run'))
    depends_on('r-rlang', when='@1.28.2:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-biovizbase/package.py
|
Python
|
lgpl-2.1
| 2,539
|
[
"Bioconductor"
] |
936474c933884406cc204ccd4d3f445ca12e198d0dd7cecfb3c339c339e3a65c
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
from .ChiggerObserver import ChiggerObserver
class TimerObserver(ChiggerObserver):
    """
    Class for creating timers to be passed in to RenderWindow object.
    """

    @staticmethod
    def getOptions():
        """Options controlling the timer interval and termination."""
        opt = ChiggerObserver.getOptions()
        opt.add('duration', 1000, "The repeat interval, in milliseconds, of the timer.", vtype=int)
        opt.add('count', None, "The maximum number of timer calls before terminating timer loop.",
                vtype=int)
        opt.add('terminate', False, "Terminate the VTK window when the 'count' is reached.")
        return opt

    def __init__(self, **kwargs):
        super(TimerObserver, self).__init__(vtk.vtkCommand.TimerEvent, **kwargs)
        self._count = 0  # number of times _callback has fired

    def addObserver(self, event, vtkinteractor):
        """
        Register a repeating timer and attach this object's callback to it.
        """
        interval = self.getOption('duration')
        vtkinteractor.CreateRepeatingTimer(interval)
        return vtkinteractor.AddObserver(event, self._callback)

    def count(self):
        """
        Return the current number of callback calls.
        """
        return self._count

    def update(self, **kwargs):
        """
        Update the window object.
        """
        super(TimerObserver, self).update(**kwargs)
        window = self._window
        if window.needsUpdate():
            window.update()

    def _callback(self, obj, event):  # pylint: disable=unused-argument
        """
        The function to be called by the RenderWindow.

        Inputs:
            obj, event: Required by VTK.
        """
        limit_hit = self.isOptionValid('count') and (self._count >= self.getOption('count'))
        if not limit_hit:
            self.update()
            self._count += 1
            return
        # Reached the configured call limit: stop the timer (and
        # optionally the whole interactor loop).
        self._window.getVTKInteractor().DestroyTimer()
        if self.getOption('terminate'):
            self._window.getVTKInteractor().TerminateApp()
|
harterj/moose
|
python/chigger/observers/TimerObserver.py
|
Python
|
lgpl-2.1
| 2,204
|
[
"MOOSE",
"VTK"
] |
75971b7c4ae476cdae4aa8368f4aab1f82fe5d49583ca55f7950ad948419d781
|
#!/usr/bin/env python
# ============================================================================================================
# Created by: Lee Bergstrand & Matt McInnes
# Description: A next generation bioinformatics file format converter and sequence feature extractor.
# Requirements: This script requires the Biopython module: http://biopython.org/wiki/Download
# ============================================================================================================
from __future__ import print_function
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio import AlignIO
from Bio import Phylo
from BioID import BioID
from BioID import BioIDFormat
import ntpath
import os
import argparse
import sys
import yaml
import multiprocessing
import functools
# ==============================================================================
# BioMagickFormat: Defines the properties of a bioinformatic file format
# object with extra data specific for BioMagick's functionality
# ==============================================================================
class BioMagickFormat(BioIDFormat):
    """A file-format record extended with BioMagick-specific data.

    Adds the output filename extension and the BioPython conversion
    family ("seq", "align" or "phylo") on top of the BioID format.
    """

    def __init__(self, name, extension, bioclass):
        # The base BioIDFormat identification fields are unused here,
        # hence the None/[] placeholders — see the BioID module.
        super(BioMagickFormat, self).__init__(name, None, None, [])
        self.extension = extension  # extension appended to output files
        self.bioclass = bioclass    # selects SeqIO / AlignIO / Phylo converter
# ---------------------------------
# Command line interface controller
# ---------------------------------
def _resolve_alphabet(name):
    """Map a CLI alphabet name to its Biopython IUPAC alphabet instance.

    Prints an error listing the valid names and exits with status 1 when
    *name* is not recognized.
    """
    alphabets = {
        'ambigdna': IUPAC.IUPACAmbiguousDNA,
        'unambigdna': IUPAC.IUPACUnambiguousDNA,
        'exdna': IUPAC.ExtendedIUPACDNA,
        'ambigrna': IUPAC.IUPACAmbiguousRNA,
        'unambigrna': IUPAC.IUPACUnambiguousRNA,
        'prot': IUPAC.IUPACProtein,
        'exprot': IUPAC.ExtendedIUPACProtein,
    }
    try:
        return alphabets[name]()
    except KeyError:
        print("Error: %s is not a valid alphabet" % name)
        print("Valid alphabets: ambigdna, unambigdna, exdna, ambigrna, unambigrna, prot, exprot")
        sys.exit(1)


def main(args):
    """Command line controller: validate arguments and dispatch conversions.

    :param args: argparse Namespace with input/outfmt/outdir/stdout/
                 alphabet/jobs attributes (see the parser at the bottom
                 of this file).
    """
    input_files = args.input
    out_fmt = args.outfmt
    out_dir = args.outdir[0] if args.outdir else "."

    if not out_fmt:
        # Fix: message previously read "at laest".
        print("Error: at least 1 output format is needed")
        sys.exit(1)

    if args.stdout is True:
        # Fix: guard input_files against None (stdin piping with -s
        # previously crashed on len(None)).
        if input_files and len(input_files) > 1:
            print("Error: outputting to stdout is only possible with single-file conversions")
            sys.exit(1)
        else:
            out_dir = None  # None signals "write to stdout" to direct_convert()

    # multiprocessing via Pool is only used on Python >= 2.7.
    old_py = sys.version_info[0] <= 2 and sys.version_info[1] <= 6
    if old_py and not args.stdout:
        print("Warning: disabling multicore processing for Python <2.7")

    alphabet = _resolve_alphabet(args.alphabet[0]) if args.alphabet is not None else None

    # Load and parse YAML format export settings.
    with open("BioMagickFormatInfo.yml", "rU") as settings_file:
        contents = settings_file.read()
    settings = {}
    for setting in yaml.safe_load(contents):
        settings[setting["name"]] = BioMagickFormat(setting["name"], setting["extension"], setting["bioclass"])

    if not input_files:
        # Convert a single file read from stdin via a temp file.
        if sys.stdin.isatty():
            print("Error: you must either specify an input file or pipe some data to stdin")
            sys.exit(1)
        with open("converted.tmp", "w") as tmp_file:
            tmp_file.write(sys.stdin.read())
        id_results = BioID("./BioIDFormatInfo.yml").identify(["converted.tmp"])
        direct_convert(settings, id_results, out_dir, out_fmt, alphabet)
        os.remove("converted.tmp")
    elif len(input_files) == 1 or old_py:
        # Single file, or multiprocessing unavailable: convert in-process.
        id_results = BioID("./BioIDFormatInfo.yml").identify(input_files)
        direct_convert(settings, id_results, out_dir, out_fmt, alphabet)
    else:
        if args.jobs is not None:
            # Fix: args.jobs is a one-element list (nargs=1); the original
            # compared the list itself against an int.
            requested = args.jobs[0]
            process_count = requested if requested <= len(input_files) else len(input_files)
        else:
            # Fix: the original took the *maximum* of core count and file
            # count, spawning more workers than cores; use the minimum.
            process_count = min(multiprocessing.cpu_count(), len(input_files))
        pool = multiprocessing.Pool(processes=process_count)
        pool.map(functools.partial(subprocess_controller, format_settings=settings, output_path=out_dir,
                                   output_formats=out_fmt, input_alphabet=alphabet), input_files)
# -----------------------------------------------------------
# Generates a dictionary of sequence record objects per file
# -----------------------------------------------------------
def generate_sequence_objects(id_results):
    """Parse each identified file into its sequence records.

    :param id_results: dict mapping file path -> identified format name.
    :return: dict mapping file path -> list of sequence records.

    Fix: SeqIO.parse returns a lazy iterator bound to the open handle;
    the original stored that iterator and let the ``with`` block close
    the file, so iterating the result later would fail. The records are
    now materialized while the handle is still open.
    """
    seq_objects = {}
    for file_path, identity in id_results.items():
        with open(file_path, 'rU') as input_handle:
            seq_objects[file_path] = list(SeqIO.parse(input_handle, identity))
    return seq_objects
# --------------------------------------------------------------------------
# A wrapper for the convert function which calls it from within a subprocess
# --------------------------------------------------------------------------
def subprocess_controller(input_file, format_settings, output_path, output_formats, input_alphabet):
    """Identify and convert a single file; used as the worker body for
    multiprocessing.Pool.map (see main()). Each call builds its own
    BioID instance rather than sharing one between processes.
    """
    id_results = BioID("./BioIDFormatInfo.yml").identify([input_file])
    direct_convert(format_settings, id_results, output_path, output_formats, input_alphabet)
# --------------------------------------------------------------------------------------------
# Converts between bioinformatic formats using SeqIO's, AlignIO's and Phylo's convert function
# --------------------------------------------------------------------------------------------
def _dispatch_convert(settings, in_path, in_format, out_file, out_format, alphabet):
    """Run the BioPython converter matching the input format's class.

    Propagates ValueError from the BioPython converters; exits the
    program when the settings table carries an unknown conversion class.
    """
    format_setting = settings[in_format]
    if format_setting.bioclass == "seq":
        SeqIO.convert(in_path, in_format.lower(), out_file, out_format, alphabet)
    elif format_setting.bioclass == "phylo":
        Phylo.convert(in_path, in_format.lower(), out_file, out_format)
    elif format_setting.bioclass == "align":
        AlignIO.convert(in_path, in_format.lower(), out_file, out_format)
    else:
        print("Error: invalid BioPython conversion class: %s" % format_setting.bioclass)
        sys.exit(1)


def direct_convert(settings, id_results, out_path, out_formats, alphabet):
    """Convert identified files using SeqIO/AlignIO/Phylo.

    :param settings: dict of format name -> BioMagickFormat.
    :param id_results: dict of input path -> identified format name.
    :param out_path: output directory, or None to print a single
                     conversion to stdout.
    :param out_formats: list of output format names.
    :param alphabet: optional Biopython alphabet passed to SeqIO.

    The format dispatch itself is shared via _dispatch_convert; only the
    error handling differs between the stdout and directory modes (the
    stdout mode aborts, the directory mode skips the failing file).
    """
    if out_path is None:
        # stdout mode: convert the first (only) identified file via a
        # temp file and print its contents.
        out_file = "./conv.tmp"
        in_path, in_format = list(id_results.items())[0]
        out_format = out_formats[0]
        if in_format == "unidentified":
            raise Exception("Failed to identify the file")
        try:
            _dispatch_convert(settings, in_path, in_format, out_file, out_format, alphabet)
        except ValueError as e:
            print("Error in conversion of " + in_path + " to " + out_format + ": " + str(e))
            sys.exit(1)
        with open(out_file, "r") as tmp_file:
            print(tmp_file.read())
        os.remove(out_file)  # Is this really necessary?
    else:
        for out_format in out_formats:
            for in_path, in_format in id_results.items():
                # Build <out_path>/<input basename without extension>.<ext>,
                # with the platform-appropriate separator and basename helper.
                if sys.platform == "win32":
                    sep, base = "\\", ntpath.basename(in_path)
                else:
                    sep, base = "/", os.path.basename(in_path)
                out_file = out_path if out_path[-1] == sep else out_path + sep
                out_file += base.split('.')[0] + "." + settings[out_format].extension
                print("\nConverting %s file %s to %s file %s" % (in_format, in_path, out_format, out_file))
                try:
                    _dispatch_convert(settings, in_path, in_format, out_file, out_format, alphabet)
                except ValueError as e:
                    # Best-effort batch mode: report and move on.
                    print("\nError in conversion of " + in_path + " to " + out_format + ": " + str(e))
                    print("Skipping " + in_path + " ...\n")
                    continue
if __name__ == '__main__':
    # ------------------------------
    # Command line interface options
    # ------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', metavar='INPATH', nargs=1, help='''
    A comma-separated list of input file paths. If not specified, input is read from stdin.''')
    parser.add_argument('-s', '--stdout', dest="stdout", action='store_true', help='''
    Output result of single-file conversion to stdout.''')
    parser.add_argument('-o', '--outdir', metavar='OUTPATH', nargs=1, help='''
    An output directory for output files. If not specified, the current working directory is used.''')
    parser.add_argument('-f', '--outfmt', metavar='OUTFORMAT', nargs=1, help='''
    A comma-separated list of output file formats.''')
    parser.add_argument('-a', '--alphabet', metavar='ALPHA', nargs=1, help='''
    The alphabet to use for conversion (ambigdna, unambigdna, exdna, ambigrna, unambigrna, prot, exprot).''')
    parser.add_argument('-j', '--jobs', metavar='JOBS', nargs=1, type=int, help='''
    The number of processes to use for multiple files (defaults to the number of processor cores).''')
    cli_args = parser.parse_args()
    try:
        # -i and -f take a single comma-separated value; split them into
        # real lists before handing off to main().
        if cli_args.input:
            cli_args.input = cli_args.input[0].split(",")
        cli_args.outfmt = cli_args.outfmt[0].split(",")
    except Exception as ex:
        # Most likely -f was omitted (outfmt is None): show usage and bail.
        parser.print_help()
        sys.exit(1)
    try:
        main(cli_args)
    except Exception as ex:
        # Top-level catch-all: report the failure and exit non-zero.
        print("\nError: %s" % ex)
        sys.exit(1)
|
LeeBergstrand/BioMagick
|
BioMagick.py
|
Python
|
mit
| 9,222
|
[
"Biopython"
] |
7162fc1a66753fe54336a8b8ddf23e314400b109dffa86ed111b22f706f88fa8
|
from string import Template
import logging
from hs_core.hydroshare.utils import get_resource_types
logger = logging.getLogger(__name__)
def parse_app_url_template(url_template_string, term_dict_list=()):
    """
    Replace pre-defined HS Terms in url_template_string with real values.

    Example: http://www.myapps.com/app1/?res_type=${HS_RES_TYPE}
         --> http://www.myapps.com/app1/?res_type=GenericResource

    :param url_template_string: The url template string containing HS Terms
    :param term_dict_list: a list of dicts storing Term Name -> Term Value pairs
    :return: the substituted url string, or None if the template references
             undefined terms (or substitution otherwise fails)
    """
    merged_term_dic = {}
    try:
        for term_dict in term_dict_list:
            merged_term_dic.update(term_dict)
        # Template.substitute raises KeyError/ValueError for unknown or
        # malformed placeholders; any failure means "cannot parse".
        return Template(url_template_string).substitute(merged_term_dic)
    except Exception:
        # Fix: the original returned from a `finally` block, which also
        # swallowed exceptions it did not catch (e.g. KeyboardInterrupt).
        logger.exception("[WebApp] '{0}' cannot be parsed by term_dict {1}.".
                         format(url_template_string, str(merged_term_dic)))
        return None
def get_SupportedResTypes_choices():
    """
    Harvest all resource types registered in the system and return them as
    choice pairs, skipping the WebApp (ToolResource) type itself and the
    xDCI-excluded types:

    [
        ["RESOURCE_CLASS_NAME_1", "RESOURCE_VERBOSE_NAME_1"],
        ["RESOURCE_CLASS_NAME_2", "RESOURCE_VERBOSE_NAME_2"],
        ...
    ]
    """
    xdci_excluded_types = ["Geographic Raster",
                           "HIS Referenced Time Series",
                           "Time Series",
                           "Multidimensional (NetCDF)",
                           "Model Program Resource",
                           "Model Instance Resource",
                           "SWAT Model Instance Resource",
                           "Geographic Feature (ESRI Shapefiles)",
                           "Script Resource",
                           "MODFLOW Model Instance Resource",
                           ]
    choices = []
    for res_type in get_resource_types():
        cls_name = res_type.__name__
        verbose = res_type._meta.verbose_name
        # Guard clauses replace the original nested conditionals.
        if cls_name.lower() == "toolresource":
            continue
        if verbose in xdci_excluded_types:
            continue
        choices.append([cls_name, verbose])
    return choices
|
RENCI/xDCIShare
|
hs_tools_resource/utils.py
|
Python
|
bsd-3-clause
| 2,523
|
[
"NetCDF"
] |
dd1a0515ab3e662eb5fa34116c5bd526d675734a02d72a50641ceec9fa20c8ca
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Wrapper control suite environments that adds Gaussian noise to actions."""
import dm_env
import numpy as np
# Error template used when the wrapped environment's action bounds are
# not all finite (noise scaling needs a finite max-min range).
_BOUNDS_MUST_BE_FINITE = (
    'All bounds in `env.action_spec()` must be finite, got: {action_spec}')
class Wrapper(dm_env.Environment):
    """Wraps a control environment and adds Gaussian noise to actions."""

    def __init__(self, env, scale=0.01):
        """Initializes a new action noise Wrapper.

        Args:
          env: The control suite environment to wrap.
          scale: The standard deviation of the noise, expressed as a fraction
            of the max-min range for each action dimension.

        Raises:
          ValueError: If any of the action dimensions of the wrapped
            environment are unbounded.
        """
        spec = env.action_spec()
        lower_finite = np.all(np.isfinite(spec.minimum))
        upper_finite = np.all(np.isfinite(spec.maximum))
        if not (lower_finite and upper_finite):
            raise ValueError(_BOUNDS_MUST_BE_FINITE.format(action_spec=spec))
        self._minimum = spec.minimum
        self._maximum = spec.maximum
        # Per-dimension std dev: `scale` fraction of each dimension's range.
        self._noise_std = scale * (spec.maximum - spec.minimum)
        self._env = env

    def step(self, action):
        noise = self._env.task.random.normal(scale=self._noise_std)
        noisy_action = action + noise
        # Clip in place so the perturbed actions stay within the
        # `action_spec` bounds. MuJoCo implicitly clips out-of-bounds
        # control inputs, but the actions may not map directly onto
        # MuJoCo actuators, and other wrapper layers may expect
        # in-bounds values.
        np.clip(noisy_action, self._minimum, self._maximum, out=noisy_action)
        return self._env.step(noisy_action)

    def reset(self):
        return self._env.reset()

    def observation_spec(self):
        return self._env.observation_spec()

    def action_spec(self):
        return self._env.action_spec()

    def __getattr__(self, name):
        # Delegate everything else to the wrapped environment.
        return getattr(self._env, name)
|
deepmind/dm_control
|
dm_control/suite/wrappers/action_noise.py
|
Python
|
apache-2.0
| 2,630
|
[
"Gaussian"
] |
b872188f4f9da642df416a90965357598ecb47db3b71d719c6cee9560f08240b
|
"""Implementation of Geodis Api."""
from roulier.api import Api
class GeodisApiRestWs(Api):
    """Base schema for Geodis REST/WS payloads.

    Combines the public ("external") schema seen by callers with an
    internal mapping API that renames fields to Geodis wire names.
    """

    def _schemas(self):
        # Top-level payload layout: service parameters plus credentials.
        return {
            "service": self._service(),
            "auth": self._auth(),
        }

    def normalize(self, data):
        # Two passes: the public schema fills defaults, then the internal
        # API regroups/renames fields for the wire format.
        externalApi = super(GeodisApiRestWs, self)
        internalApi = self._interal_api()
        step1 = externalApi.normalize(data)
        step2 = internalApi.normalize(step1)
        return step2

    def api_values(self):
        """Return a dict containing expected keys.

        It's a normalized version of the schema.
        only internal api
        """
        return self._validator().normalized({}, self.api_schema())

    def _interal_api(self):
        # Hook returning the internal mapping API; overridden by subclasses.
        # NOTE(review): name looks like a typo for `_internal_api`, but it is
        # kept because subclasses override this exact name.
        pass
class GeodisMappingIn(Api):
    """Internal API"""

    def flatten(self, data, out):
        """Recursively copy every non-dict leaf of `data` into `out`."""
        for key, value in data.items():
            if isinstance(value, dict):
                self.flatten(value, out)
            else:
                out[key] = value

    def normalize(self, data):
        """Flatten everything except `auth` into a flat `service` dict,
        then normalize the reshaped payload."""
        flat = {"auth": data["auth"], "service": {}}
        remainder = {key: val for (key, val) in data.items() if key != "auth"}
        self.flatten(remainder, flat["service"])
        return super(GeodisMappingIn, self).normalize(flat)
class GeodisApiTrackingListMapping(GeodisMappingIn):
    """Internal API

    Used to rename fields."""

    def _schemas(self):
        # (public name, Geodis wire name) pairs; insertion order preserved.
        renames = [
            ("shippingDate", "dateDepart"),
            ("shippingDateStart", "dateDepartDebut"),
            ("shippingDateEnd", "dateDepartFin"),
            ("agencyId", "codeSa"),
            ("customerId", "codeClient"),
            ("reference1", "reference1"),
            ("reference2", "refDest"),
            ("name", "nomDest"),
            ("zip", "codePostalDest"),
            ("estDeliveryDate", "dateLivraison"),
            ("shippingId", "noRecepisse"),
            ("barcode", "cabColis"),
            ("trackingId", "noSuivi"),
        ]
        return {"service": {src: {"rename": dst} for src, dst in renames}}
class GeodisApiTrackingMapping(GeodisMappingIn):
    """Internal API

    Used to rename fields."""

    def _schemas(self):
        # Only the tracking identifier needs renaming for this endpoint.
        service = {"trackingId": {"rename": "noSuivi"}}
        return {"service": service}
class GeodisApiTracking(GeodisApiRestWs):
    """Public API for tracking a single shipment."""

    def _service(self):
        """Service schema: both identifiers are optional strings."""
        optional_str = {"type": "string", "default": "", "empty": True}
        return {
            "refUniExp": dict(optional_str),
            "trackingId": dict(optional_str),
        }

    def _interal_api(self):
        return GeodisApiTrackingMapping()
class GeodisApiTrackingList(GeodisApiRestWs):
    """Public API for searching/listing shipments."""

    @staticmethod
    def _optional_str(*names):
        """Build a schema where every name is an optional string field."""
        spec = {"type": "string", "default": "", "empty": True}
        return {name: dict(spec) for name in names}

    def _service(self):
        return self._optional_str(
            "shippingDate", "shippingDateStart", "shippingDateEnd",
            "agencyId", "customerId", "reference1", "reference2")

    def _tracking(self):
        return self._optional_str(
            "estDeliveryDate", "shippingId", "barcode", "trackingId")

    def _to_address(self):
        return self._optional_str("name", "zip")

    def _schemas(self):
        """Extend the base schema with tracking and destination parts."""
        schema = super(GeodisApiTrackingList, self)._schemas()
        schema["tracking"] = self._tracking()
        schema["to_address"] = self._to_address()
        return schema

    def _interal_api(self):
        return GeodisApiTrackingListMapping()
class GeodisMappingOut(Api):
    """Internal API mapping carrier responses back to roulier field names."""

    def normalize(self, data):
        """Project `data` onto the structure described by `self.schema()`."""
        return self.visit(data, self.schema())

    def visit(self, data, schema):
        """Recursively build a dict shaped like `schema`; every leaf string
        in the schema is looked up as a key in `data`."""
        result = {}
        for key, node in schema.items():
            if isinstance(node, dict):
                result[key] = self.visit(data, node)
            else:
                result[key] = data[node]
        return result
class GeodisApiTrackingListOut(GeodisMappingOut):
    """Output mapping for the tracking-list endpoint (Geodis -> roulier)."""

    @staticmethod
    def _address(suffix):
        """Address mapping; Geodis suffixes fields with Dest or Exp."""
        return {
            "street1": "adresse1" + suffix,
            "street2": "adresse2" + suffix,
            "country": "codePays" + suffix,
            "zip": "codePostal" + suffix,
            "country_name": "libellePays" + suffix,
            "name": "nom" + suffix,
            "city": "ville" + suffix,
        }

    def to_address(self):
        return self._address("Dest")

    def from_address(self):
        return self._address("Exp")

    def parcels(self):
        return {
            "weight": "poids",
        }

    def service(self):
        return {
            "product": "codeProduit",
            "agencyId": "codeSa",
            "customerId": "codeClient",
            "shippingId": "noRecepisse",
            "shippingDate": "dateDepart",
            "reference1": "reference1",
            "reference2": "reference2",
            "reference3": "refDest",
            "option": "codeOption",
        }

    def tracking(self):
        return {
            "statusDate": "dateEtat",
            "estDeliveryDate": "dateLivraison",
            "status": "status",
            "statusDetails": "libelleLongEtat",
            "trackingCode": "noSuivi",
            "publicUrl": "urlSuiviDestinataire",
            "proofUrl": "urlImageEnlevementLivraison",
        }

    def schema(self):
        """Full response mapping assembled from the part mappings above."""
        return {
            "parcels": self.parcels(),
            "service": self.service(),
            "from_address": self.from_address(),
            "to_address": self.to_address(),
            "tracking": self.tracking(),
        }
|
akretion/roulier
|
roulier/carriers/geodis/geodis_api_rest_ws.py
|
Python
|
agpl-3.0
| 6,450
|
[
"VisIt"
] |
ab1b3ccf32be9fb0efd2faa36d79d9f40efc651548f30f146da1496199f534a8
|
"""Function-like object creating triclinic lattices.
The following lattice creator is defined:
Triclinic
"""
from ase.lattice.bravais import Bravais
import numpy as np
from ase.data import reference_states as _refstate
class TriclinicFactory(Bravais):
    "A factory for creating triclinic lattices."

    # The name of the crystal structure in ChemicalElements
    xtal_name = "triclinic"

    # The natural basis vectors of the crystal structure
    int_basis = np.array([[1, 0, 0],
                          [0, 1, 0],
                          [0, 0, 1]])
    basis_factor = 1.0

    # Converts the natural basis back to the crystallographic basis
    inverse_basis = np.array([[1, 0, 0],
                              [0, 1, 0],
                              [0, 0, 1]])
    inverse_basis_factor = 1.0

    def get_lattice_constant(self):
        "Get the lattice constant of an element with triclinic crystal structure."
        if _refstate[self.atomicnumber]['symmetry'] != self.xtal_name:
            raise ValueError(('Cannot guess the %s lattice constant of'
                              + ' an element with crystal structure %s.')
                             % (self.xtal_name,
                                _refstate[self.atomicnumber]['symmetry']))
        return _refstate[self.atomicnumber].copy()

    def make_crystal_basis(self):
        "Make the basis matrix for the crystal unit cell and the system unit cell."
        lattice = self.latticeconstant
        if isinstance(lattice, dict):
            a = lattice['a']
            # Accept either absolute lengths or ratios relative to a.
            try:
                b = lattice['b']
            except KeyError:
                b = a * lattice['b/a']
            try:
                c = lattice['c']
            except KeyError:
                c = a * lattice['c/a']
            alpha = lattice['alpha']
            beta = lattice['beta']
            gamma = lattice['gamma']
        elif len(lattice) == 6:
            (a, b, c, alpha, beta, gamma) = lattice
        else:
            # BUG FIX: was the Python-2-only "raise ValueError, msg" form,
            # which is a SyntaxError under Python 3; the call form below is
            # valid in both interpreters.
            raise ValueError(
                "Improper lattice constants for triclinic crystal.")

        # Angles are given in degrees.
        degree = np.pi / 180.0
        cosa = np.cos(alpha * degree)
        cosb = np.cos(beta * degree)
        sinb = np.sin(beta * degree)
        cosg = np.cos(gamma * degree)
        sing = np.sin(gamma * degree)
        # Standard triclinic cell: a along x, b in the xy-plane, c chosen so
        # that the pairwise dot products reproduce the given angles.
        lattice = np.array([[a, 0, 0],
                            [b * cosg, b * sing, 0],
                            [c * cosb, c * (cosa - cosb * cosg) / sing,
                             c * np.sqrt(sinb**2 - ((cosa - cosb * cosg) / sing)**2)]])
        self.latticeconstant = lattice
        self.miller_basis = lattice
        self.crystal_basis = (self.basis_factor *
                              np.dot(self.int_basis, lattice))
        self.basis = np.dot(self.directions, self.crystal_basis)
        # Sanity-check that the constructed cell reproduces the requested
        # lengths and angles.
        assert abs(np.dot(lattice[0], lattice[1]) - a * b * cosg) < 1e-5
        assert abs(np.dot(lattice[0], lattice[2]) - a * c * cosb) < 1e-5
        assert abs(np.dot(lattice[1], lattice[2]) - b * c * cosa) < 1e-5
        assert abs(np.dot(lattice[0], lattice[0]) - a * a) < 1e-5
        assert abs(np.dot(lattice[1], lattice[1]) - b * b) < 1e-5
        assert abs(np.dot(lattice[2], lattice[2]) - c * c) < 1e-5
# Module-level singleton: the factory instance callers use to build lattices.
Triclinic = TriclinicFactory()
|
grhawk/ASE
|
tools/ase/lattice/triclinic.py
|
Python
|
gpl-2.0
| 3,214
|
[
"ASE",
"CRYSTAL"
] |
4aad796ed744ed33d84da1b751cdef5bb4b31f62103d098dd658776f59c15cce
|
#!/usr/bin/env python3
from mayavi import mlab
import numpy as np
import os
def test_scalar_field():
    """Build a volume rendering of sinc-like scalar data on a regular grid."""
    # arbitrary grid
    grid_x, grid_y, grid_z = np.mgrid[-10:10:20j, -10:10:20j, -10:10:20j]  # type: ignore
    # fake data
    values = np.sin(grid_x * grid_y * grid_z) / (grid_x * grid_y * grid_z)
    field = mlab.pipeline.scalar_field(grid_x, grid_y, grid_z, values)
    volume = mlab.pipeline.volume(field)
    mlab.colorbar(volume)
if __name__ == "__main__":
    test_scalar_field()
    # Skip the blocking GUI event loop on CI runners (CI env var set).
    if not bool(os.environ.get("CI", False)):
        mlab.show()
|
scienceopen/mayavi-examples-python
|
test_scalar_field.py
|
Python
|
mit
| 486
|
[
"Mayavi"
] |
4b79640c9cf4920c6ca42c90d3f5a77c6cdf06c446433f5ccafa1f1c06ab27e9
|
import collections
import networkx
import numpy
import os
import pandas
import pysam
import re
import scipy.stats
from grocsvs import step
from grocsvs import structuralvariants
from grocsvs import utilities
from grocsvs.stages import assembly
from grocsvs.stages import cluster_svs
from grocsvs.stages import call_readclouds
# CIGAR operation codes, numbered as in the SAM specification / pysam.
BAM_CMATCH = 0      # M
BAM_CINS = 1        # I
BAM_CDEL = 2        # D
BAM_CREF_SKIP = 3   # N
BAM_CSOFT_CLIP = 4  # S
BAM_CHARD_CLIP = 5  # H
class WalkAssembliesStep(step.StepChunk):
    """Pipeline step that walks assembled contigs outward from clustered
    breakpoints, chaining breakends together into contig-supported edges.

    Writes a table of edges (``walk_assemblies.tsv``) and creates a
    directory for per-event graphs.  Written for Python 2 (print
    statements below).
    """

    @staticmethod
    def get_steps(options):
        # A single chunk processes every cluster.
        yield WalkAssembliesStep(options)

    def __init__(self, options):
        self.options = options

    def __str__(self):
        return ".".join([self.__class__.__name__])

    def outpaths(self, final):
        """Return output paths; `final` selects results_dir over working_dir.

        NOTE(review): results_dir/working_dir/logger are not set here —
        presumably provided by the step.StepChunk base class; confirm.
        """
        directory = self.results_dir if final \
            else self.working_dir
        walk_assemblies = "walk_assemblies.tsv"
        graphs = "walk_assemblies.graphs"
        paths = {
            "walk_assemblies": os.path.join(directory, walk_assemblies),
            "graphs": os.path.join(directory, graphs)
        }
        return paths

    def run(self):
        """Load clustered SV edges, walk each cluster, and save the result."""
        edges_path = cluster_svs.ClusterSVsStep(self.options).outpaths(final=True)["edges"]
        clusters = pandas.read_table(edges_path)
        # NOTE(review): astype("string") is the legacy pandas string-dtype
        # spelling — confirm against the pinned pandas version.
        clusters["chromx"] = clusters["chromx"].astype("string")
        clusters["chromy"] = clusters["chromy"].astype("string")
        assembled = []
        utilities.ensure_dir(self.outpaths(final=False)["graphs"])
        for cluster_number, cluster in clusters.groupby("cluster"):
            self.logger.log(cluster_number)
            try:
                cur_assembled = self.walk(cluster_number, cluster)
                assembled.append(cur_assembled)
            except IOError:
                # Assembly output missing for this cluster; skip it.
                print "not found", cluster_number
        # TODO: deal with empty list
        # TODO: normalize coordinates according to reference.compare_chroms()
        assembled = pandas.concat(assembled, ignore_index=True)
        assembled["x"] = assembled["x"].astype(int)
        assembled["y"] = assembled["y"].astype(int)
        print self.options.reference.chroms
        print assembled["chromx"].unique()
        print assembled["chromy"].unique()
        outpath = self.outpaths(final=False)["walk_assemblies"]
        assembled.to_csv(outpath, sep="\t", index=False)

    def walk(self, event_name, cluster):
        """Walk contig chains from both ends of each breakpoint in `cluster`.

        Returns a DataFrame of chained breakend pairs restricted to
        chromosomes in the reference set.
        """
        assembly_directory = assembly.AssemblyStep(self.options, event_name)\
            .outpaths(final=True)["assembly_dir"]
        bam, contigs = self.get_contigs(assembly_directory)
        cluster = cluster.loc[cluster["kind"]=="breakpoint"]
        cluster = cluster.sort_values("p")
        chains = set()
        for i, event in cluster.iterrows():
            if event["x"] < 0 or event["y"] < 0:
                continue
            # Walk out from the x side, then from the y side.
            chain = get_chain(bam, event["chromx"], event["x"], contigs)
            chains.update(chain)
            chain = get_chain(bam, event["chromy"], event["y"], contigs)
            chains.update(chain)
        cur_edges = pandas.DataFrame(
            list(chains), columns=["chromx", "x", "orientationx", "chromy", "y", "orientationy", "contig"])
        # Drop edges landing on chromosomes outside the reference set.
        cur_edges = cur_edges.loc[(cur_edges["chromx"].isin(self.options.reference.chroms)) &
                                  (cur_edges["chromy"].isin(self.options.reference.chroms))]
        return cur_edges

    def _barcodes_for_breakpoint(self, chromx, x, orientationx,
                                 chromy, y, orientationy, dist1, dist2):
        """Barcodes shared by fragments supporting both sides of a
        breakpoint, or None if there are none.

        NOTE(review): self.sample / self.dataset are not assigned anywhere
        in this class — confirm they exist before re-enabling callers.
        """
        # TODO: refactor to re-use same version as cluster_svs
        fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new(
            self.options, self.sample, self.dataset,
            chromx, x, chromy, y,
            orientationx+orientationy, dist1, dist2)
        bcx = set(fragsx["bc"])
        bcy = set(fragsy["bc"])
        common_barcodes = bcx.intersection(bcy)
        if len(common_barcodes) < 1:
            return None
        return common_barcodes

    def _compare_breakpoint_pair_pairs(self, breakpoint1, breakpoint2, good_bc_count, dist1, dist2):
        """Fisher-exact comparison of barcode support between two breakpoints.

        Returns (common, total, p-value) or None when either breakpoint has
        no supporting barcodes.
        """
        chrom1x, pos1x, orientation1x, chrom1y, pos1y, orientation1y, _ = breakpoint1
        chrom2x, pos2x, orientation2x, chrom2y, pos2y, orientation2y, _ = breakpoint2
        # TODO: refactor to re-use same version as cluster_svs
        barcodes1 = self._barcodes_for_breakpoint(
            chrom1x, pos1x, orientation1x, chrom1y, pos1y, orientation1y, dist1, dist2)
        barcodes2 = self._barcodes_for_breakpoint(
            chrom2x, pos2x, orientation2x, chrom2y, pos2y, orientation2y, dist1, dist2)
        if barcodes1 is None or barcodes2 is None:
            return None
        total_barcodes = barcodes1.union(barcodes2)
        common_barcodes = barcodes1.intersection(barcodes2)
        # 2x2 table: shared vs exclusive barcodes against the remainder.
        contingency_table = numpy.array([[len(common_barcodes), len(barcodes1-barcodes2)],
                                         [len(barcodes2-barcodes1), good_bc_count-len(total_barcodes)]])
        p = scipy.stats.fisher_exact(contingency_table, alternative="greater")[1]
        return len(common_barcodes), len(total_barcodes), p

    def get_contigs(self, assembly_directory):
        """Open the sorted contig BAM and index its reads by contig name,
        keeping only alignments longer than 45 bp."""
        bam_path = os.path.join(assembly_directory, "contigs.sorted.bam")
        names_to_reads = collections.defaultdict(list)
        self.logger.log(bam_path)
        bam = pysam.AlignmentFile(bam_path)
        for read in bam.fetch():
            if read.reference_length > 45:
                names_to_reads[read.query_name].append(read)
        return bam, names_to_reads
# def analyze_chains(self, chains, event_name):
# dist1 = -500
# dist2 = 5000
# good_bc_count = utilities.get_good_bc_count(self)
# full_graph = networkx.Graph()
# barcode_supported_graph = networkx.Graph()
# for chain in chains:
# for breakpoint in chain:
# chromx, x, orientationx, chromy, y, orientationy, _ = breakpoint
# nodex = get_node_label(chromx, x, orientationx)
# nodey = get_node_label(chromy, y, orientationy)
# if not full_graph.has_edge(nodex, nodey):
# quant = quantify_breakpoint(
# chromx, x,
# chromy, y,
# orientationx+orientationy,
# self.options, self.sample, self.dataset,
# good_bc_count, dist1, dist2)
# if quant is None: continue
# cur_common_counts, cur_total_counts, p = quant
# print breakpoint, cur_common_counts, cur_total_counts, p
# ratio = cur_common_counts/float(cur_total_counts)
# label = "{}/{}={:.2g};{:.2g}".format(
# int(cur_common_counts),
# int(cur_total_counts),
# ratio,
# p)
# if ratio > 0.08 and p < 1e-4 and cur_total_counts > 10:
# barcode_supported_graph.add_edge(nodex, nodey, label=label)
# full_graph.add_edge(nodex, nodey, label=label)
# for j, breakpoint1 in enumerate(chain[:-1]):
# breakpoint2 = chain[j+1]
# quant = self._compare_breakpoint_pair_pairs(
# breakpoint1, breakpoint2, good_bc_count, dist1, dist2)
# if quant is None: continue
# common_counts, total_counts, p = quant
# ratio = common_counts/float(total_counts)
# node1y = get_node_label(*breakpoint1[3:6])
# node2x = get_node_label(*breakpoint2[:3])
# label = "[{}/{}={:.2g};{:.2g}]".format(
# int(common_counts),
# int(total_counts),
# ratio,
# p)
# if ratio > 0.08 and p < 1e-4 and total_counts > 10:
# barcode_supported_graph.add_edge(node1y, node2x, label=label, style="dashed")
# full_graph.add_edge(node1y, node2x, label=label, style="dashed")
# print ":: ALL:", full_graph.edges(data=True)
# print ":: SUPPORTED:", barcode_supported_graph.edges(data=True)
# outdir = self.outpaths(final=False)["graphs"]
# barcode_supported_dot = networkx.nx_agraph.to_agraph(barcode_supported_graph)
# barcode_supported_dot.draw("{}/barcode_supported.{}.pdf".format(outdir, event_name), prog="dot")
# full_dot = networkx.nx_agraph.to_agraph(full_graph)
# full_dot.draw("{}/full_graph.{}.pdf".format(outdir, event_name), prog="dot")
# def breakend_from_label(node):
# if node.startswith("]"):
# return (node[1:].split(":")[0], int(node.split(":")[1].replace(",","")), "+")
# elif node.endswith("["):
# return (node.split(":")[0], int(node[:-1].split(":")[1].replace(",","")), "-")
# from rpy2.robjects import r
# r.pdf("{}/raw{}.pdf".format(outdir, event_name))
# for component in networkx.connected_components(barcode_supported_graph):
# subgraph = barcode_supported_graph.subgraph(component)
# ends = [node for node,degree in subgraph.degree_iter() if degree==1]
# breakends = [node for node in list(networkx.shortest_simple_paths(subgraph, ends[0], ends[1]))[0]]
# breakends = [breakend_from_label(node) for node in breakends]
# breakends = breakends[:-1:2] + breakends[-1:]
# # print ")"*100, breakends
# plot_frags(breakends, self.options, self.sample, self.dataset)
# r["dev.off"]()
def sort_by_ref_pos(q1, r1, q2, r2, l=None):
    """Order two (query, reference) coordinate pairs by reference position.

    `l` (total length) is accepted only so the 5-tuple returned by
    get_query_positions() can be splatted in directly; it is unused.
    """
    return (q1, r1, q2, r2) if r1 < r2 else (q2, r2, q1, r1)
def get_query_positions(read):
    """Compute query/reference start-end coordinates for an alignment.

    Returns (qstart, rstart, qend, rend, total_length), with the query
    coordinates expressed in original-read orientation (flipped when the
    alignment is on the reverse strand).
    """
    cigar = read.cigar
    clip_ops = (BAM_CSOFT_CLIP, BAM_CHARD_CLIP)

    # A leading clip advances the query start without consuming reference.
    qstart = cigar[0][1] if cigar[0][0] in clip_ops else 0
    rstart = read.pos

    qend = 0
    rend = read.pos
    for op, length in cigar:
        if op == BAM_CMATCH:
            qend += length
            rend += length
        elif op in (BAM_CSOFT_CLIP, BAM_CHARD_CLIP, BAM_CINS):
            qend += length
        elif op in (BAM_CDEL, BAM_CREF_SKIP):
            rend += length

    total_length = qend
    # A trailing clip counts toward total_length but not the aligned end.
    if cigar[-1][0] in clip_ops:
        qend -= cigar[-1][1]

    if read.is_reverse:
        qstart, rstart, qend, rend = (total_length - qend, rend,
                                      total_length - qstart, rstart)
    return qstart, rstart, qend, rend, total_length
def is_clipped(read, min_clip_length=40):
    """Classify clipping at either end of `read`'s alignment.

    Returns (flags, ref_lo, ref_hi) where `flags` collects ">"/"<"
    characters for each side carrying a clip longer than
    `min_clip_length`, oriented by the alignment's reference direction.
    """
    q1, r1, q2, r2, total = get_query_positions(read)
    forward = r2 > r1
    flags = ""
    if q1 > min_clip_length:
        flags += ">" if forward else "<"
    if q2 < total - min_clip_length:
        flags += "<" if forward else ">"
    return flags, min(r1, r2), max(r1, r2)
def covered_positions(start, end, reads):
    """Boolean mask over [start, end): True wherever any read aligns."""
    span = end - start
    mask = [False] * span
    for read in reads:
        for ref_pos in read.get_reference_positions():
            idx = ref_pos - start
            if 0 <= idx < span:
                mask[idx] = True
    return mask
def walk_local(bam, chrom, pos, direction, offset=4000, window_size=10000):
    """Gather contig alignments around `pos`, sliding a window along
    `direction` ("+" right / "-" left) until coverage runs out, and return
    the reads overlapping one contiguous covered block, sorted along the
    walk direction.  Returns [] when nothing usable is found.
    """
    reads = set()
    if direction == "-":
        offset = -offset
        window_size = -window_size
    cur_start = pos
    cur_end = pos + window_size
    while True:
        # Give up near the chromosome start instead of walking off the edge.
        if min(cur_start, cur_end) < abs(offset)*2:
            return []
        cur_reads = list(bam.fetch(chrom, min(cur_start, cur_end), max(cur_start, cur_end)))
        # Stop once the window has no substantial (>=40 bp) alignments.
        if len(cur_reads) == 0 or max(read.reference_length for read in cur_reads) < 40:
            break
        reads.update(cur_reads)
        cur_start += offset
        cur_end += offset
    if len(reads) == 0:
        return []
    reads = sorted(reads, key=lambda x: x.pos)
    # NOTE(review): start_index/end_index/increment are computed but never
    # used below — looks like leftover code; confirm before removing.
    if direction == "+":
        start_index = 0
        end_index = len(reads)-1
        increment = 1
    elif direction == "-":
        start_index = len(reads)-1
        end_index = 0
        increment = -1
    positions = set()
    for read in reads:
        positions.update(read.get_reference_positions())
    positions = numpy.array(sorted(positions))
    # Split the covered positions into contiguous blocks separated by gaps
    # larger than |offset|.
    split = numpy.split(positions, numpy.where(numpy.diff(positions)>abs(offset))[0]+1)
    # Keep the first block for "+" walks and the last for "-" walks.
    if direction == "+":
        positions = split[0]
    else:
        positions = split[-1]
    filtered_reads = [read for read in reads if (read.reference_end>=positions[0] and
                                                 read.reference_start<=positions[-1])]
    if direction == "-":
        filtered_reads = sorted(filtered_reads, key=lambda x: x.reference_start)
    elif direction == "+":
        filtered_reads = sorted(filtered_reads, key=lambda x: x.reference_end)
    return filtered_reads
def are_matched(read1, orientation1, read2, orientation2):
    """Decide whether two alignments of the same contig join at a breakend.

    Returns (matched, ref_pos1, ref_pos2): `matched` is True when the two
    alignments abut on the contig (query coordinates within 20 bp) at the
    ends implied by the orientations; the ref positions are the matching
    reference breakend coordinates, in the caller's argument order.
    """
    if reads_overlap(read1, read2):
        # TODO: should figure out if we can improve the assemblies
        # so that we can detect small events
        return False, None, None
    qstart1, rstart1, qend1, rend1 = sort_by_ref_pos(*get_query_positions(read1))
    qstart2, rstart2, qend2, rend2 = sort_by_ref_pos(*get_query_positions(read2))
    # Ensure alignment 1 comes first along the contig (query axis).
    switched = False
    if (qstart1+qend1) > (qstart2+qend2):
        switched = True
        qstart1, rstart1, qend1, rend1, qstart2, rstart2, qend2, rend2 = qstart2, rstart2, qend2, rend2, qstart1, rstart1, qend1, rend1
        orientation1, orientation2 = orientation2, orientation1
    # Pick the query/reference coordinate at the breakend implied by each
    # orientation ("+": right end, "-": left end).
    if orientation1 == "+":
        qadj1 = qend1
        radj1 = rend1
    elif orientation1 == "-":
        qadj1 = qstart1
        radj1 = rstart1
    if orientation2 == "+":
        qadj2 = qend2
        radj2 = rend2
    elif orientation2 == "-":
        qadj2 = qstart2
        radj2 = rstart2
    if switched:
        # Undo the swap so coordinates match the caller's read order.
        radj1, radj2 = radj2, radj1
    if abs(qadj1 - qadj2) < 20:
        return True, radj1, radj2
    return False, None, None
def reads_overlap(read1, read2):
    """True iff both alignments are on the same reference and their
    [start, end] intervals touch or overlap."""
    if read1.reference_id != read2.reference_id:
        return False
    disjoint = (read1.reference_end < read2.reference_start or
                read2.reference_end < read1.reference_start)
    return not disjoint
def _get_chain(bam, chrom, pos, direction, reads_by_name, previous=None):
    """Recursively follow a contig from (chrom, pos), collecting breakend
    links until the contig ends or the walk is abandoned.

    direction is "-" if we're walking from the right to the left, ie looking
    for a "-" orientation breakpoint; and "+" if we're walking from left to
    right.  `previous` tracks visited breakends to stop cycles.
    Returns a list of (chrom, end, dir, next_chrom, other_end,
    other_orientation, contig_name) links, possibly empty.
    """
    reads = walk_local(bam, chrom, pos, direction)
    if len(reads) == 0:
        return []
    # Take the outermost read in the walk direction and require a clip on
    # its far side (otherwise the contig simply ends here).
    if direction == "+":
        read = reads[-1]
        clipping = is_clipped(read)
        if "<" in clipping[0]:
            end = clipping[2]
        else:
            return []
    elif direction == "-":
        read = reads[0]
        clipping = is_clipped(read)
        if ">" in clipping[0]:
            end = clipping[1]
        else:
            return []
    # Look for another alignment of the same contig to jump to.
    for other_read in reads_by_name[read.query_name]:
        if other_read == read: continue
        is_match = False
        for other_orientation in "+-":
            match = are_matched(read, direction, other_read, other_orientation)
            if match[0]:
                is_match = True
                other_end = match[2]
                break
        if is_match:
            next_chrom = other_read.reference_name
            # NOTE(review): Python 2 integer division here; under Python 3
            # this "/" yields a float midpoint — confirm target interpreter.
            next_start = (other_read.pos+other_read.reference_end)/2
            next_orientation = {"+":"-", "-":"+"}[other_orientation]
            # Skip alternate/unplaced contigs (gl*, *hap*, *un*, "_" names).
            bad_chrom = re.search(r"gl|hap|un|_", next_chrom, re.IGNORECASE)
            if bad_chrom is not None:
                return []
            if previous is None:
                previous = set()
            if (next_chrom, next_start, next_orientation) in previous:
                return []
            previous.add((next_chrom, next_start, next_orientation))
            next_chain = _get_chain(bam, next_chrom, next_start, next_orientation,
                                    reads_by_name, previous)
            chain = [(chrom, end, direction, next_chrom, other_end, other_orientation, read.query_name)]
            if next_chain is not None:
                chain.extend(next_chain)
            return chain
    return []
def get_chain(bam, chrom, pos, reads_by_name):
    """Walk left and right of `pos` and splice the two chains together,
    with the leftward links flipped so all links read left-to-right."""
    links = []
    for link in reversed(_get_chain(bam, chrom, pos + 5000, "-", reads_by_name)):
        chromx, x, orientx = link[:3]
        chromy, y, orienty = link[3:6]
        links.append((chromy, y, orienty, chromx, x, orientx, link[6]))
    rightward = _get_chain(bam, chrom, max(0, pos - 5000), "+", reads_by_name)
    if rightward is not None:
        links.extend(rightward)
    return links
# def visualize_chain(chain):
# graph = networkx.Graph()
# prev = None
# for link in chain:
# chromx, posx, orientationx = link[:3]
# chromy, posy, orientationy = link[3:6]
# graph.add_edge(
# get_node_label(chromx, posx, orientationx),
# get_node_label(chromy, posy, orientationy)
# )
def get_node_label(chrom, position, orientation):
    """Render a breakend as a graph-node label; the bracket side encodes
    the orientation ("+" -> "]chrom:pos", otherwise "chrom:pos[")."""
    pos_text = "{:,}".format(int(position))
    if orientation == "+":
        return "]" + "{}:{}".format(chrom, pos_text)
    return "{}:{}".format(chrom, pos_text) + "["
|
grocsvs/grocsvs
|
src/grocsvs/stages/walk_assemblies.py
|
Python
|
mit
| 18,814
|
[
"pysam"
] |
f816ebc208d6119537ace914fec0a691c963fe56a428aba5100039f815299309
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.apis.drugs_api import DrugsApi
class TestDrugsApi(unittest.TestCase):
    """Unit-test stubs for the generated DrugsApi client."""

    def setUp(self):
        # Build a fresh client for every test so cases stay independent.
        self.api = vericred_client.apis.drugs_api.DrugsApi()

    def tearDown(self):
        # Nothing to release; the client holds no external resources here.
        pass

    def test_list_drugs(self):
        """Stub for list_drugs (Drug Search); implementation pending."""
        pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
vericred/vericred-python
|
test/test_drugs_api.py
|
Python
|
apache-2.0
| 10,007
|
[
"VisIt"
] |
bba89d86ffbfbd954e0c7598893f57f374592aeed2267ea331e1b5ba24bf6f6f
|
# Orca
# Copyright (C) 2016 UrbanSim Inc.
# See full license in LICENSE.
import pandas as pd
import pytest
from .. import testing
def test_frames_equal_not_frames():
    """assert_frames_equal must reject inputs that are not DataFrames."""
    df = pd.DataFrame({'a': [1]})
    with pytest.raises(AssertionError) as excinfo:
        testing.assert_frames_equal(df, 1)
    assert 'Inputs must both be pandas DataFrames.' in str(excinfo.value)
def test_frames_equal_mismatched_columns():
    """A column present only in the expected frame triggers a failure."""
    have = pd.DataFrame({'b': [2]})
    want = pd.DataFrame({'a': [1]})
    with pytest.raises(AssertionError) as excinfo:
        testing.assert_frames_equal(have, want)
    assert "Expected column 'a' not found." in str(excinfo.value)
def test_frames_equal_mismatched_rows():
    """An index label present only in the expected frame triggers a failure."""
    have = pd.DataFrame({'a': [1]}, index=[1])
    want = pd.DataFrame({'a': [1]}, index=[0])
    with pytest.raises(AssertionError) as excinfo:
        testing.assert_frames_equal(have, want)
    assert "Expected row 0 not found." in str(excinfo.value)
def test_frames_equal_mismatched_items():
    # Differing cell values should surface numpy's item-comparison message,
    # annotated with the offending column and row.
    expected = pd.DataFrame({'a': [1]})
    actual = pd.DataFrame({'a': [2]})
    with pytest.raises(AssertionError) as info:
        testing.assert_frames_equal(actual, expected)
    # NOTE(review): this literal's exact whitespace must match the error text
    # produced by assert_frames_equal — do not reformat it.
    assert ("""
Items are not equal:
ACTUAL: 2
DESIRED: 1
Column: 'a'
Row: 0""" in str(info.value))
def test_frames_equal():
    """A frame always compares equal to itself."""
    df = pd.DataFrame({'a': [1]})
    testing.assert_frames_equal(df, df)
def test_frames_equal_close():
    """use_close=True tolerates tiny float differences; the default does not."""
    exact = pd.DataFrame({'a': [1]})
    nearly = pd.DataFrame({'a': [1.00000000000002]})
    with pytest.raises(AssertionError):
        testing.assert_frames_equal(exact, nearly)
    testing.assert_frames_equal(exact, nearly, use_close=True)
def test_index_equal_order_agnostic():
    """Indexes with the same labels in different order compare equal."""
    ascending = pd.Index([1, 2, 3])
    descending = pd.Index([3, 2, 1])
    testing.assert_index_equal(ascending, descending)
def test_index_equal_order_agnostic_raises_left():
    """An extra label on the left-hand index must raise."""
    bigger = pd.Index([1, 2, 3, 4])
    smaller = pd.Index([3, 2, 1])
    with pytest.raises(AssertionError):
        testing.assert_index_equal(bigger, smaller)
def test_index_equal_order_agnostic_raises_right():
    """An extra label on the right-hand index must raise."""
    smaller = pd.Index([1, 2, 3])
    bigger = pd.Index([3, 2, 1, 4])
    with pytest.raises(AssertionError):
        testing.assert_index_equal(smaller, bigger)
|
jiffyclub/orca
|
orca/utils/tests/test_testing.py
|
Python
|
bsd-3-clause
| 2,253
|
[
"ORCA"
] |
ea727cfb6071a60d4f6b7983c0ca47c7cca2b4fc844bfd734caf883e6a0bdb70
|
#!/usr/bin/env python
# encoding: utf-8
"""Pyvona : an IVONA python library
Author: Zachary Bears
Contact Email: bears.zachary@gmail.com
"""
import datetime
import hashlib
import hmac
import json
import tempfile
import contextlib
import os
from ..audio import sounds
class PyvonaException(Exception):
    # Base exception for all Pyvona errors (missing deps, bad codec,
    # failed IVONA requests).
    pass
# `requests` is the only hard third-party dependency; fail loudly with a
# Pyvona-specific error if it is absent.
try:
    import requests
    # Suppress urllib3's noisy SSL warnings emitted via requests.
    requests.packages.urllib3.disable_warnings()
except ImportError:
    msg = 'The requests library is essential for Pyvona operation. '
    msg += 'Without it, Pyvona will not function correctly.'
    raise PyvonaException(msg)

# Timestamp formats required by AWS Signature Version 4 signing.
_amazon_date_format = '%Y%m%dT%H%M%SZ'
_date_format = '%Y%m%d'
def create_voice():
    """Factory helper: build and return a fresh `Voice` instance."""
    voice = Voice()
    return voice
class Voice(object):
    """An object that contains all the required methods for interacting
    with the IVONA text-to-speech system
    """
    # Defaults; overwritten from the mopidy_rstation config in __init__.
    voice_name = 'Agata'
    language = 'pl-PL'
    gender = None
    speech_rate = None
    sentence_break = None
    paragraph_break = None
    _codec = "ogg"
    # Friendly region alias -> AWS region identifier.
    region_options = {
        'us-east': 'us-east-1',
        'us-west': 'us-west-2',
        'eu-west': 'eu-west-1',
    }
    access_key = None
    secret_key = None
    # Constants used by AWS Signature Version 4 request signing.
    algorithm = 'AWS4-HMAC-SHA256'
    signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
    _region = None
    _host = None
    _session = None

    @property
    def region(self):
        # Current AWS region identifier (e.g. 'eu-west-1').
        return self._region

    @region.setter
    def region(self, region_name):
        # Unknown aliases silently fall back to 'eu-west-1'.
        self._region = self.region_options.get(region_name, 'eu-west-1')
        self._host = 'tts.{}.ivonacloud.com'.format(self._region)

    @property
    def codec(self):
        # Audio codec requested from IVONA: 'mp3' or 'ogg'.
        return self._codec

    @codec.setter
    def codec(self, codec):
        if codec not in ["mp3", "ogg"]:
            raise PyvonaException(
                "Invalid codec specified. Please choose 'mp3' or 'ogg'")
        self._codec = codec

    @contextlib.contextmanager
    def use_ogg_codec(self):
        # Temporarily force the ogg codec, restoring the previous codec
        # even if the body raises.
        current_codec = self.codec
        self.codec = "ogg"
        try:
            yield
        finally:
            self.codec = current_codec

    def fetch_voice_ogg(self, text_to_speak, filename):
        """Fetch an ogg file for given text and save it to the given file name
        """
        with self.use_ogg_codec():
            self.fetch_voice(text_to_speak, filename)

    def fetch_voice(self, text_to_speak, filename):
        """Fetch a voice file for given text and save it to the given file name
        """
        # Append the codec extension unless the caller already supplied it.
        file_extension = ".{codec}".format(codec=self.codec)
        filename += file_extension if not filename.endswith(
            file_extension) else ""
        with open(filename, 'wb') as f:
            self.fetch_voice_fp(text_to_speak, f)

    def fetch_voice_fp(self, text_to_speak, fp):
        """Fetch a voice file for given text and save it to the given file pointer
        """
        r = self._send_amazon_auth_packet_v4(
            'POST', 'tts', 'application/json', '/CreateSpeech', '',
            self._generate_payload(text_to_speak), self._region, self._host)
        # A JSON body (leading '{') means IVONA returned an error document
        # instead of audio bytes.
        if r.content.startswith(b'{'):
            raise PyvonaException('Error fetching voice: {}'.format(r.content))
        else:
            fp.write(r.content)

    # NOTE(review): `async` became a reserved keyword in Python 3.7; this
    # signature only parses on older interpreters and the parameter needs
    # renaming for modern Python.
    def speak(self, text_to_speak, use_cache=True, async=False):
        """Speak a given text
        """
        try:
            text = u'' + text_to_speak
            text_to_speak = text.encode('utf-8')
        except Exception as e:
            print('Pyvona speak exception ' + str(e))
        if use_cache is False:
            with tempfile.NamedTemporaryFile(delete=False) as f:
                with self.use_ogg_codec():
                    self.fetch_voice_fp(text_to_speak, f)
                f.seek(0)
            # NOTE(review): in this branch `f` is a (closed) file object,
            # while the cache branch passes a path string to play_file —
            # verify sounds.play_file accepts both.
        else:
            # Cache key is the md5 of the utterance; one ogg file per text.
            cache_f = hashlib.md5(text_to_speak).hexdigest() + '.ogg'
            if not os.path.isdir(self.speech_cache_dir):
                os.makedirs(self.speech_cache_dir)
            # remove the empty file
            if os.path.isfile(self.speech_cache_dir + cache_f):
                if not os.path.getsize(self.speech_cache_dir + cache_f) > 0:
                    os.remove(self.speech_cache_dir + cache_f)
            if not os.path.isfile(self.speech_cache_dir + cache_f):
                with self.use_ogg_codec():
                    self.fetch_voice(
                        text_to_speak, self.speech_cache_dir + cache_f)
            f = self.speech_cache_dir + cache_f
        sounds.play_file(f, async)

    def list_voices(self):
        """Returns all the possible voices
        """
        r = self._send_amazon_auth_packet_v4(
            'POST', 'tts', 'application/json', '/ListVoices', '', '',
            self._region, self._host)
        return r.json()

    def _generate_payload(self, text_to_speak):
        # Build the JSON body for the CreateSpeech request.
        return json.dumps({
            'Input': {
                "Type": "application/ssml+xml",
                'Data': text_to_speak
            },
            'OutputFormat': {
                'Codec': self.codec.upper()
            },
            'Parameters': {
                'Rate': self.speech_rate,
                'SentenceBreak': self.sentence_break,
                'ParagraphBreak': self.paragraph_break
            },
            'Voice': {
                'Name': self.voice_name,
                'Language': self.language,
                'Gender': self.gender
            }
        })

    def _send_amazon_auth_packet_v4(self, method, service, content_type,
                                    canonical_uri, canonical_querystring,
                                    request_parameters, region, host):
        """Send a packet to a given amazon server using Amazon's signature Version 4,
        Returns the resulting response object
        """
        # Create date for headers and the credential string
        t = datetime.datetime.utcnow()
        amazon_date = t.strftime(_amazon_date_format)
        date_stamp = t.strftime(_date_format)
        # Step 1: Create canonical request
        payload_hash = self._sha_hash(request_parameters)
        canonical_headers = 'content-type:{}\n'.format(content_type)
        canonical_headers += 'host:{}\n'.format(host)
        canonical_headers += 'x-amz-content-sha256:{}\n'.format(payload_hash)
        canonical_headers += 'x-amz-date:{}\n'.format(amazon_date)
        canonical_request = '\n'.join([
            method, canonical_uri, canonical_querystring, canonical_headers,
            self.signed_headers, payload_hash])
        # Step 2: Create the string to sign
        credential_scope = '{}/{}/{}/aws4_request'.format(
            date_stamp, region, service)
        string_to_sign = '\n'.join([
            self.algorithm, amazon_date, credential_scope,
            self._sha_hash(canonical_request)])
        # Step 3: Calculate the signature
        signing_key = self._get_signature_key(
            self.secret_key, date_stamp, region, service)
        signature = hmac.new(
            signing_key, string_to_sign.encode('utf-8'),
            hashlib.sha256).hexdigest()
        # Step 4: Create the signed packet
        endpoint = 'https://{}{}'.format(host, canonical_uri)
        authorization_header = '{} Credential={}/{}, ' +\
            'SignedHeaders={}, Signature={}'
        authorization_header = authorization_header.format(
            self.algorithm, self.access_key, credential_scope,
            self.signed_headers, signature)
        headers = {
            'Host': host,
            'Content-type': content_type,
            'X-Amz-Date': amazon_date,
            'Authorization': authorization_header,
            'x-amz-content-sha256': payload_hash,
            'Content-Length': str(len(request_parameters))
        }
        # Send the packet and return the response
        # Use requests.Session() for HTTP keep-alive
        if self._session is None:
            self._session = requests.Session()
        return self._session.post(
            endpoint, data=request_parameters, headers=headers)

    def _sha_hash(self, to_hash):
        # Hex-encoded SHA-256 digest, as required by SigV4.
        return hashlib.sha256(to_hash.encode('utf-8')).hexdigest()

    def _sign(self, key, msg):
        # Raw HMAC-SHA256 digest used to derive the signing key chain.
        return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

    def _get_signature_key(self, key, date_stamp, region_name, service_name):
        # SigV4 key derivation: date -> region -> service -> 'aws4_request'.
        k_date = self._sign(('AWS4{}'.format(key)).encode('utf-8'), date_stamp)
        k_region = self._sign(k_date, region_name)
        k_service = self._sign(k_region, service_name)
        k_signing = self._sign(k_service, 'aws4_request')
        return k_signing

    def __init__(self):
        """Set initial voice object parameters
        """
        # Pull credentials and voice selection from the Mopidy-Rstation config.
        from mopidy_rstation.config.settings import Config
        config = Config.get_config()
        print(str(config))
        self.access_key = config['ivona_access_key']
        self.secret_key = config['ivona_secret_key']
        self.language = config['language']
        self.voice_name = config['voice_name']
        self.region = 'eu-west'
        print('----------------------------------------')
        print('the language is: ' + self.language + ' ----')
        print('the voice is: ' + self.voice_name + ' ----')
        print('----------------------------------------')
        self.speech_rate = 'medium'
        self.sentence_break = 400
        self.paragraph_break = 650
        # speech_cache_dir = os.getcwd() + '/speech_cache/'
        # Cached utterances are partitioned by language and voice.
        self.speech_cache_dir = '/home/pi/mopidy-rstation/speech_cache/' + \
            self.language + '/' + self.voice_name + '/'
        # [Nicole, Enrique, Agnieszka, Tatyana, Russell,
        # Lotte, Geraint, Carmen, Mads, Penelope, Jennifer,
        # Brian, Eric, Ruben, Ricardo, Maxim, Giorgio, Carla,
        # Naja, Maja, Astrid, Ivy, Kimberly, Chantal, Amy, Marlene,
        # Ewa, Conchita, Karl, Miguel, Mathieu, Justin, Chipmunk,
        # Jacek, Ines, Gwyneth, Cristiano, Celine, Jan, Liv,
        # Joey, Raveena, Filiz, Dora, Salli, Vitoria, Emma, Hans, Kendra]
|
araczkowski/mopidy-rstation
|
mopidy_rstation/audio/pyvona.py
|
Python
|
apache-2.0
| 10,038
|
[
"Brian"
] |
876328e3b0e4cea745f89b774d46440f17ef77667abd7cccb78708218d48c459
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyVmdPython(PythonPackage):
    """Installable VMD as a python module"""

    # Upstream project page and release tarball location.
    homepage = "https://github.com/Eigenstate"
    url = "https://github.com/Eigenstate/vmd-python/archive/v2.0.10.tar.gz"

    # md5 checksum of the v2.0.10 release tarball.
    version('2.0.10', '8c746d961497a676053b66e3dd692794')

    # This release only supports Python 2.7.x.
    depends_on('python@2.7:2.8', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-setuptools', type='run')
    # Native libraries VMD links against.
    depends_on('tcl')
    depends_on('netcdf')
    depends_on('expat')
|
mfherbst/spack
|
var/spack/repos/builtin/packages/py-vmd-python/package.py
|
Python
|
lgpl-2.1
| 1,746
|
[
"NetCDF",
"VMD"
] |
d3b22118d594f95fe8f24ec1f1faf5c4e5c5b1022ff832954643205cddb9eed7
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
def getMetaData():
    """Return the format-descriptor dict Avogadro queries via --metadata."""
    description = ("Mostly useless file format that reads xyz-style " +
                   "files with reversed coordinates. Demonstrates " +
                   "the implementation of a user-scripted file format.")
    return {
        'inputFormat': 'xyz',
        'outputFormat': 'xyz',
        'operations': ['read', 'write'],
        'identifier': 'ZYX Example Format',
        'name': 'ZYX Example Format',
        'description': description,
        'fileExtensions': ['zyx'],
        'mimeTypes': ['chemical/x-zyx'],
    }
def write():
    """Reverse the coordinates of an xyz file read from stdin.

    Copies the atom-count and comment lines verbatim, then rewrites each
    atom line with the x and z columns swapped. Returns the converted
    text as a single string.
    """
    result = ""
    # Just copy the first two lines: numAtoms and comment/title
    result += sys.stdin.readline()
    result += sys.stdin.readline()
    for line in sys.stdin:
        words = line.split()
        result += '%-3s %9.5f %9.5f %9.5f' % \
            (words[0], float(words[3]), float(words[2]), float(words[1]))
        if len(words) > 4:
            # BUG FIX: original called `words[4:].join(' ')` — lists have no
            # .join, so any trailing columns raised AttributeError. Also add a
            # separating space so extras don't fuse with the last coordinate.
            result += ' ' + ' '.join(words[4:])
        result += '\n'
    return result
def read():
    """Convert a zyx file on stdin back to xyz ordering.

    Identical transform to write(): header lines pass through, and each
    atom line has its first and third coordinate columns swapped.
    Returns the converted text as a single string.
    """
    result = ""
    # Just copy the first two lines: numAtoms and comment/title
    result += sys.stdin.readline()
    result += sys.stdin.readline()
    for line in sys.stdin:
        words = line.split()
        result += '%-3s %9.5f %9.5f %9.5f' % \
            (words[0], float(words[3]), float(words[2]), float(words[1]))
        if len(words) > 4:
            # BUG FIX: original called `words[4:].join(' ')` — lists have no
            # .join, so any trailing columns raised AttributeError. Also add a
            # separating space so extras don't fuse with the last coordinate.
            result += ' ' + ' '.join(words[4:])
        result += '\n'
    return result
if __name__ == "__main__":
parser = argparse.ArgumentParser('Testing file format script.')
parser.add_argument('--metadata', action='store_true')
parser.add_argument('--read', action='store_true')
parser.add_argument('--write', action='store_true')
args = vars(parser.parse_args())
if args['metadata']:
print(json.dumps(getMetaData()))
elif args['read']:
print(read())
elif args['write']:
print(write())
|
cjh1/mongochemweb-avogadrolibs
|
avogadro/qtplugins/scriptfileformats/formatScripts/zyx.py
|
Python
|
bsd-3-clause
| 2,553
|
[
"Avogadro"
] |
2104955494c58814af3acb14add2ed67b9f65eccda767cfa5f0121c8a44f21a3
|
"""
This module contains the core code needed for post selection
inference based on affine selection procedures as
described in the papers `Kac Rice`_, `Spacings`_, `covTest`_
and `post selection LASSO`_.
.. _covTest: http://arxiv.org/abs/1301.7161
.. _Kac Rice: http://arxiv.org/abs/1308.3020
.. _Spacings: http://arxiv.org/abs/1401.3889
.. _post selection LASSO: http://arxiv.org/abs/1311.6238
.. _sample carving: http://arxiv.org/abs/1410.2597
"""
from warnings import warn
from copy import copy
import numpy as np
from ..truncated.T import truncated_T
from ..distributions.discrete_family import discrete_family
from mpmath import mp
import pyinter
WARNINGS = False
class constraints(object):
    r"""
    This class is the core object for quasiaffine selection procedures.
    It is meant to describe sets of the form $C$
    where

    .. math::

       C = \left\{z: Az + u \leq \|Pz\|_2 b \right\}

    where $u$ is `LHS_offset`, $b$ is `RHS_offset`, $P$
    is a projection assumed to satisfy $AP=0$,
    and $A$ is `linear_part`, some fixed matrix.

    Notes
    -----
    In this parameterization, the parameter `self.mean` corresponds
    to the *reference measure* that is being truncated. It is not the
    mean of the truncated Gaussian.
    """

    def __init__(self,
                 linear_part,
                 LHS_offset,
                 RHS_offset,
                 residual_projector,
                 covariance=None,
                 mean=None):
        r"""
        Create a new inequality.

        Parameters
        ----------
        linear_part : np.float((q,p))
            The linear part, $A$ of the quasi-affine constraint $C$.
        LHS_offset : np.float(q)
            The value of $u$ in the quasi-affine constraint $C$.
        RHS_offset : np.float(q)
            The value of $b$ in the quasi-affine constraint $C$.
        residual_projector : np.float((p,p))
            The matrix $P$ above. If `covariance` is not identity,
            then $\|Pz\|_2$ should be interpreted as a Mahalanobis distance.
        covariance : np.float((p,p))
            Covariance matrix of Gaussian distribution to be truncated.
            Defaults to `np.identity(self.dim)`. Only the identity is
            currently supported.
        mean : np.float(p)
            Mean vector of Gaussian distribution to be truncated.
            Defaults to `np.zeros(self.dim)`.

        Raises
        ------
        NotImplementedError
            If a non-identity `covariance` is supplied.
        """
        (self.linear_part,
         self.LHS_offset,
         self.RHS_offset,
         self.residual_projector) = (np.asarray(linear_part),
                                     np.asarray(LHS_offset),
                                     np.asarray(RHS_offset),
                                     residual_projector)
        if self.linear_part.ndim == 2:
            self.dim = self.linear_part.shape[1]
        else:
            self.dim = self.linear_part.shape[0]
        # BUG FIX: previously *any* explicit covariance raised, which made
        # __copy__ (which passes the covariance back in) unusable. An explicit
        # identity is now accepted; anything else is still unsupported.
        if covariance is None:
            covariance = np.identity(self.dim)
        elif not np.allclose(covariance, np.identity(self.dim)):
            raise NotImplementedError('need to take into account nonidentity covariance for residual projector')
        self.covariance = covariance
        # Degrees of freedom of the residual: trace of the projector P.
        self.RSS_df = np.diag(self.residual_projector).sum()
        if mean is None:
            mean = np.zeros(self.dim)
        self.mean = mean

    def _repr_latex_(self):
        # IPython rich-display hook.
        return """$$Z \sim N(\mu,\Sigma) | AZ + u \leq \|PZ\|_2 b$$"""

    def __copy__(self):
        r"""
        A copy of the constraints.

        Also copies _sqrt_cov, _sqrt_inv if attributes are present.
        """
        # BUG FIX: original read `self.LHS.offset` / `self.RHS.offset`,
        # which raised AttributeError; the attributes are LHS_offset /
        # RHS_offset.
        con = constraints(self.linear_part.copy(),
                          self.LHS_offset.copy(),
                          self.RHS_offset.copy(),
                          self.residual_projector.copy(),
                          mean=copy(self.mean),
                          covariance=copy(self.covariance))
        if hasattr(self, "_sqrt_cov"):
            con._sqrt_cov = self._sqrt_cov.copy()
            con._sqrt_inv = self._sqrt_inv.copy()
        return con

    def _value(self, Y):
        # Evaluate A Y + u - b * ||P Y||_2; nonpositive entries mean the
        # corresponding inequality is satisfied.
        sqrt_RSS = np.linalg.norm(np.dot(self.residual_projector, Y))
        V1 = np.dot(self.linear_part, Y) + self.LHS_offset - self.RHS_offset * sqrt_RSS
        return V1

    def __call__(self, Y, tol=1.e-3):
        # Membership test: all inequalities hold up to a relative tolerance.
        sqrt_RSS = np.linalg.norm(np.dot(self.residual_projector, Y))
        V1 = np.dot(self.linear_part, Y) + self.LHS_offset - self.RHS_offset * sqrt_RSS
        return np.all(V1 < tol * np.fabs(V1).max())

    def conditional(self, linear_part, value):
        """
        Return an equivalent constraint
        after having conditioned on a linear equality.

        Parameters
        ----------
        linear_part : np.float((k,q))
            Linear part of equality constraint, `C` above.
        value : np.float(k)
            Value of equality constraint, `b` above.

        Returns
        -------
        conditional_con : `constraints`
            Quasi-affine constraints having applied equality constraint.

        Raises
        ------
        NotImplementedError
            Always: the required calculation must not assume PZ is
            independent of CZ, and has not been implemented.
        """
        raise NotImplementedError('class is incomplete; calculation should not assume that PZ is independent of CZ')

    def bounds(self, direction_of_interest, Y):
        r"""
        For a realization $Y$ of the random variable $N(\mu,\Sigma)$
        truncated to $C$ specified by `self.constraints` compute
        the slice of the inequality constraints in a
        given direction $\eta$.

        Parameters
        ----------
        direction_of_interest : np.float
            A direction $\eta$ for which we may want to form
            selection intervals or a test.
        Y : np.float
            A realization of $N(\mu,\Sigma)$ where
            $\Sigma$ is `self.covariance`.

        Returns
        -------
        intervals : []
            Set of truncation intervals for the $T$ statistic.
        Tobs : np.float
            The observed $T$ statistic.

        Raises
        ------
        NotImplementedError
            Always; the code below the raise sketches the intended call.
        """
        raise NotImplementedError('class is incomplete')
        # unreachable: intended implementation once the class is finished
        intervals, Tobs = constraints_unknown_sigma( \
            self.linear_part,
            self.RHS_offset * np.sqrt(self.RSS_df),
            self.LHS_offset,
            Y,
            direction_of_interest,
            self.residual_projector)
        return intervals, Tobs

    def pivot(self, direction_of_interest, Y,
              alternative='greater'):
        r"""
        For a realization $Y$ of the random variable $N(\mu,\Sigma)$
        truncated to $C$ specified by `self.constraints` compute
        the slice of the inequality constraints in a
        given direction $\eta$ and test whether
        $\eta^T\mu$ is greater then 0, less than 0 or equal to 0.

        Note
        ----
        Conditions on some direction vector!

        Parameters
        ----------
        direction_of_interest : np.float
            A direction $\eta$ for which we may want to form
            selection intervals or a test.
        Y : np.float
            A realization of $N(0,\Sigma)$ where
            $\Sigma$ is `self.covariance`.
        alternative : ['greater', 'less', 'twosided']
            What alternative to use.

        Returns
        -------
        P : np.float
            $p$-value of corresponding test.

        Notes
        -----
        All of the tests are based on the exact pivot $F$ given
        by the truncated T distribution for the
        given direction $\eta$. If the alternative is 'greater'
        then we return $1-F$; if it is 'less' we return $F$
        and if it is 'twosided' we return $2 \min(F,1-F)$.
        """
        if alternative not in ['greater', 'less', 'twosided']:
            raise ValueError("alternative should be one of ['greater', 'less', 'twosided']")
        # Note: self.bounds currently raises NotImplementedError, so this
        # method is unusable until bounds is completed.
        intervals, Tobs = self.bounds(direction_of_interest, Y)
        truncT = truncated_T(np.array([(interval.lower_value,
                                        interval.upper_value) for interval in intervals]), self.RSS_df)
        P = float(truncT.sf(Tobs))
        if (truncT.intervals.shape == ((1,2)) and np.all(truncT.intervals == [[-np.inf, np.inf]])):
            raise ValueError('should be truncated')
        if alternative == 'greater':
            return P
        elif alternative == 'less':
            return 1 - P
        else:
            return 2 * min(P, 1-P)

    def whiten(self):
        """
        Return a whitened version of constraints in a different
        basis, and a change of basis matrix.

        If `self.covariance` is rank deficient, the change-of
        basis matrix will not be square.

        Raises
        ------
        NotImplementedError
            Always: whitening would also require whitening the residual
            projector, which is only defined for multiples of identity.
        """
        raise NotImplementedError('class is only defined for multiple of identity covariance')
class orthogonal(constraints):
    r"""
    This class is the core object for quasiaffine selection procedures.
    It is meant to describe sets of the form $C$
    where

    .. math::

       C = \left\{z: Az + u \leq \|Pz\|_2 b \right\}

    where $u$ is `LHS_offset`, $b$ is `RHS_offset`, $P$
    is a projection assumed to satisfy $AP=0$,
    and $A$ is `linear_part`, some fixed matrix.

    The condition $AP=0$ is why this class is called `orthogonal`.

    Notes
    -----
    In this parameterization, the parameter `self.mean` corresponds
    to the *reference measure* that is being truncated. It is not the
    mean of the truncated Gaussian.
    """

    def __init__(self,
                 linear_part,
                 LHS_offset,
                 RHS_offset,
                 RSS,
                 RSS_df,
                 covariance=None,
                 mean=None):
        r"""
        Create a new inequality.

        Parameters
        ----------
        linear_part : np.float((q,p))
            The linear part, $A$ of the quasi-affine constraint $C$.
        LHS_offset : np.float(q)
            The value of $u$ in the quasi-affine constraint $C$.
        RHS_offset : np.float(q)
            The value of $b$ in the quasi-affine constraint $C$.
        RSS : float
            The value of $\|Pz\|_2$ above. If `covariance` is not
            identity, then $\|Pz\|_2$ should be interpreted as a
            Mahalanobis distance relative to `self.covariance`.
        RSS_df : int
            Degrees of freedom in $\|Pz\|_2$; when `covariance` is a
            multiple of identity, this should be trace(P).
        covariance : np.float((p,p))
            Covariance matrix of Gaussian distribution to be truncated.
            Defaults to `np.identity(self.dim)`.
        mean : np.float(p)
            Mean vector of Gaussian distribution to be truncated.
            Defaults to `np.zeros(self.dim)`.
        """
        (self.linear_part,
         self.LHS_offset,
         self.RHS_offset,
         self.RSS,
         self.RSS_df) = (np.asarray(linear_part),
                         np.asarray(LHS_offset),
                         np.asarray(RHS_offset),
                         RSS,
                         RSS_df)
        if self.linear_part.ndim == 2:
            self.dim = self.linear_part.shape[1]
        else:
            self.dim = self.linear_part.shape[0]
        if covariance is None:
            covariance = np.identity(self.dim)
        self.covariance = covariance
        if mean is None:
            mean = np.zeros(self.dim)
        self.mean = mean

    def _repr_latex_(self):
        # IPython rich-display hook.
        return """$$Z \sim N(\mu,\Sigma) | AZ + u \leq \|PZ\|_2 b$$"""

    def __copy__(self):
        r"""
        A copy of the constraints.

        Also copies _sqrt_cov, _sqrt_inv if attributes are present.
        """
        # BUG FIX: original read `self.LHS.offset` / `self.RHS.offset`,
        # which raised AttributeError; the attributes are LHS_offset /
        # RHS_offset.
        con = orthogonal(self.linear_part.copy(),
                         self.LHS_offset.copy(),
                         self.RHS_offset.copy(),
                         copy(self.RSS),
                         copy(self.RSS_df),
                         mean=copy(self.mean),
                         covariance=copy(self.covariance))
        if hasattr(self, "_sqrt_cov"):
            con._sqrt_cov = self._sqrt_cov.copy()
            con._sqrt_inv = self._sqrt_inv.copy()
        return con

    def __call__(self, Y, tol=1.e-3):
        # Membership test using the stored RSS instead of a projector.
        V1 = np.dot(self.linear_part, Y) + self.LHS_offset - self.RHS_offset * np.sqrt(self.RSS)
        return np.all(V1 < tol * np.fabs(V1).max())

    def conditional(self, linear_part, value):
        """
        Return an equivalent constraint
        after having conditioned on a linear equality.

        Let the inequality constraints be specified by
        `(A,b)` and the equality constraints be specified
        by `(C,d)`. We form equivalent inequality constraints by
        considering the residual

        .. math::

           AZ - E(AZ|CZ=d)

        Parameters
        ----------
        linear_part : np.float((k,q))
            Linear part of equality constraint, `C` above.
        value : np.float(k)
            Value of equality constraint, `b` above.

        Returns
        -------
        conditional_con : `orthogonal`
            Quasi-affine constraints having applied equality constraint.

        Notes
        -----
        The calculations here assume that $CZ$ is independent of $PZ$.
        """
        A, S = (self.linear_part,
                self.covariance)
        C, d = linear_part, value
        M1 = np.dot(S, C.T)
        M2 = np.dot(C, M1)
        if M2.shape:
            # Matrix case: use the pseudo-inverse of C S C^T.
            M2i = np.linalg.pinv(M2)
            delta_cov = np.dot(M1, np.dot(M2i, M1.T))
            delta_mean = \
                np.dot(M1,
                       np.dot(M2i,
                              np.dot(C,
                                     self.mean) - d))
        else:
            # Scalar case.
            M2i = 1. / M2
            delta_cov = np.multiply.outer(M1, M1) / M2i
            delta_mean = M1 * d / M2i
        return orthogonal(self.linear_part,
                          self.LHS_offset,
                          self.RHS_offset,
                          self.RSS,
                          self.RSS_df,
                          covariance=self.covariance - delta_cov,
                          mean=self.mean - delta_mean)

    def bounds(self, direction_of_interest, Y):
        r"""
        For a realization $Y$ of the random variable $N(\mu,\Sigma)$
        truncated to $C$ specified by `self.constraints` compute
        the slice of the inequality constraints in a
        given direction $\eta$.

        Parameters
        ----------
        direction_of_interest : np.float
            A direction $\eta$ for which we may want to form
            selection intervals or a test.
        Y : np.float
            A realization of $N(\mu,\Sigma)$ where
            $\Sigma$ is `self.covariance`.

        Returns
        -------
        intervals : []
            Set of truncation intervals for the $T$ statistic.
        Tobs : np.float
            The observed $T$ statistic.
        """
        intervals, Tobs = constraints_unknown_sigma( \
            self.linear_part,
            self.RHS_offset * np.sqrt(self.RSS_df),
            self.LHS_offset,
            Y,
            direction_of_interest,
            self.RSS,
            self.RSS_df)
        return intervals, Tobs

    def pivot(self, direction_of_interest, Y,
              alternative='greater'):
        r"""
        For a realization $Y$ of the random variable $N(\mu,\Sigma)$
        truncated to $C$ specified by `self.constraints` compute
        the slice of the inequality constraints in a
        given direction $\eta$ and test whether
        $\eta^T\mu$ is greater then 0, less than 0 or equal to 0.

        Parameters
        ----------
        direction_of_interest : np.float
            A direction $\eta$ for which we may want to form
            selection intervals or a test.
        Y : np.float
            A realization of $N(0,\Sigma)$ where
            $\Sigma$ is `self.covariance`.
        alternative : ['greater', 'less', 'twosided']
            What alternative to use.

        Returns
        -------
        P : np.float
            $p$-value of corresponding test.

        Notes
        -----
        All of the tests are based on the exact pivot $F$ given
        by the truncated T distribution for the
        given direction $\eta$. If the alternative is 'greater'
        then we return $1-F$; if it is 'less' we return $F$
        and if it is 'twosided' we return $2 \min(F,1-F)$.
        """
        if alternative not in ['greater', 'less', 'twosided']:
            raise ValueError("alternative should be one of ['greater', 'less', 'twosided']")
        intervals, Tobs = self.bounds(direction_of_interest, Y)
        truncT = truncated_T(np.array([(interval.lower_value,
                                        interval.upper_value) for interval in intervals]), self.RSS_df)
        P = float(truncT.sf(Tobs))
        if (truncT.intervals.shape == ((1,2)) and np.all(truncT.intervals == [[-np.inf, np.inf]])):
            raise ValueError('should be truncated')
        if alternative == 'greater':
            return P
        elif alternative == 'less':
            return 1 - P
        else:
            return 2 * min(P, 1-P)

    def whiten(self):
        """
        Return a whitened version of constraints in a different
        basis, and a change of basis matrix.

        If `self.covariance` is rank deficient, the change-of
        basis matrix will not be square.

        Returns
        -------
        inverse_map, forward_map : callable
            Maps between the whitened and original coordinates.
        new_con : `orthogonal`
            The whitened constraint.
        """
        if not hasattr(self, "_sqrt_cov"):
            # Cache the square root (and its inverse) of the covariance.
            rank = np.linalg.matrix_rank(self.covariance)
            D, U = np.linalg.eigh(self.covariance)
            D = np.sqrt(D[-rank:])
            U = U[:,-rank:]
            self._sqrt_cov = U * D[None,:]
            self._sqrt_inv = (U / D[None,:]).T
        sqrt_cov = self._sqrt_cov
        sqrt_inv = self._sqrt_inv
        # original matrix is np.dot(U, U.T)
        new_linear = np.dot(self.linear_part, sqrt_cov)
        new_con = orthogonal(new_linear,
                             self.LHS_offset + np.dot(self.linear_part, self.mean),
                             self.RHS_offset,
                             self.RSS,
                             self.RSS_df)
        mu = self.mean.copy()
        inverse_map = lambda Z: np.dot(sqrt_cov, Z) + mu[:,None]
        forward_map = lambda W: np.dot(sqrt_inv, W - mu)
        return inverse_map, forward_map, new_con
def stack(*cons):
    """
    Combine constraints into a large constraint by intersection.

    Parameters
    ----------
    cons : [`selection.quasi_affine.constraints`_]
        A sequence of constraints.

    Returns
    -------
    intersection : `selection.quasi_affine.constraints`_

    Notes
    -----
    Resulting constraint will have mean 0 and covariance $I$.
    If each input is of type `constraints`, quietly assumes all residual
    projectors are the same, so it uses the first residual projector in
    the stack. If of type `orthogonal`, quietly assumes all RSS and
    RSS_df are the same. Mixed types raise a ValueError.
    """
    ineq, ineq_LHS_off, ineq_RHS_off = [], [], []
    # BUG FIX: `orthogonal` subclasses `constraints`, so
    # isinstance(con, constraints) is True for both types and the original
    # `constraints`-first check made the orthogonal branch unreachable; that
    # branch also built the result with the wrong constructor
    # (`constraints(...)` fed RSS/RSS_df as projector/covariance).
    if np.all([isinstance(con, orthogonal) for con in cons]):
        for con in cons:
            ineq.append(con.linear_part)
            ineq_LHS_off.append(con.LHS_offset)
            ineq_RHS_off.append(con.RHS_offset)
        intersection = orthogonal(np.vstack(ineq),
                                  np.hstack(ineq_LHS_off),
                                  np.hstack(ineq_RHS_off),
                                  cons[0].RSS,
                                  cons[0].RSS_df
                                  )
    elif np.all([isinstance(con, constraints) for con in cons]):
        for con in cons:
            ineq.append(con.linear_part)
            ineq_LHS_off.append(con.LHS_offset)
            ineq_RHS_off.append(con.RHS_offset)
        intersection = constraints(np.vstack(ineq),
                                   np.hstack(ineq_LHS_off),
                                   np.hstack(ineq_RHS_off),
                                   cons[0].residual_projector
                                   )
    else:
        raise ValueError('all constraints must be of the same type')
    return intersection
def sample_from_constraints(con,
                            Y,
                            direction_of_interest=None,
                            how_often=-1,
                            ndraw=1000,
                            burnin=1000,
                            white=False,
                            use_constraint_directions=True):
    r"""
    Use Gibbs sampler to simulate from `con`.

    Parameters
    ----------
    con : `selection.affine.constraints`_

    Y : np.float
        Point satisfying the constraint.

    direction_of_interest : np.float (optional)
        Which projection is of most interest?

    how_often : int (optional)
        How often should the sampler make a move along `direction_of_interest`?
        If negative, defaults to ndraw+burnin (so it will never be used).

    ndraw : int (optional)
        Defaults to 1000.

    burnin : int (optional)
        Defaults to 1000.

    white : bool (optional)
        Is con.covariance equal to identity?

    use_constraint_directions : bool (optional)
        Use the directions formed by the constraints as in
        the Gibbs scheme?

    Returns
    -------
    Z : np.float((ndraw, n))
        Sample from the sphere intersect the constraints.
    """
    # NOTE(review): everything below the raise is unreachable; it is kept
    # as a sketch of the intended implementation.
    raise NotImplementedError("first get the sphere sampler working.")
    # this will be different than data carving sqrtlasso
    if direction_of_interest is None:
        direction_of_interest = np.random.standard_normal(Y.shape)
    if how_often < 0:
        how_often = ndraw + burnin

    if not white:
        # whiten() maps the problem to identity covariance.
        inverse_map, forward_map, white_con = con.whiten()
        white_Y = forward_map(Y)
        white_direction_of_interest = forward_map(np.dot(con.covariance, direction_of_interest))
    else:
        white_con = con
        inverse_map = lambda V: V
        # NOTE(review): this branch never defines `white_Y` or
        # `white_direction_of_interest`; the call below would raise
        # NameError for `white=True` — must be fixed before the
        # NotImplementedError above is lifted.

    white_samples = sample_truncnorm_white(white_con.linear_part,
                                           white_con.offset,
                                           white_Y,
                                           white_direction_of_interest,
                                           how_often=how_often,
                                           ndraw=ndraw,
                                           burnin=burnin,
                                           sigma=1.,
                                           use_A=use_constraint_directions)
    # map the whitened samples back to the original coordinates
    Z = inverse_map(white_samples.T).T
    return Z
def sample_from_sphere(con,
                       Y,
                       direction_of_interest=None,
                       how_often=-1,
                       ndraw=1000,
                       burnin=1000,
                       white=False):
    r"""
    Use Gibbs sampler to simulate from `con`
    intersected with (whitened) sphere of radius `np.linalg.norm(Y)`.
    When `con.covariance` is not $I$, it samples from the
    ellipse of constant Mahalanobis distance from `con.mean`.

    Parameters
    ----------
    con : `selection.affine.constraints`_

    Y : np.float
        Point satisfying the constraint.

    direction_of_interest : np.float (optional)
        Which projection is of most interest?

    how_often : int (optional)
        How often should the sampler make a move along `direction_of_interest`?
        If negative, defaults to ndraw+burnin (so it will never be used).

    ndraw : int (optional)
        Defaults to 1000.

    burnin : int (optional)
        Defaults to 1000.

    white : bool (optional)
        Is con.covariance equal to identity?

    Returns
    -------
    Z : np.float((ndraw, n))
        Sample from the sphere intersect the constraints.

    weights : np.float(ndraw)
        Importance weights for the sample.
    """
    # this is data carving sqrt_lasso
    if direction_of_interest is None:
        direction_of_interest = np.random.standard_normal(Y.shape)
    if how_often < 0:
        how_often = ndraw + burnin

    if not white:
        inverse_map, forward_map, white_con = con.whiten()
        white_Y = forward_map(Y)
        white_direction_of_interest = forward_map(direction_of_interest)
    else:
        # BUG FIX: the original `white=True` branch never defined
        # `white_Y` or `white_direction_of_interest`, raising NameError
        # below.  In the white case the data and direction are already in
        # whitened coordinates, so use them unchanged.
        white_con = con
        inverse_map = lambda V: V
        white_Y = Y
        white_direction_of_interest = direction_of_interest

    # residual norm consumed by the quasi-affine sphere sampler
    RSS = np.linalg.norm(np.dot(white_con.residual_projector, white_Y))

    white_samples, weights = sample_quasi_white_sphere(white_con.linear_part,
                                                       white_con.RHS_offset,
                                                       white_con.LHS_offset,
                                                       white_Y,
                                                       white_direction_of_interest,
                                                       np.linalg.norm(white_Y)**2,
                                                       white_con.dim,
                                                       RSS,
                                                       white_con.RSS_df,
                                                       how_often=how_often,
                                                       ndraw=ndraw,
                                                       burnin=burnin)
    # map the whitened samples back to the original coordinates
    Z = inverse_map(white_samples.T).T
    return Z, weights
def gibbs_test(quasi_affine_con,
               Y,
               direction_of_interest,
               how_often=-1,
               ndraw=5000,
               burnin=2000,
               white=False,
               alternative='twosided',
               UMPU=True,
               sigma_known=False,
               alpha=0.05,
               use_constraint_directions=False):
    """
    A Monte Carlo significance test for
    a given function of `con.mean`.

    Parameters
    ----------
    quasi_affine_con : `orthogonal`

    Y : np.float
        Point satisfying the constraint.

    direction_of_interest: np.float
        Which linear function of `con.mean` is of interest?
        (a.k.a. $\eta$ in many of related papers)

    how_often : int (optional)
        How often should the sampler make a move along `direction_of_interest`?
        If negative, defaults to ndraw+burnin (so it will never be used).

    ndraw : int (optional)
        Defaults to 5000.

    burnin : int (optional)
        Defaults to 2000.

    white : bool (optional)
        Is con.covariance equal to identity?

    alternative : str
        One of ['greater', 'less', 'twosided']

    UMPU : bool
        Perform the UMPU test?

    sigma_known : bool
        Is $\sigma$ assumed known?

    alpha :
        Level for UMPU test.

    use_constraint_directions : bool (optional)
        Use the directions formed by the constraints as in
        the Gibbs scheme?

    Returns
    -------
    pvalue : float
        P-value (using importance weights) for specified hypothesis test.

    Z : np.float((ndraw, n))
        Sample from the sphere intersect the constraints.

    weights : np.float(ndraw)
        Importance weights for the sample.
    """
    eta = direction_of_interest  # shorthand
    if alternative not in ['greater', 'less', 'twosided']:
        raise ValueError("expecting alternative to be in ['greater', 'less', 'twosided']")

    # BUG FIX: both sampler calls below previously referenced an undefined
    # name `affine_con`; the parameter is `quasi_affine_con`.
    if not sigma_known:
        # sigma unknown: sphere sampler returns importance weights
        Z, W = sample_from_sphere(quasi_affine_con,
                                  Y,
                                  eta,
                                  how_often=how_often,
                                  ndraw=ndraw,
                                  burnin=burnin,
                                  white=white)
    else:
        Z = sample_from_constraints(quasi_affine_con,
                                    Y,
                                    eta,
                                    how_often=how_often,
                                    ndraw=ndraw,
                                    burnin=burnin,
                                    white=white,
                                    use_constraint_directions=\
                                        use_constraint_directions)
        # equal weights when sampling directly from the constraints
        # (np.float was removed from numpy; the builtin is equivalent)
        W = np.ones(Z.shape[0], float)

    null_statistics = np.dot(Z, eta)
    observed = (eta * Y).sum()
    if alternative == 'greater':
        pvalue = (W * (null_statistics >= observed)).sum() / W.sum()
    elif alternative == 'less':
        pvalue = (W * (null_statistics <= observed)).sum() / W.sum()
    elif not UMPU:
        # two-sided by doubling the smaller tail
        pvalue = (W * (null_statistics <= observed)).sum() / W.sum()
        pvalue = 2 * min(pvalue, 1 - pvalue)
    else:
        # UMPU test based on the weighted empirical null distribution
        dfam = discrete_family(null_statistics, W)
        decision = dfam.two_sided_test(0, observed, alpha=alpha)
        return decision, Z, W
    return pvalue, Z, W
def constraints_unknown_sigma(support_directions,
                              RHS_offsets,
                              LHS_offsets,
                              observed_data,
                              direction_of_interest,
                              RSS,
                              RSS_df,
                              value_under_null=0.,
                              tol=1.e-4,
                              DEBUG=False):
    r"""
    Given a quasi-affine constraint $\{z:Az+u \leq \hat{\sigma}b\}$
    (elementwise)
    specified with $A$ as `support_directions` and $b$ as
    `RHS_offsets`, a new direction of interest $\eta$, and
    an `observed_data` is Gaussian vector $Z \sim N(\mu,\sigma^2 I)$
    with $\sigma$ unknown, this
    function returns $\eta^TZ$ as well as a set
    bounding this value. The value of $\hat{\sigma}$ is taken to be
    sqrt(RSS/RSS_df)

    The interval constructed is such that the endpoints are
    independent of $\eta^TZ$, hence the
    selective $T$ distribution of
    of `sample carving`_
    can be used to form an exact pivot.

    To construct the interval, we are in effect conditioning
    on all randomness perpendicular to the direction of interest,
    i.e. $P_{\eta}^{\perp}X$ where $X$ is the Gaussian data vector.

    Notes
    -----
    Covariance is assumed to be an unknown multiple of the identity.

    Parameters
    ----------
    support_directions : np.float
        Matrix specifying constraint, $A$.

    RHS_offsets : np.float
        Offset in constraint, $b$.

    LHS_offsets : np.float
        Offset in LHS of constraint, $u$.

    observed_data : np.float
        Observations.

    direction_of_interest : np.float
        Direction in which we're interested for the
        contrast.

    RSS : float
        Residual sum of squares.

    RSS_df : int
        Degrees of freedom of RSS.

    tol : float
        Relative tolerance parameter for deciding
        sign of $Az-b$.

    Returns
    -------
    truncation_set : pyinter.IntervalSet
        Set bounding the observed statistic.

    Tobs : float
        Observed studentized statistic $\eta^TZ$.
    """
    # shorthand
    A, b, L, X, w, theta = (support_directions,
                            RHS_offsets,
                            LHS_offsets,
                            observed_data,
                            direction_of_interest,
                            value_under_null)

    # make direction of interest a unit vector
    normw = np.linalg.norm(w)
    w = w / normw
    theta = theta / normw

    sigma_hat = np.sqrt(RSS / RSS_df)

    # compute the sufficient statistics:
    # U is the (centered) projection on w, V the orthogonal complement
    U = (w * X).sum() - theta
    V = X - (X * w).sum() * w
    W = sigma_hat ** 2 * RSS_df + U ** 2
    Tobs = U / np.sqrt((W - U ** 2) / RSS_df)
    sqrtW = np.sqrt(W)
    alpha = np.dot(A, w)

    gamma = theta * alpha + np.dot(A, V) + L

    # each constraint row yields a set of feasible t-intervals from
    # _a * t + _c * sqrt(RSS_df + t**2) <= _b
    intervals = []
    for _a, _b, _c in zip(alpha, b, gamma):
        _a = _a * sqrtW
        _b = _b * sqrtW
        cur_intervals = sqrt_inequality_solver(_a, _c, _b, RSS_df)
        intervals.append(pyinter.IntervalSet([pyinter.closed(*i) for i in cur_intervals if i]))

    # the truncation set is the intersection over all constraint rows
    truncation_set = intervals[0]
    for interv in intervals[1:]:
        truncation_set = truncation_set.intersection(interv)
    if not truncation_set:
        raise ValueError("empty truncation intervals")
    return truncation_set, Tobs
def quadratic_inequality_solver(a, b, c, direction="less than"):
    '''
    Solve a * x**2 + b * x + c <= 0 when `direction` is "less than",
    or a * x**2 + b * x + c >= 0 when `direction` is "greater than".

    Returns a list of disjoint [low, high] intervals (endpoints may be
    +/- infinity) whose union is the solution set; a degenerate
    (single-point or empty) solution is reported as an empty list.
    '''
    if direction not in ["less than", "greater than"]:
        raise ValueError("direction should be in ['less than', 'greater than']")
    if direction == "greater than":
        # f(x) >= 0  <=>  -f(x) <= 0: reduce to the "less than" case.
        return quadratic_inequality_solver(-a, -b, -c, direction="less than")

    if a == 0:
        # Degenerate (linear) case: b*x + c <= 0.
        if b > 0:
            return [[float("-inf"), -c / b]]
        if b < 0:
            return [[-c / b, float("inf")]]
        raise ValueError("Both coefficients are equal to zero")

    d = b ** 2 - 4 * a * c
    if a > 0:
        # Upward parabola: negative only strictly between the roots.
        if d <= 0:
            #raise ValueError("No valid solution")
            return [[]]
        return [[(-b - np.sqrt(d)) / (2 * a), (-b + np.sqrt(d)) / (2 * a)]]

    # Downward parabola (a < 0): negative outside the roots, or everywhere
    # when there are no real roots.
    if d <= 0:
        return [[float("-inf"), float("inf")]]
    return [[float("-inf"), (-b + np.sqrt(d)) / (2 * a)],
            [(-b - np.sqrt(d)) / (2 * a), float("inf")]]
def intersection(I1, I2):
    ''' Overlap of two [low, high] intervals; [] when either interval is
    empty or they are disjoint (touching endpoints count as disjoint). '''
    if not (I1 and I2):
        return []
    low = max(I1[0], I2[0])
    high = min(I1[1], I2[1])
    return [low, high] if high > low else []
def sqrt_inequality_solver(a, b, c, n):
    '''
    Find the set of t satisfying

        a*t + b*sqrt(n + t**2) <= c

    Returns a list of (possibly empty) intervals whose union is the
    solution set.
    '''
    # Squaring both sides gives the quadratic
    #     (b**2 - a**2)*t**2 + 2*a*c*t + (b**2 * n - c**2)
    # whose required sign depends on the sign of b.
    p, q, r = b ** 2 - a ** 2, 2 * a * c, b ** 2 * n - c ** 2

    if b >= 0:
        # Need the squared inequality (<= 0) together with c - a*t >= 0.
        candidates = quadratic_inequality_solver(p, q, r)
        if a > 0:
            # c - a*t >= 0  <=>  t <= c/a
            return [intersection(I, [float("-inf"), c / a]) for I in candidates]
        elif a < 0:
            # c - a*t >= 0  <=>  t >= c/a
            return [intersection(I, [c / a, float("inf")]) for I in candidates]
        elif c >= 0:
            return candidates
        else:
            return [[]]
    else:
        # b < 0: the solution is {c - a*t >= 0} in full, together with the
        # part of {c - a*t <= 0} where the squared inequality flips (>= 0).
        candidates = quadratic_inequality_solver(p, q, r, "greater than")
        if a > 0:
            return [intersection(I, [c / a, float("inf")]) for I in candidates] + [[float("-inf"), c / a]]
        elif a < 0:
            return [intersection(I, [float("-inf"), c / a]) for I in candidates] + [[c / a, float("inf")]]
        elif c >= 0:
            return [[float("-inf"), float("inf")]]
        else:
            return candidates
|
selective-inference/selective-inference
|
selectinf/constraints/quasi_affine.py
|
Python
|
bsd-3-clause
| 37,606
|
[
"Gaussian"
] |
7cc4f79c191fb54321a351a001c7772699d43536f750e613e68727043a98b314
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
RDKit topology parser --- :mod:`MDAnalysis.converters.RDKitParser`
==================================================================
Converts an `RDKit <https://www.rdkit.org/docs/source/rdkit.Chem.rdchem.html#rdkit.Chem.rdchem.Mol>`_ :class:`rdkit.Chem.rdchem.Mol` into a :class:`MDAnalysis.core.Topology`.
See Also
--------
:mod:`MDAnalysis.converters.RDKit`
Classes
-------
.. autoclass:: RDKitParser
:members:
:inherited-members:
"""
import logging
import warnings
import numpy as np
from ..topology.base import TopologyReaderBase, change_squash
from ..topology import guessers
from ..core.topologyattrs import (
Atomids,
Atomnames,
Atomtypes,
Elements,
Masses,
Charges,
Aromaticities,
Bonds,
Resids,
Resnums,
Resnames,
RSChirality,
Segids,
AltLocs,
ChainIDs,
ICodes,
Occupancies,
Tempfactors,
)
from ..core.topology import Topology
logger = logging.getLogger("MDAnalysis.converters.RDKitParser")
def _rdkit_atom_to_RS(atom):
"""Fetches RDKit chiral tags"""
try:
return atom.GetProp("_CIPCode")
except KeyError:
return ""
class RDKitParser(TopologyReaderBase):
    """
    For RDKit structures

    Creates the following Attributes:
     - Atomids
     - Atomnames
     - Aromaticities
     - Elements
     - Masses
     - Bonds
     - Resids
     - Resnums
     - RSChirality
     - Segids

    Guesses the following:
     - Atomtypes

    Depending on RDKit's input, the following Attributes might be present:
     - Charges
     - Resnames
     - AltLocs
     - ChainIDs
     - ICodes
     - Occupancies
     - Tempfactors

    Attributes table:

    +---------------------------------------------+-------------------------+
    | RDKit attribute                             | MDAnalysis equivalent   |
    +=============================================+=========================+
    | atom.GetMonomerInfo().GetAltLoc()           | altLocs                 |
    +---------------------------------------------+-------------------------+
    | atom.GetIsAromatic()                        | aromaticities           |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetChainId()          | chainIDs                |
    +---------------------------------------------+-------------------------+
    | atom.GetDoubleProp('_GasteigerCharge')      | charges                 |
    | atom.GetDoubleProp('_TriposPartialCharge')  |                         |
    +---------------------------------------------+-------------------------+
    | atom.GetSymbol()                            | elements                |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetInsertionCode()    | icodes                  |
    +---------------------------------------------+-------------------------+
    | atom.GetIdx()                               | indices                 |
    +---------------------------------------------+-------------------------+
    | atom.GetMass()                              | masses                  |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetName()             | names                   |
    | atom.GetProp('_TriposAtomName')             |                         |
    +---------------------------------------------+-------------------------+
    | atom.GetProp('_CIPCode')                    | chiralities             |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetOccupancy()        | occupancies             |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetResidueName()      | resnames                |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetResidueNumber()    | resnums                 |
    +---------------------------------------------+-------------------------+
    | atom.GetMonomerInfo().GetTempFactor()       | tempfactors             |
    +---------------------------------------------+-------------------------+
    | atom.GetProp('_TriposAtomType')             | types                   |
    +---------------------------------------------+-------------------------+

    Raises
    ------
    ValueError
        If only part of the atoms have MonomerInfo available

    .. versionadded:: 2.0.0

    .. versionchanged:: 2.1.0
        Added R/S chirality support
    """
    format = 'RDKIT'

    @staticmethod
    def _format_hint(thing):
        """Can this Parser read object *thing*?"""
        try:
            from rdkit import Chem
        except ImportError:  # if no rdkit, probably not rdkit
            return False
        else:
            return isinstance(thing, Chem.Mol)

    def parse(self, **kwargs):
        """Parse RDKit into Topology

        Returns
        -------
        MDAnalysis Topology object
        """
        # For converters the "filename" attribute holds the input object
        # itself, here an rdkit.Chem.rdchem.Mol.
        mol = self.filename

        # Atoms
        names = []
        chiralities = []
        resnums = []
        resnames = []
        elements = []
        masses = []
        charges = []
        aromatics = []
        ids = []
        atomtypes = []
        segids = []
        altlocs = []
        chainids = []
        icodes = []
        occupancies = []
        tempfactors = []

        try:
            atom = mol.GetAtomWithIdx(0)
        except RuntimeError:
            # RDKit raises RuntimeError for an empty molecule:
            # return an empty Topology
            top = Topology(n_atoms=0, n_res=0, n_seg=0,
                           attrs=None,
                           atom_resindex=None,
                           residue_segindex=None)
            return top

        # check if multiple charges present
        if atom.HasProp('_GasteigerCharge') and (
                atom.HasProp('_TriposPartialCharge')
        ):
            warnings.warn(
                'Both _GasteigerCharge and _TriposPartialCharge properties '
                'are present. Using Gasteiger charges by default.')

        for atom in mol.GetAtoms():
            ids.append(atom.GetIdx())
            elements.append(atom.GetSymbol())
            masses.append(atom.GetMass())
            aromatics.append(atom.GetIsAromatic())
            chiralities.append(_rdkit_atom_to_RS(atom))
            mi = atom.GetMonomerInfo()

            if mi:  # atom name and residue info are present
                names.append(mi.GetName().strip())
                resnums.append(mi.GetResidueNumber())
                resnames.append(mi.GetResidueName())
                segids.append(mi.GetSegmentNumber())
                altlocs.append(mi.GetAltLoc().strip())
                chainids.append(mi.GetChainId().strip())
                icodes.append(mi.GetInsertionCode().strip())
                occupancies.append(mi.GetOccupancy())
                tempfactors.append(mi.GetTempFactor())
            else:
                # atom name (MOL2 only)
                try:
                    names.append(atom.GetProp('_TriposAtomName'))
                except KeyError:
                    pass

            # atom type (MOL2 only)
            try:
                atomtypes.append(atom.GetProp('_TriposAtomType'))
            except KeyError:
                pass

            # gasteiger charge (computed):
            # if the user took the time to compute them, make it a priority
            # over charges read from a MOL2 file
            try:
                charges.append(atom.GetDoubleProp('_GasteigerCharge'))
            except KeyError:
                # partial charge (MOL2 only)
                try:
                    charges.append(atom.GetDoubleProp('_TriposPartialCharge'))
                except KeyError:
                    pass

        # make Topology attributes
        attrs = []
        n_atoms = len(ids)

        # Residue info must be all-or-nothing, otherwise residue arrays
        # would be misaligned with the atom arrays.
        if resnums and (len(resnums) != n_atoms):
            raise ValueError(
                "ResidueInfo is only partially available in the molecule. "
                "If you have added hydrogens to the input RDKit molecule with "
                "`Chem.AddHs(mol)`, consider using "
                "`Chem.AddHs(mol, addResidueInfo=True)` instead"
            )

        # * Attributes always present *

        # Atom attributes
        for vals, Attr, dtype in (
            (ids, Atomids, np.int32),
            (elements, Elements, object),
            (masses, Masses, np.float32),
            (aromatics, Aromaticities, bool),
            (chiralities, RSChirality, 'U1'),
        ):
            attrs.append(Attr(np.array(vals, dtype=dtype)))

        # Bonds
        bonds = []
        bond_types = []
        bond_orders = []
        for bond in mol.GetBonds():
            bonds.append((bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()))
            bond_orders.append(bond.GetBondTypeAsDouble())
            bond_types.append(str(bond.GetBondType()))
        attrs.append(Bonds(bonds, types=bond_types, order=bond_orders))

        # * Optional attributes *

        # Atom name
        if names:
            attrs.append(Atomnames(np.array(names, dtype=object)))
        else:
            # fall back to synthetic names like "C0", "N1", ...
            for atom in mol.GetAtoms():
                name = "%s%d" % (atom.GetSymbol(), atom.GetIdx())
                names.append(name)
            attrs.append(Atomnames(np.array(names, dtype=object)))

        # Atom type
        if atomtypes:
            attrs.append(Atomtypes(np.array(atomtypes, dtype=object)))
        else:
            atomtypes = guessers.guess_types(names)
            attrs.append(Atomtypes(atomtypes, guessed=True))

        # Partial charges
        if charges:
            attrs.append(Charges(np.array(charges, dtype=np.float32)))
        else:
            pass  # no guesser yet

        # PDB only
        for vals, Attr, dtype in (
            (altlocs, AltLocs, object),
            (chainids, ChainIDs, object),
            (occupancies, Occupancies, np.float32),
            (tempfactors, Tempfactors, np.float32),
        ):
            if vals:
                attrs.append(Attr(np.array(vals, dtype=dtype)))

        # Residue
        if any(resnums) and not any(val is None for val in resnums):
            resnums = np.array(resnums, dtype=np.int32)
            resnames = np.array(resnames, dtype=object)
            segids = np.array(segids, dtype=object)
            icodes = np.array(icodes, dtype=object)
            # collapse per-atom arrays into per-residue arrays
            residx, (resnums, resnames, icodes, segids) = change_squash(
                (resnums, resnames, icodes, segids),
                (resnums, resnames, icodes, segids))
            n_residues = len(resnums)
            for vals, Attr, dtype in (
                (resnums, Resids, np.int32),
                (resnums.copy(), Resnums, np.int32),
                (resnames, Resnames, object),
                (icodes, ICodes, object),
            ):
                attrs.append(Attr(np.array(vals, dtype=dtype)))
        else:
            # no residue info: everything lives in a single residue
            attrs.append(Resids(np.array([1])))
            attrs.append(Resnums(np.array([1])))
            residx = None
            n_residues = 1

        # Segment
        if any(segids) and not any(val is None for val in segids):
            segidx, (segids,) = change_squash((segids,), (segids,))
            n_segments = len(segids)
            attrs.append(Segids(segids))
        else:
            # no segment info: single default segment
            n_segments = 1
            attrs.append(Segids(np.array(['SYSTEM'], dtype=object)))
            segidx = None

        # create topology
        top = Topology(n_atoms, n_residues, n_segments,
                       attrs=attrs,
                       atom_resindex=residx,
                       residue_segindex=segidx)

        return top
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/converters/RDKitParser.py
|
Python
|
gpl-2.0
| 12,849
|
[
"MDAnalysis",
"RDKit"
] |
b63a1e376077ff61db995bdf73c72f654548cd8a824b422095b3719b8335d797
|
"""
Harris corner detector
Inspired from Solem's implementation
http://www.janeriksolem.net/2009/01/harris-corner-detector-in-python.html
"""
from scipy import ndimage
from . import peak
def _compute_harris_response(image, eps=1e-6, gaussian_deviation=1):
"""Compute the Harris corner detector response function
for each pixel in the image
Parameters
----------
image : ndarray of floats
Input image.
eps : float, optional
Normalisation factor.
gaussian_deviation : integer, optional
Standard deviation used for the Gaussian kernel.
Returns
--------
image : (M, N) ndarray
Harris image response
"""
if len(image.shape) == 3:
image = image.mean(axis=2)
# derivatives
image = ndimage.gaussian_filter(image, gaussian_deviation)
imx = ndimage.sobel(image, axis=0, mode='constant')
imy = ndimage.sobel(image, axis=1, mode='constant')
Wxx = ndimage.gaussian_filter(imx * imx, 1.5, mode='constant')
Wxy = ndimage.gaussian_filter(imx * imy, 1.5, mode='constant')
Wyy = ndimage.gaussian_filter(imy * imy, 1.5, mode='constant')
# determinant and trace
Wdet = Wxx * Wyy - Wxy**2
Wtr = Wxx + Wyy
# Alternate formula for Harris response.
# Alison Noble, "Descriptions of Image Surfaces", PhD thesis (1989)
harris = Wdet / (Wtr + eps)
return harris
def harris(image, min_distance=10, threshold=0.1, eps=1e-6,
           gaussian_deviation=1):
    """Detect corner coordinates from a Harris response image.

    Parameters
    ----------
    image : ndarray of floats
        Input image.

    min_distance : int, optional
        Minimum number of pixels separating interest points and image
        boundary.

    threshold : float, optional
        Relative threshold impacting the number of interest points.

    eps : float, optional
        Normalisation factor.

    gaussian_deviation : integer, optional
        Standard deviation used for the Gaussian kernel.

    Returns
    -------
    coordinates : (N, 2) array
        (row, column) coordinates of interest points.

    Examples
    --------
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> harris(square, min_distance=1)
    array([[3, 3],
           [3, 6],
           [6, 3],
           [6, 6]])
    """
    response = _compute_harris_response(
        image, eps=eps, gaussian_deviation=gaussian_deviation)
    # Corners are the local maxima of the response image.
    return peak.peak_local_max(response,
                               min_distance=min_distance,
                               threshold=threshold)
|
emmanuelle/scikits.image
|
skimage/feature/_harris.py
|
Python
|
bsd-3-clause
| 3,265
|
[
"Gaussian"
] |
f3b312ca8ae2685299a0bc2d0eee6bfe5b4a58a1ab40bda5c18362e67dca7d8e
|
#!/usr/bin/python
'''
Author: Alexander Godlewski
Year: 2011
A script to browse through the leaderboards for SOCOM 4
using multiprocessing to overcome network blocking
Gather the results and dump to a CSV file
There are issues with the implementation of the leaderboards
that causes the time played for a player to often be
synchronized to 60 minute changes
Another issue is that the leaderboards are constantly changing
across page views. So it a player may move from one page to
another and be re-parsed and the another player could move to
an already parsed page and not be recorded. It would take constant
runs of this script to gather all the players.
Expect this process to take approx. 21 minutes or more, depending on how
many processes you choose(variable numproc). It has to gather 100k
players over 5k pages
'''
import urllib2, urllib, re, os, multiprocessing
from time import time
# Shared multiprocessing state: a Manager proxies these objects so all
# worker processes can read/mutate them safely.
manager = multiprocessing.Manager()
requestcount = manager.Value('d', 0)    # total HTTP requests issued so far
pages = manager.Value('d', 0)           # largest leaderboard page count seen
playerdata = manager.dict()             # player name -> tuple of stat fields
numproc = 24                            # number of scraper worker processes
processes = []                          # Process handles (parent only)
waitevent = manager.Event()             # released once every worker is ready
procwait = manager.Value('d', numproc)  # countdown of workers still initialising
procwaitlock = manager.Lock()
pagelist = manager.list()               # work queue of page numbers to scrape
pagelistlock = manager.Lock()
pagecountlock = manager.Lock()
requestcountlock = manager.Lock()
playerdatalock = manager.Lock()

# Regexs for scraping the ASP.NET leaderboard pages
re_viewstate = re.compile(r'__VIEWSTATE" value="(?P<viewstate>.*?)"')
re_records = re.compile(r'Displaying .*? of (?P<records>[0-9,]*) records')
re_pages = re.compile(r'<a id="ctl00_phContent_leaderboards_pager_btnLast".*?>\.\.\. (?P<pages>\d*)</a>')
re_player = re.compile(r'<span id="ctl00_phContent_leaderboards_rptStatsTable_ctl.*?<tr.*?>(?:</span>)?(?P<player>.*?)</tr>', re.DOTALL) # Get a player block
re_playeritems = re.compile(r'<td class=".*?">(?:\s*<a.*?>)?(?P<data>.+?)(?:</a>\s*)?</td>', re.DOTALL) # Individual player fields
re_prevpage = re.compile(r'__PREVIOUSPAGE" value="(?P<prev>.*?)"') # Previous page key

# Browser-like request headers to avoid trivial bot blocking.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:6.0) Gecko/20100101 Firefox/6.0 Iceweasel/6.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Connection': 'keep-alive',
    'Cache-Control': 'no-cache, no-cache',
}
def scrape():
    ''' The main scraping function.

    Spawns `numproc` worker processes, waits for them all to finish,
    then prints summary statistics and dumps the gathered player data
    to a timestamped CSV file.  (Python 2 code: note the old-style
    except clause and print statements.)
    '''
    t0 = time()
    try:
        for i in range(1, numproc + 1):
            process = multiprocessing.Process(
                target = scrapeproc,
                args = (
                    waitevent,
                    pagelist,
                    pagelistlock,
                    procwait,
                    procwaitlock,
                    playerdatalock,
                    requestcountlock,
                    pagecountlock,
                    pages,
                    i - 1,
                    playerdata,
                    requestcount
                )
            )
            processes.append(process)
            process.start()
        # block until every worker has drained the page queue
        for p in processes:
            p.join()
    except urllib2.HTTPError, error:
        # a worker bubbled up an HTTP failure: report it and kill the rest
        print ''
        print 'There has been an error with the following request:'
        print '%4d: %d - %s' % (requestcount, error.getcode(), error.geturl())
        for p in processes:
            p.terminate()
    t1 = time()
    print ''
    print '###########################################################################'
    print '%d second%s elapsed(%4d requests, %6d players)' % (t1 - t0, '' if t1 - t0 == 1 else 's', requestcount.value, len(playerdata))
    print '###########################################################################'
    # unique output name: timestamp + pid
    filename = 'output-%s-%s.csv' % (int(time()), os.getpid())
    print ''
    print 'Outputting the playerdata to %s' % filename
    outputcsv(filename)
def scrapeproc(we, pl, pllock, pw, pwlock, pdlock, rclock, plock, p, offset, pd, rc):
    ''' A process to scrape pages.

    Each worker builds its own opener (cookies are per-process), passes
    the site's age gate, discovers the total page count, then pulls page
    numbers off the shared `pl` work queue until it is empty.

    Parameters are the shared Manager proxies: we=ready event,
    pl/pllock=page queue, pw/pwlock=init countdown, pdlock=player-data
    lock, rclock=request-counter lock, plock=page-count lock, p=page
    count, pd=player dict, rc=request counter.  `offset` is the worker
    index (currently unused in the body).
    '''
    opener = urllib2.build_opener()
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPCookieProcessor())
    opener.add_handler(urllib2.HTTPRedirectHandler())
    opener.add_handler(urllib2.UnknownHandler())
    data = readurl(rclock, requestcount, opener, 'http://www.socom.com/en-us/Leaderboards/SOCOM4', 'Initial page request')
    vs = parseviewstate(data)
    # NOTE(review): hard-coded birth date submitted to the age gate.
    postdata = genpostdata(vs, '', 'lbsubmit', {'dlDate1': 7, 'dlDate2': 21, 'dlDate3': 1986, 'scriptManager': 'panelCulture|lbSubmit'})
    data = readurl(rclock, requestcount, opener, 'http://www.socom.com/?url=%2fen-us%2fLeaderboards%2fSOCOM4', 'Submit to agegate', postdata)
    data = readurl(rclock, requestcount, opener, 'http://www.socom.com/en-us/Leaderboards/SOCOM4', 'Load first leaderboard page')
    pagecount = parsepagecount(data)
    # keep the global page count at the maximum any worker observed
    plock.acquire()
    if pagecount > p.value:
        p.value = pagecount
    plock.release()
    # Decrement procwait count, at 0 continue
    pwlock.acquire()
    if pw.value == 1:
        # last worker to initialise seeds the queue and releases everyone
        print 'Expecting %d pages' % p.value
        pl.extend(range(1, p.value + 1))
        we.set()
    pw.value = pw.value - 1
    pwlock.release()
    # Wait until all processes have reached the same point so
    # the page count is at the max value found. All
    # openerdirectors are prepared to visit the pages
    we.wait()
    # Loop until there are no more pages left to be parsed
    while True:
        pllock.acquire()
        # No pages left
        if not pl:
            pllock.release()
            break
        pagenum = pl.pop(0)
        pllock.release()
        # re-parse the ASP.NET state tokens from the previous response
        vs = parseviewstate(data)
        prev = parseprevpagekey(data)
        postdata = genpostdata(vs, '', '',
            {
                '__PREVIOUSPAGE': prev,
                'ctl00$phContent$leaderboards$txtName': '',
                'ctl00$phContent$leaderboards$btnGoToRank': 'GO',
                'ctl00$phContent$leaderboards$txtRank': ((pagenum - 1) * 20) + 1,
                'ctl00$scriptManager': 'ctl00$phContent$leaderboards$panelLeaderBoards|ctl00$phContent$leaderboards$btnGoToRank'
            }
        )
        # jump to the page by rank: 20 players per page
        data = readurl(rclock, rc, opener, 'http://www.socom.com/en-us/Leaderboards/SOCOM4', 'LB page %d of %d' % (pagenum, p.value), postdata)
        parseplayers(pdlock, pd, data)
def readurl(rclock, rc, od, url, name, data = []):
    ''' Read a url and print info.

    Bumps the shared request counter `rc` under `rclock`, POSTs `data`
    (url-encoded) with the module-level browser headers through opener
    `od`, logs the request, and returns the response body.

    NOTE(review): `data = []` is a mutable default argument; harmless
    here because it is never mutated, but fragile.
    '''
    rclock.acquire()
    currequestnum = rc.value + 1
    rc.value += 1
    rclock.release()
    req = urllib2.Request(url, urllib.urlencode(data), headers)
    page = od.open(req)
    print '%4d: %d - (%s)%s' % (currequestnum, page.getcode(), name, page.geturl())
    return page.read()
def parseplayers(pdlock, pd, data):
    ''' Parse the player data for a response.

    Extracts each player's table row from the HTML, strips commas so the
    fields are CSV-safe, and stores them in the shared dict `pd` keyed by
    player name (field 1); the remaining fields are kept as a tuple.
    '''
    matches = re_player.findall(data)
    for match in matches:
        fields = re_playeritems.findall(match)
        name = fields[1].strip().replace(',', '')
        pdlock.acquire()
        if name in pd:
            # leaderboard pages shift between requests, so duplicates happen
            print 'WARNING: %s already parsed' % name
        pd[name] = tuple(fields[i].strip().replace(',', '') for i in (0, 2, 3, 4, 5, 6, 7, 8))
        pdlock.release()
def parseviewstate(data):
    ''' Extract the ASP.NET __VIEWSTATE token from a response body,
    or None when it is absent. '''
    match = re_viewstate.search(data)
    return match.group('viewstate') if match else None
def parsepagecount(data):
    ''' Extract the total leaderboard page count from a response body,
    or 0 when the pager link is absent. '''
    match = re_pages.search(data)
    return int(match.group('pages')) if match else 0
def parseprevpagekey(data):
    ''' Extract the ASP.NET __PREVIOUSPAGE key from a response body,
    or None when it is absent. '''
    match = re_prevpage.search(data)
    return match.group('prev') if match else None
def genpostdata(vs, ea, et, other = None):
    ''' Generate a POST dict, just simplifies code

    vs = viewstate
    ea = event arguement
    et = event target
    other = other post data (dict)
    '''
    data = dict()
    data['__VIEWSTATE'] = vs
    data['__EVENTARGUMENT'] = ea
    data['__EVENTTARGET'] = et
    if other:
        # BUG FIX: the original merged with
        # ``dict(data.items() + other.items())``, which only works on
        # Python 2 (dict views do not support ``+`` on Python 3).
        # ``update`` behaves identically on both versions.
        data.update(other)
    return data
def outputcsv(filename):
    ''' Output the csv file.

    Writes one line per player: the player name followed by the stored
    stat fields, comma-separated (the fields were comma-stripped at
    parse time).  IO failures are reported but not re-raised.
    '''
    try:
        f = open(filename, 'w')
        for name, data in playerdata.items():
            f.write('%s,%s\n' % (name, ','.join(data)))
        f.close()
    except IOError:
        print 'There was an issue writing to %s' % filename
# Entry point: spawn the worker processes and dump the results to CSV.
if __name__ == '__main__':
    scrape()
|
aleximplode/s4scraper
|
scrapef.py
|
Python
|
mit
| 9,681
|
[
"VisIt"
] |
830ffd4ef2068f7e3cb30bf25bc34287a361f9ff6e5525dd4a0b008260a7313c
|
# Developed by Redjumpman for Redbot
# Standard Library
import ast
import csv
import re
from collections import namedtuple
# Discord
import discord
# Redbot
from redbot.core import commands
from redbot.core.data_manager import bundled_data_path
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
from redbot.core.utils.chat_formatting import box
# Third-Party Requirements
from tabulate import tabulate
__version__ = "3.1.0"
__author__ = "Redjumpman"

# Maps generation digits to Roman numerals for display.
switcher = {"1": "I", "2": "II", "3": "III", "4": "IV", "5": "V", "6": "VI",
            "7": "VII", "8": "VIII"}
# Pokémon whose names legitimately contain a hyphen (so hyphenated input
# must not be treated as a form/variant suffix).
exceptions = ('ho-oh', 'jangmo-o', 'hakamo-o', 'kommo-o', 'porygon-z',
              'nidoran-f', 'nidoran-m', 'wormadam-plant', 'wormadam-sandy',
              'wormadam-trash', 'shaymin-Land', 'shaymin-Sky',
              'hoopa-confined', 'hoopa-unbound', 'lycanroc-midday',
              'lycanroc-midnight', 'lycanroc-dusk', 'kyurem-white',
              'kyurem-black')
# Pokémon that cannot learn any TMs.
tm_exceptions = ("Beldum", "Burmy", "Cascoon", "Caterpie", "Combee", "Cosmoem", "Cosmog",
                 "Ditto", "Kakuna", "Kricketot", "Magikarp", "Unown", "Weedle", "Wobbuffet",
                 "Wurmple", "Wynaut", "Tynamo", "Metapod", "MissingNo.", "Scatterbug",
                 "Silcoon", "Smeargle")
# Bulbapedia link templates used when building embed hyperlinks.
url = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pokémon\)"
url2 = "https://bulbapedia.bulbagarden.net/wiki/"
class Pokedex(commands.Cog):
    """Search for Pokemon.

    Discord cog backed by two bundled CSV files (Pokemon.csv, Items.csv);
    all lookups are linear scans of those files.
    """
    def __init__(self):
        # Path to the cog's bundled data directory (contains the CSVs).
        self.path = bundled_data_path(self)
    @commands.group(autohelp=True)
    async def pokemon(self, ctx):
        """This is the list of Pokémon queries you can perform."""
        pass
    @pokemon.command()
    async def version(self, ctx):
        """Display running version of Pokedex
        Returns:
            Text output of your installed version of Pokedex
        """
        await ctx.send("You are running pokedex version {}.".format(__version__))
    @commands.command(aliases=['dex'])
    async def pokedex(self, ctx, *, pokemon: str):
        """Search for information on a Pokémon
        Examples:
            Regular: [p]pokedex pikachu
            Megas: [p]pokedex charizard-mega y
            Gigas: [p]pokedex corviknight-giga
            Alola: [p]pokedex geodude-alola
            Galarian: [p]pokedex meowth-galar
            Forms: [p]pokedex hoopa-unbound
            Variants: [p]pokedex floette-orange
        """
        if pokemon.isdigit():
            # Numeric queries look up the CSV's ID column, which stores
            # zero-padded "#NNN" values — hence the 3-digit requirement.
            if len(pokemon) == 3:
                poke = self.build_data(f'#{pokemon}', key='ID')
            else:
                return await ctx.send("When searching by pokedex number, it must "
                                      "be a three digit number. Example: 001")
        else:
            poke = self.build_data(pokemon.title())
        if poke is None:
            return await ctx.send('A Pokémon with that name could not be found.')
        # Embed colour follows the Pokémon's primary (first-listed) type.
        color = self.color_lookup(poke.Types.split('/')[0])
        abilities = self.ability_builder(ast.literal_eval(poke.Abilities))
        # NOTE(review): uses the raw user query for the link, while the
        # location command uses poke.Pokemon — confirm intended.
        link_name = self.link_builder(pokemon)
        wiki = "[{} {}]({})".format(poke.Pokemon, poke.ID, url.format(link_name))
        header = [wiki, poke.Japanese, poke.Species]
        # Build embed
        embed = discord.Embed(colour=color, description='\n'.join(header))
        embed.set_thumbnail(url=poke.Image)
        embed.add_field(name="Stats", value="\n".join(ast.literal_eval(poke.Stats)))
        embed.add_field(name="Types", value=poke.Types)
        embed.add_field(name="Resistances", value="\n".join(ast.literal_eval(poke.Resistances)))
        embed.add_field(name="Weaknesses", value="\n".join(ast.literal_eval(poke.Weaknesses)))
        embed.add_field(name="Abilities", value="\n".join(abilities))
        embed.set_footer(text=poke.Description)
        await ctx.send(embed=embed)
    @pokemon.command()
    async def moves(self, ctx, *, pokemon: str):
        """Search for a Pokémon's moveset
        If the generation is not specified it will default to the latest generation.
        Examples:
            Numbers: [p]pokemon moves charizard-4
            Special: [p]pokemon moves hoopa-unbound
            Alolan: [p]pokemon moves geodude-alola
        """
        pokemon, generation = self.clean_output(pokemon)
        poke = self.build_data(pokemon.title())
        if poke is None:
            return await ctx.send('A Pokémon with that name could not be found.')
        # Fall back through generation 7 and then 8 when the stored move
        # data has no entry for the requested generation.
        try:
            move_set = ast.literal_eval(poke.Moves)[generation]
        except KeyError:
            generation = '7'
            try:
                move_set = ast.literal_eval(poke.Moves)[generation]
            except KeyError:
                generation = '8'
                move_set = ast.literal_eval(poke.Moves)[generation]
        table = box(tabulate(move_set, headers=['Level', 'Move', 'Type', 'Power', 'Acc'],
                             numalign='right'), lang='ml')
        # Short tables fit one embed; longer movesets are paginated.
        if len(table) <= 900:
            color = self.color_lookup(poke.Types.split('/')[0])
            embed = discord.Embed(colour=color)
            embed.set_author(name=poke.Pokemon, icon_url=poke.Image)
            embed.add_field(name='\u200b', value=table, inline=False)
            embed.add_field(name="Versions", value='\n'.join(self.game_version(generation)))
            embed.set_footer(text="This moveset is based on generation {}.".format(generation))
            await ctx.send(embed=embed)
        else:
            embeds = self.embed_builder(poke, move_set, generation, moves=True)
            await menu(ctx, embeds, DEFAULT_CONTROLS)
    @pokemon.command()
    async def item(self, ctx, *, item_name: str):
        """Search for an item in the Pokémon universe
        Args:
            item_name: variable length string
        Returns:
            Discord embed
        Raises:
            AttributeError: Item not found
        Examples:
            pokemon item master ball
        """
        item = self.item_search(item_name.title())
        if item is None:
            return await ctx.send("An item with that name could not be found.")
        color = self.color_lookup(item.Category)
        embed = discord.Embed(colour=color, title=item.Item)
        embed.set_thumbnail(url=item.Image)
        embed.add_field(name="Cost", value=item.Cost)
        embed.add_field(name="Category", value=item.Category)
        embed.add_field(name="Effect", value=item.Effect)
        await ctx.send(embed=embed)
    @pokemon.command()
    async def tmset(self, ctx, *, pokemon: str):
        """Get a Pokémon's learnset by generation(1-7).
        Example: !pokedex tmset V pikachu """
        pokemon, generation = self.clean_output(pokemon)
        if pokemon.title() in tm_exceptions:
            return await ctx.send("This Pokémon cannot learn TMs.")
        poke = self.build_data(pokemon.title())
        if poke is None:
            return await ctx.send('A Pokémon with that name could not be found.')
        # Same generation fallback strategy as the moves command.
        try:
            tm_set = ast.literal_eval(poke.Tms)[generation]
        except KeyError:
            try:
                generation = '7'
                tm_set = ast.literal_eval(poke.Tms)[generation]
            except KeyError:
                generation = '8'
                tm_set = ast.literal_eval(poke.Tms)[generation]
        embeds = self.embed_builder(poke, tm_set, generation)
        await menu(ctx, embeds, DEFAULT_CONTROLS)
    def embed_builder(self, poke, data, gen, moves=False):
        # Paginate `data` into embeds of 12 table rows each, with a
        # page-number footer; used by both the moves and tmset commands.
        color = self.color_lookup(poke.Types.split('/')[0])
        table_type = 'Moves' if moves else 'TMs'
        col = 'Lvl' if moves else 'TMs'
        headers = (col, 'Name', 'Type', 'Power', 'Acc')
        embeds = []
        for i in range(0, len(data), 12):
            table = box(tabulate(data[i:i + 12], headers=headers,
                                 numalign='right'), lang='ml')
            e = discord.Embed(colour=color)
            e.set_author(name=poke.Pokemon, icon_url=poke.Image)
            e.add_field(name='\u200b', value=table, inline=False)
            if moves:
                e.add_field(name="Versions", value='\n'.join(self.game_version(gen)))
            else:
                e.add_field(name='\u200b', value='\u200b')
            embeds.append(e)
        embeds = [x.set_footer(text=f"{table_type} based on generation {switcher[gen]}.\n"
                                    f"You are viewing page {idx} of {len(embeds)}")
                  for idx, x in enumerate(embeds, 1)]
        return embeds
    @pokemon.command()
    async def location(self, ctx, *, pokemon: str):
        """Get a Pokémon's catch location.
        Example !pokedex location voltorb
        """
        pokemon, generation = self.clean_output(pokemon)
        poke = self.build_data(pokemon.title())
        if poke is None:
            return await ctx.send('A Pokémon with that name could not be found.')
        link_name = self.link_builder(poke.Pokemon)
        color = self.color_lookup(poke.Types.split('/')[0])
        wiki = "[{} {}]({})".format(poke.Pokemon, poke.ID, url.format(link_name))
        header = '\n'.join((wiki, 'Catch Locations'))
        locations = ast.literal_eval(poke.Locations)
        # One embed page per game version recorded in the CSV.
        embeds = []
        for idx, (key, value) in enumerate(locations.items()):
            e = discord.Embed(colour=color, description=header)
            e.set_thumbnail(url=poke.Image)
            if value is None:
                location = "Not available in this version."
            else:
                location = value
            e.add_field(name=key, value=location)
            embeds.append(e)
        embeds = [x.set_footer(text='You are viewing page {} of {}'.format(idx, len(embeds)))
                  for idx, x in enumerate(embeds, 1)]
        await menu(ctx, embeds, DEFAULT_CONTROLS)
    @staticmethod
    def game_version(generation):
        # Game titles released in each generation, keyed "1"-"8".
        versions = {
            '1': ['Pokémon Red', 'Pokémon Blue', 'Pokémon Yellow'],
            '2': ['Pokémon Gold', 'Pokémon Silver', 'Pokémon Crystal'],
            '3': ['Pokémon Ruby', 'Pokémon Sapphire', 'Pokémon FireRed', 'Pokémon LeafGreen',
                  'Pokémon Emerald'],
            '4': ['Pokémon Diamond', 'Pokémon Pearl', 'Pokémon Platinum', 'Pokémon HeartGold',
                  'Pokémon SoulSilver'],
            '5': ['Pokémon Black', 'Pokémon White', 'Pokémon Black 2', 'Pokémon White 2'],
            '6': ['Pokémon X', 'Pokémon Y', 'Pokémon Omega Ruby', 'Pokémon Alpha Sapphire'],
            '7': ['Pokémon Sun', 'Pokémon Moon', 'Pokémon Ultra Sun', 'Pokémon Ultra Moon',
                  'Pokémon Sun Let\'s Go Pikachu', 'Pokémon Let\'s Go Eevee'],
            '8': ['Pokémon Sword', 'Pokémon Shield']
        }
        return versions[generation]
    @staticmethod
    def clean_output(pokemon):
        # Parse a raw query into (name, generation). Returns ('', '') for
        # queries that cannot be interpreted.
        if '-' not in pokemon:
            return pokemon, '7'
        query = pokemon.split('-')
        if len(query) > 2:
            # Two or more hyphens: only known multi-hyphen names (or Alola
            # forms) are accepted; the final segment is the generation.
            partition = pokemon.rpartition('-')
            if partition[0].lower() not in exceptions and 'alola' not in partition[0].lower():
                return '', ''
            else:
                return partition[0], partition[2]
        elif len(query) == 1:
            return query[0], '7'
        else:
            if pokemon.lower() in exceptions or query[1].lower() == 'alola':
                # Hyphen is part of the name/form, not a generation suffix.
                return pokemon, '7'
            elif query[1].isdigit():
                return query
            else:
                return '', ''
    def item_search(self, name):
        # Linear scan of Items.csv for an exact (title-cased) item name.
        fp = self.path / 'Items.csv'
        try:
            with fp.open('rt', encoding='iso-8859-15') as f:
                reader = csv.DictReader(f, delimiter=',')
                for row in reader:
                    if row['Item'] == name:
                        Item = namedtuple('Item', reader.fieldnames)
                        return Item(**row)
        except FileNotFoundError:
            print("The csv file could not be found in pokedex data folder.")
        return None
    def build_data(self, identifier, key='Pokemon'):
        # Linear scan of Pokemon.csv; `key` selects the lookup column
        # ('Pokemon' for names, 'ID' for "#NNN" dex numbers).
        fp = self.path / 'Pokemon.csv'
        try:
            with fp.open('rt', encoding='iso-8859-15') as f:
                reader = csv.DictReader(f, delimiter=',')
                for row in reader:
                    if row[key] == identifier:
                        Pokemon = namedtuple('Pokemon', reader.fieldnames)
                        return Pokemon(**row)
        except FileNotFoundError:
            print("The csv file could not be found in pokedex data folder.")
        return None
    @staticmethod
    def link_builder(name):
        # Convert a display name into the Bulbapedia page-name fragment.
        link = name.lower().replace(' ', '_')
        if link in exceptions:
            if 'nidoran' in link:
                # Nidoran pages are disambiguated by the gender letter.
                link = 'nidoran_({}\)'.format(name[-1].upper())
            return link
        else:
            # Strip form/generation suffixes for ordinary names.
            link = link.split('-')[0]
            return link
    @staticmethod
    def ability_builder(abilities):
        # Turn raw ability strings into markdown links to Bulbapedia,
        # preserving "or" alternatives and trailing parenthetical notes.
        pattern = '( or )|(\(.*\))'
        pattern2 = '(\(.*\))'
        fmt1 = "[{}]({}{}_(Ability\)) or [{}]({}{}_(Ability\)) {}"
        fmt2 = "[{}]({}{}_(Ability\)) or [{}]({}{}_(Ability\))"
        fmt3 = "[{}]({}{}_(Ability\)) {}"
        fmt4 = "[{}]({}{}_(Ability\))"
        linked = []
        for ability in abilities:
            if ' or ' in ability and '(' in ability:
                # Two alternatives plus a parenthetical note.
                ab_set = [x for x in re.split(pattern, ability) if x and x != ' or ']
                params = [ab_set[0], url2, ab_set[0].replace(' ', '_'), ab_set[1],
                          url2, ab_set[1].replace(' ', '_'), ab_set[2]]
                linked.append(fmt1.format(*params))
            elif ' or ' in ability:
                ab_set = [x for x in re.split(pattern, ability) if x and x != ' or ']
                params = [ab_set[0], url2, ab_set[0].replace(' ', '_'), ab_set[1],
                          url2, ab_set[1].replace(' ', '_')]
                linked.append(fmt2.format(*params))
            elif '(' in ability:
                ab_set = [x for x in re.split(pattern2, ability) if x]
                params = [ab_set[0], url2, ab_set[0].replace(' ', '_'), ab_set[1]]
                linked.append(fmt3.format(*params))
            else:
                linked.append(fmt4.format(ability, url2, ability.replace(' ', '_')))
        return linked
    @staticmethod
    def color_lookup(key):
        # Embed colour for a Pokémon type or an item category; defaults
        # to white for unknown keys.
        color_table = {"Normal": 0x999966, "Fire": 0xFF6600, "Fighting": 0xFF0000, "Ice": 0x99FFFF,
                       "Water": 0x3399FF, "Flying": 0x9999FF, "Grass": 0x33FF00, "Poison": 0x660099,
                       "Electric": 0xFFFF00, "Ground": 0xFFCC33, "Psychic": 0xFF3399,
                       "Rock": 0xCC9966, "Bug": 0x669900, "Dragon": 0x003399, "Dark": 0x333333,
                       "Ghost": 0x9933FF, "Steel": 0x999999, "Fairy": 0xFF99FF,
                       "Key Item": 0xAC00EB, "Berries": 0xF5F794, "Battle Items": 0xED002B,
                       "General Items": 0xFFFFFF, "Hold Items": 0xC976A8, "Machines": 0x999999,
                       "Medicine": 0x79EdA1, "Poké Balls": 0xFF0000}
        color = color_table.get(key, 0xFFFFFF)
        return color
|
Redjumpman/Jumper-Cogs
|
pokedex/pokedex.py
|
Python
|
gpl-3.0
| 15,336
|
[
"CRYSTAL"
] |
b7166a3b848f928f2c648f9b26470b89169c77ab3092dd798da810db0f2a33fe
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
from pymatgen.core.structure import Molecule
from pymatgen.io.nwchem import NwTask, NwInput, NwInputError, NwOutput
# Directory holding the bundled NWChem fixture files used by these tests.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files', "nwchem")
# Methane (CH4) geometry shared by all test cases below.
coords = [[0.000000, 0.000000, 0.000000],
          [0.000000, 0.000000, 1.089000],
          [1.026719, 0.000000, -0.363000],
          [-0.513360, -0.889165, -0.363000],
          [-0.513360, 0.889165, -0.363000]]
mol = Molecule(["C", "H", "H", "H", "H"], coords)
class NwTaskTest(unittest.TestCase):
    """Tests for NwTask construction, validation and string rendering."""
    def setUp(self):
        # Plain DFT task, DFT task with a cosmo block, and an ESP task.
        self.task = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="dft",
                           theory_directives={"xc": "b3lyp"})
        self.task_cosmo = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="dft",
                                 theory_directives={"xc": "b3lyp"},
                                 alternate_directives={'cosmo': "cosmo"})
        self.task_esp = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="esp")
    def test_multi_bset(self):
        # Per-element basis sets must be rendered one library line each.
        t = NwTask.from_molecule(
            mol, theory="dft", basis_set={"C": "6-311++G**",
                                          "H": "6-31++G**"},
            theory_directives={"xc": "b3lyp"})
        ans = """title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-31++G**"
end
dft
xc b3lyp
end
task dft optimize"""
        self.assertEqual(str(t), ans)
    def test_str_and_from_string(self):
        ans = """title "dft optimize"
charge 0
basis cartesian
H library "6-31g"
end
dft
xc b3lyp
end
task dft optimize"""
        self.assertEqual(str(self.task), ans)
    def test_to_from_dict(self):
        # Round-trip through the dict (MSONable) representation.
        d = self.task.as_dict()
        t = NwTask.from_dict(d)
        self.assertIsInstance(t, NwTask)
    def test_init(self):
        # Invalid theory/operation names must be rejected at construction.
        self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"},
                          theory="bad")
        self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"},
                          operation="bad")
    def test_dft_task(self):
        # Charge +1 on CH4 implies a doublet (mult 2).
        task = NwTask.dft_task(mol, charge=1, operation="energy")
        ans = """title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-31g"
H library "6-31g"
end
dft
mult 2
xc b3lyp
end
task dft energy"""
        self.assertEqual(str(task), ans)
    def test_dft_cosmo_task(self):
        task = NwTask.dft_task(
            mol, charge=mol.charge, operation="energy",
            xc="b3lyp", basis_set="6-311++G**",
            alternate_directives={'cosmo': {"dielec": 78.0}})
        ans = """title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
cosmo
dielec 78.0
end
task dft energy"""
        self.assertEqual(str(task), ans)
    def test_esp_task(self):
        task = NwTask.esp_task(mol, charge=mol.charge, operation="",
                               basis_set="6-311++G**")
        ans = """title "H4C1 esp "
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
task esp """
        self.assertEqual(str(task), ans)
class NwInputTest(unittest.TestCase):
    """Tests for NwInput serialization, dict round-trips and parsing."""
    def setUp(self):
        # Five-task workflow: optimize, freq, then single-point energies
        # for the neutral, cation and anion species.
        tasks = [
            NwTask.dft_task(mol, operation="optimize", xc="b3lyp",
                            basis_set="6-31++G*"),
            NwTask.dft_task(mol, operation="freq", xc="b3lyp",
                            basis_set="6-31++G*"),
            NwTask.dft_task(mol, operation="energy", xc="b3lyp",
                            basis_set="6-311++G**"),
            NwTask.dft_task(mol, charge=mol.charge + 1, operation="energy",
                            xc="b3lyp", basis_set="6-311++G**"),
            NwTask.dft_task(mol, charge=mol.charge - 1, operation="energy",
                            xc="b3lyp", basis_set="6-311++G**")
        ]
        self.nwi = NwInput(mol, tasks,
                           geometry_options=["units", "angstroms", "noautoz"],
                           memory_options="total 1000 mb")
        self.nwi_symm = NwInput(mol, tasks,
                                geometry_options=["units", "angstroms",
                                                  "noautoz"],
                                symmetry_options=["c1"])
    def test_str(self):
        ans = """memory total 1000 mb
geometry units angstroms noautoz
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft optimize
title "H4C1 dft freq"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft freq
title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge -1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
"""
        self.assertEqual(str(self.nwi), ans)
        # Same input with an explicit symmetry line in the geometry block.
        ans_symm = """geometry units angstroms noautoz
symmetry c1
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft optimize
title "H4C1 dft freq"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft freq
title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge -1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
"""
        self.assertEqual(str(self.nwi_symm), ans_symm)
    def test_to_from_dict(self):
        d = self.nwi.as_dict()
        nwi = NwInput.from_dict(d)
        self.assertIsInstance(nwi, NwInput)
        # Ensure it is json-serializable.
        json.dumps(d)
        d = self.nwi_symm.as_dict()
        nwi_symm = NwInput.from_dict(d)
        self.assertIsInstance(nwi_symm, NwInput)
        json.dumps(d)
    def test_from_string_and_file(self):
        nwi = NwInput.from_file(os.path.join(test_dir, "ch4.nw"))
        self.assertEqual(nwi.tasks[0].theory, "dft")
        self.assertEqual(nwi.memory_options, "total 1000 mb stack 400 mb")
        self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*")
        self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**")
        # Try a simplified input.
        str_inp = """start H4C1
geometry units angstroms
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
H library "6-31++G*"
C library "6-31++G*"
end
dft
xc b3lyp
mult 1
end
task scf optimize
title "H4C1 dft freq"
charge 0
task scf freq
title "H4C1 dft energy"
charge 0
basis cartesian
H library "6-311++G**"
C library "6-311++G**"
end
task dft energy
title "H4C1 dft energy"
charge 1
dft
xc b3lyp
mult 2
end
task dft energy
title "H4C1 dft energy"
charge -1
task dft energy
"""
        # Later tasks inherit basis/theory settings from earlier ones.
        nwi = NwInput.from_string(str_inp)
        self.assertEqual(nwi.geometry_options, ['units', 'angstroms'])
        self.assertEqual(nwi.tasks[0].theory, "scf")
        self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*")
        self.assertEqual(nwi.tasks[-1].theory, "dft")
        self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**")
        str_inp_symm = str_inp.replace("geometry units angstroms",
                                       "geometry units angstroms\n symmetry "
                                       "c1")
        nwi_symm = NwInput.from_string(str_inp_symm)
        self.assertEqual(nwi_symm.geometry_options, ['units', 'angstroms'])
        self.assertEqual(nwi_symm.symmetry_options, ['c1'])
        self.assertEqual(nwi_symm.tasks[0].theory, "scf")
        self.assertEqual(nwi_symm.tasks[0].basis_set["C"], "6-31++G*")
        self.assertEqual(nwi_symm.tasks[-1].theory, "dft")
        self.assertEqual(nwi_symm.tasks[-1].basis_set["C"], "6-311++G**")
class NwOutputTest(unittest.TestCase):
    """Tests parsing of NWChem output fixtures: energies, hessians,
    forces, error detection, frequencies and TDDFT excitations."""
    def test_read(self):
        nwo = NwOutput(os.path.join(test_dir, "CH4.nwout"))
        nwo_cosmo = NwOutput(os.path.join(test_dir, "N2O4.nwout"))
        self.assertEqual(0, nwo[0]["charge"])
        self.assertEqual(-1, nwo[-1]["charge"])
        self.assertEqual(len(nwo), 5)
        self.assertAlmostEqual(-1102.6224491715582, nwo[0]["energies"][-1], 2)
        self.assertAlmostEqual(-1102.9986291578023, nwo[2]["energies"][-1])
        # Cosmo runs report gas-phase, cosmo-scf and solution-phase energies.
        self.assertAlmostEqual(-11156.354030653656,
                               nwo_cosmo[5]["energies"][0]["cosmo scf"])
        self.assertAlmostEqual(-11153.374133394364,
                               nwo_cosmo[5]["energies"][0]["gas phase"])
        self.assertAlmostEqual(-11156.353632962995,
                               nwo_cosmo[5]["energies"][0]["sol phase"], 2)
        self.assertAlmostEqual(-11168.818934311605,
                               nwo_cosmo[6]["energies"][0]["cosmo scf"], 2)
        self.assertAlmostEqual(-11166.3624424611462,
                               nwo_cosmo[6]["energies"][0]['gas phase'], 2)
        self.assertAlmostEqual(-11168.818934311605,
                               nwo_cosmo[6]["energies"][0]['sol phase'], 2)
        self.assertAlmostEqual(-11165.227959110889,
                               nwo_cosmo[7]["energies"][0]['cosmo scf'], 2)
        self.assertAlmostEqual(-11165.025443612385,
                               nwo_cosmo[7]["energies"][0]['gas phase'], 2)
        self.assertAlmostEqual(-11165.227959110154,
                               nwo_cosmo[7]["energies"][0]['sol phase'], 2)
        # Spot-check hessian elements from the frequency job.
        self.assertAlmostEqual(nwo[1]["hessian"][0][0], 4.60187e+01)
        self.assertAlmostEqual(nwo[1]["hessian"][1][2], -1.14030e-08)
        self.assertAlmostEqual(nwo[1]["hessian"][2][3], 2.60819e+01)
        self.assertAlmostEqual(nwo[1]["hessian"][6][6], 1.45055e+02)
        self.assertAlmostEqual(nwo[1]["hessian"][11][14], 1.35078e+01)
        # CH4.nwout, line 722
        self.assertAlmostEqual(nwo[0]["forces"][0][3], -0.001991)
        # N2O4.nwout, line 1071
        self.assertAlmostEqual(nwo_cosmo[0]["forces"][0][4], 0.011948)
        # There should be four DFT gradients.
        self.assertEqual(len(nwo_cosmo[0]["forces"]), 4)
        # Ionization energy / electron affinity from the charged runs.
        ie = (nwo[4]["energies"][-1] - nwo[2]["energies"][-1])
        ea = (nwo[2]["energies"][-1] - nwo[3]["energies"][-1])
        self.assertAlmostEqual(0.7575358648355177, ie)
        self.assertAlmostEqual(-14.997877958701338, ea)
        self.assertEqual(nwo[4]["basis_set"]["C"]["description"],
                         "6-311++G**")
        # Error detection in deliberately failed runs.
        nwo = NwOutput(os.path.join(test_dir, "H4C3O3_1.nwout"))
        self.assertTrue(nwo[-1]["has_error"])
        self.assertEqual(nwo[-1]["errors"][0], "Bad convergence")
        nwo = NwOutput(os.path.join(test_dir, "CH3CH2O.nwout"))
        self.assertTrue(nwo[-1]["has_error"])
        self.assertEqual(nwo[-1]["errors"][0], "Bad convergence")
        nwo = NwOutput(os.path.join(test_dir, "C1N1Cl1_1.nwout"))
        self.assertTrue(nwo[-1]["has_error"])
        self.assertEqual(nwo[-1]["errors"][0], "autoz error")
        nwo = NwOutput(os.path.join(test_dir,
                                    "anthrachinon_wfs_16_ethyl.nwout"))
        self.assertTrue(nwo[-1]["has_error"])
        self.assertEqual(nwo[-1]["errors"][0],
                         "Geometry optimization failed")
        # Projected and normal vibrational frequencies.
        nwo = NwOutput(os.path.join(test_dir,
                                    "anthrachinon_wfs_15_carboxyl.nwout"))
        self.assertEqual(nwo[1]['frequencies'][0][0], -70.47)
        self.assertEqual(len(nwo[1]['frequencies'][0][1]), 27)
        self.assertEqual(nwo[1]['frequencies'][-1][0], 3696.74)
        self.assertEqual(nwo[1]['frequencies'][-1][1][-1],
                         (0.20498, -0.94542, -0.00073))
        self.assertEqual(nwo[1]["normal_frequencies"][1][0], -70.72)
        self.assertEqual(nwo[1]["normal_frequencies"][3][0], -61.92)
        self.assertEqual(nwo[1]["normal_frequencies"][1][1][-1],
                         (0.00056, 0.00042, 0.06781))
    def test_parse_tddft(self):
        # Singlet excitation roots parsed from a TDDFT log.
        nwo = NwOutput(os.path.join(test_dir, "phen_tddft.log"))
        roots = nwo.parse_tddft()
        self.assertEqual(len(roots["singlet"]), 20)
        self.assertAlmostEqual(roots["singlet"][0]["energy"], 3.9291)
        self.assertAlmostEqual(roots["singlet"][0]["osc_strength"], 0.0)
        self.assertAlmostEqual(roots["singlet"][1]["osc_strength"], 0.00177)
    def test_get_excitation_spectrum(self):
        # Broadened spectrum generated from the TDDFT roots.
        nwo = NwOutput(os.path.join(test_dir, "phen_tddft.log"))
        spectrum = nwo.get_excitation_spectrum()
        self.assertEqual(len(spectrum.x), 2000)
        self.assertAlmostEqual(spectrum.x[0], 1.9291)
        self.assertAlmostEqual(spectrum.y[0], 0.0)
        self.assertAlmostEqual(spectrum.y[1000], 0.0007423569947114812)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
tschaume/pymatgen
|
pymatgen/io/tests/test_nwchem.py
|
Python
|
mit
| 13,627
|
[
"NWChem",
"pymatgen"
] |
6909cb3d669fa5ecc15fee84e1bbdf8f692418d763a339c3966aff2be04fcbdb
|
# -*- coding: latin-1 -*-
#
# Copyright (c) 2015-2016 Paul Bomke
# Distributed under the GNU GPL v2.
#
# This file is part of monkeyprint.
#
# monkeyprint is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# monkeyprint is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You have received a copy of the GNU General Public License
# along with monkeyprint. If not, see <http://www.gnu.org/licenses/>.
import vtk
import monkeyprintModelHandling
import monkeyprintGuiHelper
from PyQt4 import QtCore, QtGui
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class renderView(QtGui.QFrame):
    """Qt frame hosting a VTK render window for the monkeyprint scene.

    Wraps a QVTKRenderWindowInteractor together with a renderer, camera,
    annotation actors (axes + mouse-help text) and the build volume, and
    provides helpers to add/remove actors and reset the view.
    """
    def __init__(self, settings, console=None, backgroundColour=(0.329412, 0.34902, 0.427451)):
        # Call base class initializer.
        super(renderView, self).__init__()
        # Internalise objects.
        self.settings = settings
        self.console = console
        # Layout box holding the VTK widget and the options row.
        self.box = QtGui.QVBoxLayout()
        # Create vtk widget and pass main frame.
        self.vtkWidget = QVTKRenderWindowInteractor(self)
        self.box.addWidget(self.vtkWidget)
        # Render window and renderer.
        self.renderWindow = self.vtkWidget.GetRenderWindow()
        self.renderer = vtk.vtkRenderer()
        self.renderWindow.AddRenderer(self.renderer)
        # Camera looking at the centre of the build volume.
        self.camera = vtk.vtkCamera()
        self.camera.SetViewUp(0, 0, 1)
        self.camera.SetPosition(self.settings['buildSizeX'].value/2+200, self.settings['buildSizeY'].value/2-300, 300)
        self.camera.SetFocalPoint(self.settings['buildSizeX'].value/2, self.settings['buildSizeY'].value/2, self.settings['buildSizeZ'].value/2)
        self.camera.SetClippingRange(0.0001, 10000)
        self.renderer.SetActiveCamera(self.camera)
        # Background color.
        self.renderer.SetBackground(backgroundColour)
        # Interactor with trackball-style camera controls (default).
        self.renderWindowInteractor = self.renderWindow.GetInteractor()
        self.renderWindowInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
        # Pack the frame into the widget.
        self.setLayout(self.box)
        # Options row: reset button and axes toggle.
        self.boxOptions = QtGui.QHBoxLayout()
        self.box.addLayout(self.boxOptions)
        self.buttonReset = monkeyprintGuiHelper.button("Reset view", self.callbackResetButton)
        self.boxOptions.addWidget(self.buttonReset)
        self.buttonAxes = monkeyprintGuiHelper.checkbox("Show axes", self.callbackCheckButtonAxes, True)
        self.boxOptions.addWidget(self.buttonAxes)
        self.boxOptions.addStretch(1)
        # Add text and axes info.
        self.createAnnotations()
        # Build volume wireframe actor.
        self.buildVolume = monkeyprintModelHandling.buildVolume([self.settings['buildSizeX'].value, self.settings['buildSizeY'].value, self.settings['buildSizeZ'].value])
        self.addActor(self.buildVolume.getActor())
    def callbackResetButton(self):
        """Button callback: reset the camera to its initial pose."""
        self.reset()
    def callbackCheckButtonColour(self, widget, data=None):
        """Placeholder for the (not yet implemented) colour toggle."""
        pass
    def callbackCheckButtonAxes(self, visibility):
        """Checkbox callback: show or hide the axes actor."""
        self.axesActor.SetVisibility(visibility)
        self.render()
    def createAnnotations(self):
        """Create the axes actor and the mouse-help text overlay."""
        self.axesActor = vtk.vtkAxesActor()
        self.axesActor.SetTotalLength(30, 30, 30)
        self.axesActor.SetShaftTypeToCylinder()
        self.axesActor.SetCylinderRadius(.05)
        # Identical caption styling for all three axis labels.
        for caption in (self.axesActor.GetXAxisCaptionActor2D(),
                        self.axesActor.GetYAxisCaptionActor2D(),
                        self.axesActor.GetZAxisCaptionActor2D()):
            caption.GetTextActor().SetTextScaleModeToNone()
            textProperty = caption.GetCaptionTextProperty()
            textProperty.SetFontSize(12)
            textProperty.ItalicOff()
            textProperty.BoldOff()
            textProperty.ShadowOff()
        self.addActor(self.axesActor)
        # Mouse handling info in the lower left corner.
        self.infoText = vtk.vtkTextActor()
        self.infoText.SetInput("Rotate: Left mouse button\nPan: Middle mouse button\nZoom: Right mouse button")
        self.infoText.GetTextProperty().SetFontFamilyToArial()
        self.infoText.GetTextProperty().SetFontSize(11)
        self.infoText.GetTextProperty().SetColor(.6, .6, .6)
        self.infoText.SetDisplayPosition(20, 30)
        self.addActor(self.infoText)
    def reset(self):
        """Reset the camera of the render window to the default view."""
        if self.console:
            self.console.message("View reset.")
        self.camera.SetViewUp(0, 0, 1)
        self.camera.SetPosition(self.settings['buildSizeX'].value/2+200, self.settings['buildSizeY'].value/2-300, 300)
        self.camera.SetFocalPoint(self.settings['buildSizeX'].value/2, self.settings['buildSizeY'].value/2, self.settings['buildSizeZ'].value/2)
        self.camera.SetClippingRange(0.0001, 10000)
        self.render()
    def addActor(self, actor):
        """Add a single actor to the render window."""
        self.renderer.AddActor(actor)
    def addActors(self, actors):
        """Add an iterable of actors to the render window."""
        for actor in actors:
            self.renderer.AddActor(actor)
    def removeActors(self, actors):
        """Remove one actor or a tuple/list of actors from the view."""
        if type(actors) == tuple or type(actors) == list:
            for actor in actors:
                self.renderer.RemoveActor(actor)
        else:
            self.renderer.RemoveActor(actors)
    def render(self):
        """Refresh the render window."""
        self.renderWindowInteractor.Render()
    def destroy(self):
        """Tear down the VTK render window, then destroy the Qt frame.

        Bug fix: the original ended with ``Tkinter.Frame.destroy(self)``,
        which raised NameError (Tkinter is never imported) and named the
        wrong toolkit anyway — this widget derives from QtGui.QFrame.
        Delegate to the Qt superclass instead.
        """
        self.renderWindow.Finalize()
        self.renderWindowInteractor.TerminateApp()
        del self.renderWindow, self.renderWindowInteractor
        # Don't forget to destroy the frame itself!
        super(renderView, self).destroy()
    def initialize(self):
        """Start the interactor. IMPORTANT: call AFTER the widget has been placed."""
        self.renderWindowInteractor.Initialize()
        self.renderWindowInteractor.Start()
|
robotsinthesun/monkeyprint
|
monkeyprintModelViewer.py
|
Python
|
gpl-2.0
| 7,353
|
[
"VTK"
] |
08c5ecfd44f47ae5efbdf6ad9a263d0087753a71a6f739998ea5162a09217f98
|
from gammaspy.gammaData import peak, bg
import numpy as np
from six import iteritems
class FitModel(object):
"""!
@brief Combines background and peak models via Composition.
"""
    def __init__(self, bg_order=1, n_peaks=1, peak_centers=[1000.]):
        # NOTE(review): mutable default for peak_centers is shared across
        # calls; safe only while build() never mutates it.
        self.model_params = np.array([])  # flat vector of all submodel params
        self.model_params_bounds = [[],[]]  # [lower bounds, upper bounds] for the optimizer
        self.model_bank = {}  # name -> {"model": submodel, "idxs": param index list}
        self.build(bg_order, n_peaks, peak_centers)
    def build(self, bg_order, n_peaks, peak_centers):
        """!
        @brief Quickly build a multi-peak model with background
        @param bg_order  Background polynomial order (currently unused:
               a linear background is always created).
        @param n_peaks  Number of Gaussian peaks to add.
        @param peak_centers  Initial centroid guess for each peak.
        """
        bg_model = bg.LinModel() # todo add more bg model flexibility
        self.add_model(bg_model)
        for i in range(n_peaks):
            # Submodels are named gauss_0, gauss_1, ... so that peak
            # models can be recognised by the "gauss" prefix elsewhere.
            name = "gauss_" + str(i)
            self.add_model(peak.GaussModel(init_params=[1.e2, peak_centers[i], 1.0],
                                           name=name))
def add_model(self, in_model):
"""!
@brief Add a sub-model to the total model.
Appends models sequentially
"""
self.model_bank[in_model.name] = {}
self.model_bank[in_model.name]["model"] = in_model
input_model_nparams = len(in_model._params)
current_nparams = len(self.model_params)
self.model_params = np.concatenate((self.model_params, in_model._params))
self.model_bank[in_model.name]["idxs"] = list(range(current_nparams, current_nparams + input_model_nparams))
# parameter bounds for optimization
self.model_params_bounds[0] += in_model.bounds[0]
self.model_params_bounds[1] += in_model.bounds[1]
print("Model Added: %s" % in_model.name)
def opti_eval(self, x, *params):
"""!
@brief Evaluates all sub models.
Automatically partitions *params list into sublists
for each submodel.
@param x np_array of abscissa to evaluate gauss model at
@param params Gaussian model parameter array (len=3)
"""
output = np.zeros(len(x))
for model_name, model in iteritems(self.model_bank):
output += model["model"].eval(np.array(params)[model["idxs"]], x)
return output
def set_params(self, params):
"""!
@biref Freeze internal model parameters.
"""
if len(params) == len(self.model_params):
self.model_params = params
else:
print("WARNING: invalid number of parameters specified")
def set_cov(self, cov):
self.model_params_cov = cov
def eval(self, x):
"""!
@biref Evaluate model.
"""
output = np.zeros(len(x))
for model_name, model in iteritems(self.model_bank):
output += model["model"].eval(np.array(self.model_params)[model["idxs"]], x)
return output
def net_area(self):
"""!
@brief Area with background subtracted.
"""
peak_area_list, net_area = [], 0.
for model_name, model in iteritems(self.model_bank):
if "gauss" in model_name:
area = model["model"].area(np.array(self.model_params)[model["idxs"]])
net_area += area
peak_area_list.append(area)
return net_area, peak_area_list
    def bg_area(self, lbound=None, ubound=None):
        """!
        @brief Estimates the number of background counts under the
        peak. This is found by integrating the background model for
        +/- 3sigma from the mean of the peak (~99.7% of the peak)
        @param lbound Optional explicit lower integration bound; the
               3-sigma window is used when either bound is None.
        @param ubound Optional explicit upper integration bound.
        @return (total_bg_area, np.array of per-window bg areas)
        """
        bg_areas = []
        # Every non-peak submodel contributes one integral per peak window.
        for model_name, model in iteritems(self.model_bank):
            if "gauss" not in model_name:
                # peak_means()/peak_sigmas() are defined elsewhere in this
                # class; presumably one entry per fitted peak — TODO confirm.
                avg_model_mean = np.array(self.peak_means())
                avg_model_sd = np.array(self.peak_sigmas())
                a_s = avg_model_mean - 3. * avg_model_sd
                b_s = avg_model_mean + 3. * avg_model_sd
                for a, b in zip(a_s, b_s):
                    if lbound is None or ubound is None:
                        bg_areas.append(model["model"].integral(a, b, self.model_params[model["idxs"]]))
                    else:
                        bg_areas.append(model["model"].integral(lbound, ubound, self.model_params[model["idxs"]]))
        bg_areas = np.array(bg_areas)
        return np.sum(bg_areas), bg_areas
    def net_area_uncert(self, lbound, ubound, cov):
        """!
        @brief Computes Jacobian of the area fn for uncertainty calcs.
        @param lbound Float. Lower bound of ROI (for integral of bg model)
        @param ubound Float. Upper bound of ROI (for integral of bg model)
        @param cov Covariance matrix of all model parameters
        @return total area 1sigma uncert and list of individual submodel 1sigma uncerts
        """
        # Covariance must cover the full concatenated parameter vector.
        assert(cov.shape[0] == len(self.model_params))
        assert(cov.shape[1] == len(self.model_params))
        area_jac_all = np.array([])
        # Fallback scaling if no background model is present in the bank.
        scaling_factor = 1.5
        for model_name, model in iteritems(self.model_bank):
            if "gauss" in model_name:
                # jacobian of a gaussian peak area
                area_jac = model["model"].area_jac(np.array(self.model_params)[model["idxs"]])
            else:
                # jacobian of area under the bg model, integrated over the
                # union of the +/- 3 sigma windows of all peaks
                avg_model_mean = np.array(self.peak_means())
                avg_model_sd = np.array(self.peak_sigmas())
                a_s = avg_model_mean - 3. * avg_model_sd
                b_s = avg_model_mean + 3. * avg_model_sd
                sd_markers = np.concatenate((a_s, b_s))
                a, b = np.min(sd_markers), np.max(sd_markers)
                # Ratio of the peak window to the rest of the ROI
                scaling_factor = 1. + (b - a) / ((ubound - lbound) - (b - a))
                print("a: %f, b: %f" % (a, b))
                print("peak/bg ratio: %f" % (scaling_factor - 1.))
                assert(b > a)
                area_jac = model["model"].int_jac(a, b, np.array(self.model_params)[model["idxs"]])
            # Flatten 2D jacobians (row vector) into the running 1D jacobian.
            if len(area_jac.shape) == 2:
                area_jac_all = np.concatenate((area_jac_all, area_jac[0]))
            else:
                area_jac_all = np.concatenate((area_jac_all, area_jac))
        # standard propagation of uncertainty: J * C * J.T
        net_uncert = np.dot(area_jac_all, cov)
        net_uncert = np.dot(net_uncert, area_jac_all)
        # Split the total variance among peaks proportionally to their areas.
        peak_area_list = np.array(self.net_area()[1])
        peak_area_ratio = peak_area_list / np.sum(peak_area_list)
        peak_area_uncerts = net_uncert * (peak_area_ratio)
        # returns variance, not SD!
        return net_uncert, peak_area_uncerts, scaling_factor
def peak_means(self):
"""!
@brief Mean of each subpeak
"""
peak_means = []
for model_name, model in iteritems(self.model_bank):
if "gauss" in model_name:
# mean of gaussian peak
peak_means.append(np.array(self.model_params)[model["idxs"]][1])
return peak_means
def peak_sigmas(self):
"""!
@brief Mean of each subpeak
"""
sigmas = []
for model_name, model in iteritems(self.model_bank):
if "gauss" in model_name:
# 1sd of gaussian peak
sigmas.append(np.abs(np.array(self.model_params)[model["idxs"]][2]))
return sigmas
    def tot_area(self):
        """!
        @brief Total area under the model.
        @note Placeholder — not yet implemented.
        """
        pass
    def pprint_params(self):
        """!
        @brief Return nicely formatted table of fitted parameters.
        @note Placeholder — not yet implemented.
        """
        pass
|
wgurecky/GammaSpy
|
gammaspy/gammaData/fitmodel.py
|
Python
|
gpl-3.0
| 7,497
|
[
"Gaussian"
] |
01e0f8cb2c8b735bad1520d9296e7ecf1af44cc777e710a96ed383f9ff46d3b8
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility classes and functions related
to data tables and text.
"""
import sys
import warnings
from psi4 import core
from psi4.driver import constants
class Table(object):
    """Class defining a flexible Table object for storing data.

    .. deprecated:: a ``FutureWarning`` is raised on construction;
       scheduled for removal in Psi4 1.4.
    """

    def __init__(self, rows=(), row_label_width=10, row_label_precision=4, cols=(), width=16, precision=10):
        warnings.warn(
            "Using `psi4.driver.p4util.Table` is deprecated, and in 1.4 it will stop working\n",
            category=FutureWarning,
            stacklevel=2)
        self.row_label_width = row_label_width
        self.row_label_precision = row_label_precision
        self.width = width
        self.precision = precision
        self.rows = rows
        # A bare string is shorthand for a single-column table.
        if isinstance(cols, str):
            self.cols = (cols, )
        else:
            self.cols = cols
        self.labels = []
        self.data = []

    def format_label(self):
        """Function to pad the width of Table object labels."""
        # Renamed the local from `str` to avoid shadowing the builtin.
        fmt = lambda x: (('%%%ds' % (self.row_label_width)) % x)
        return " ".join(map(fmt, self.labels))

    def format_values(self, values):
        """Function to pad the width of Table object data cells."""
        fmt = lambda x: (('%%%d.%df' % (self.width, self.precision)) % x)
        return " ".join(map(fmt, values))

    def __getitem__(self, value):
        # Indexing accumulates row labels, e.g. ``table['a']['b'] = 1.0``.
        self.labels.append(value)
        return self

    def __setitem__(self, name, value):
        # The final label plus any accumulated via __getitem__ form the row key.
        self.labels.append(name)
        label = self.format_label()
        self.labels = []
        if isinstance(value, list):
            self.data.append((label, value))
        else:
            self.data.append((label, [value]))

    def save(self, file):
        """Function to save string of the Table object to *file*."""
        # Fixed: previously pickled the table into an unused local and
        # left the file handle unclosed on write errors.
        with open(file, "w") as fileobj:
            fileobj.write(str(self))

    def __str__(self):
        rowstr = lambda x: '%%%ds' % self.row_label_width % x
        colstr = lambda x: '%%%ds' % self.width % x

        lines = []
        # Header: row labels (or a single row title) followed by column titles.
        table_header = ""
        if isinstance(self.rows, str):
            table_header += "%%%ds" % self.row_label_width % self.rows
        else:
            table_header += " ".join(map(rowstr, self.rows))
        table_header += " ".join(map(colstr, self.cols))
        lines.append(table_header)

        for datarow in self.data:
            row_data = datarow[0]
            row_data += self.format_values(datarow[1])
            lines.append(row_data)

        return "\n".join(lines) + "\n"

    def copy(self):
        """Function to return a copy of the Table object."""
        import copy
        return copy.deepcopy(self)

    def absolute_to_relative(self, Factor=constants.hartree2kcalmol):
        """Function to shift the data of each column of the Table object
        such that the lowest value is zero. A scaling factor of *Factor* is applied.
        """
        import copy
        if len(self.data) == 0:
            return

        # Column-wise minima over all rows.
        current_min = list(copy.deepcopy(self.data[0][1]))
        for datarow in self.data:
            for col in range(0, len(datarow[1])):
                if current_min[col] > datarow[1][col]:
                    current_min[col] = datarow[1][col]

        # Shift each column to zero and scale in place.
        for datarow in self.data:
            for col in range(0, len(datarow[1])):
                datarow[1][col] = (datarow[1][col] - current_min[col]) * Factor

    def scale(self, Factor=constants.hartree2kcalmol):
        """Function to apply a scaling factor *Factor* to the
        data of the Table object.
        """
        if len(self.data) == 0:
            return
        for datarow in self.data:
            for col in range(0, len(datarow[1])):
                datarow[1][col] = datarow[1][col] * Factor
def banner(text, type=1, width=35, strNotOutfile=False):
    """Function to print *text* to output file in a banner of
    minimum width *width* and minimum three-line height for
    *type* = 1 or one-line height for *type* = 2. If *strNotOutfile*
    is True, function returns string rather than printing it
    to output file.
    """
    lines = text.split('\n')
    # Banner is as wide as the longest line, but never narrower than *width*.
    max_length = max(width, max(len(line) for line in lines))

    null = ''
    if type == 1:
        pieces = [' //' + null.center(max_length, '>') + '//\n']
        pieces.extend(' //' + line.center(max_length) + '//\n' for line in lines)
        pieces.append(' //' + null.center(max_length, '<') + '//\n')
        banner = ''.join(pieces)
    if type == 2:
        banner = ''.join((' ' + line + ' ').center(max_length, '=') for line in lines)

    if strNotOutfile:
        return banner
    else:
        core.print_out(banner)
def print_stdout(stuff):
    """Function to print *stuff* to standard output stream."""
    warnings.warn(
        "Using `psi4.driver.p4util.print_stdout` instead of `print` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    sys.stdout.write(str(stuff) + "\n")
def print_stderr(stuff):
    """Function to print *stuff* to standard error stream."""
    warnings.warn(
        "Using `psi4.driver.p4util.print_stderr` instead of `print(..., file=sys.stderr)` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    sys.stderr.write(str(stuff) + "\n")
def levenshtein(seq1, seq2):
    """Compute the Levenshtein (edit) distance between two strings.

    Returns the minimum number of single-character insertions, deletions
    and substitutions needed to turn *seq1* into *seq2*.
    """
    # Classic two-row dynamic programming, O(len(seq1) * len(seq2)).
    # Replaces the original rotated-row formulation that relied on a
    # trailing sentinel element and negative indexing.
    previous = list(range(len(seq2) + 1))
    for i, c1 in enumerate(seq1, start=1):
        current = [i]
        for j, c2 in enumerate(seq2, start=1):
            current.append(min(previous[j] + 1,                  # deletion
                               current[j - 1] + 1,               # insertion
                               previous[j - 1] + (c1 != c2)))    # substitution
        previous = current
    return previous[len(seq2)]
def find_approximate_string_matches(seq1, options, max_distance):
    """Find list of approximate (within `max_distance`) matches to string `seq1` among `options`."""
    matches = []
    for candidate in options:
        if levenshtein(seq1, candidate) <= max_distance:
            matches.append(candidate)
    return matches
|
CDSherrill/psi4
|
psi4/driver/p4util/text.py
|
Python
|
lgpl-3.0
| 7,357
|
[
"Psi4"
] |
7f132d524a34d73aa3906bb57e8c4d610365bac8d080e2aab926eb082677d042
|
from base import *
from globs import *
from types_builtin import app_map, subst
# Sure could use graphs here!

# Per-vat bookkeeping: which extrinsics get copied onto clones, a map from
# original objects to their clones, and the active DT transmutation table
# (False when no transmutation is in progress).
VatContents = DT('VatContents', ('copiedExtrinsics', ['*Extrinsic']),
                 ('replacements', {'a': 'a'}),
                 ('transmute', {'a': 'a'}))

VAT = new_env('VAT', VatContents)

# Links a clone back to the object it was cloned from (never chained).
Original = new_extrinsic('Original', 'a')
def set_orig(clone, orig):
    """Record the original of `clone`, collapsing to the root original."""
    # Don't need to recurse since there's only one level of clones
    root = extrinsic(Original, orig) if has_extrinsic(Original, orig) else orig
    add_extrinsic(Original, clone, root)
def orig_loc(obj):
    """Location of obj's original, or of obj itself when it is not a clone."""
    # Ugh, I don't like the conditional check...
    target = extrinsic(Original, obj) if has_extrinsic(Original, obj) else obj
    return extrinsic(Location, target)
def original(extr, obj):
    """Look up extrinsic `extr` on the object that `obj` was cloned from."""
    orig = extrinsic(Original, obj)
    return extrinsic(extr, orig)
def original_has(extr, obj):
    """True iff `obj` is a clone and its original carries extrinsic `extr`."""
    if not has_extrinsic(Original, obj):
        return False
    orig = extrinsic(Original, obj)
    return has_extrinsic(extr, orig)
def in_vat(func):
    """Run `func` inside a fresh, empty VAT environment."""
    contents = VatContents([], {}, False)
    return in_env(VAT, contents, func)
# Clone structured data, recording information about its clone in the vat
def clone(src, extrinsics):
    """Deep-clone `src`, copying the listed extrinsics onto the clones."""
    vat = env(VAT)
    vat.copiedExtrinsics = extrinsics
    return clone_structured(src)
def clone_structured(src, apps=None):
    """Clone one structured (DT) object, recursing into its fields.

    `apps` optionally maps type variables to concrete types for fields of
    parameterized datatypes.  The clone is registered in the vat's
    replacement map, copied extrinsics are transferred, and the active
    transmutation table (if any) may swap the ctor for a different DT.
    """
    ctor = instance_ctor(src)
    # Clone every field, substituting type applications where needed.
    fs = []
    for field in ctor.fields:
        fnm = extrinsic(Name, field)
        ft = field.type
        if apps:
            ft = subst(apps, ft)
        fs.append(clone_by_type(getattr(src, fnm), ft))
    ctor_cls = extrinsic(TrueRepresentation, ctor)

    vat = env(VAT)
    if vat.transmute:
        # Transmutation active: map this object's DT form to its destination DT.
        destData = vat.transmute.get(extrinsic(FormSpec, SUPERS[ctor_cls]))
        if destData is not None:
            ctor = transmuted_ctor(src, destData)
            ctor_cls = extrinsic(TrueRepresentation, ctor)

    o = ctor_cls(*fs)

    # Transfer the requested extrinsics from the source onto the clone.
    for extr in vat.copiedExtrinsics:
        if has_extrinsic(extr, src):
            add_extrinsic(extr, o, extrinsic(extr, src))
    if in_extrinsic_scope(Original):
        set_orig(o, src)

    vat.replacements[src] = o

    return o
def clone_by_type(src, t):
    """Clone `src` according to type `t`.

    Primitives, functions and weak references are shared rather than
    copied; tuples, arrays and datatypes are cloned recursively.
    """
    cls = t.__class__
    if cls is TVar:
        # BUGFIX: isinstance() arguments were reversed in the original.
        assert isinstance(src, Structured), \
                "Can't clone unstructured %r without type info" % (src,)
        return clone_structured(src)
    elif cls in (TPrim, TFunc, TWeak):
        # Shared as-is: primitives are immutable, weak refs are fixed up later.
        return src
    elif cls is TTuple:
        assert isinstance(src, tuple)
        return tuple(clone_by_type(v, tt) for v, tt in ezip(src, t.tupleTypes))
    elif cls is TData:
        # BUGFIX: the assert message referenced an undefined name `obj`.
        assert isinstance(src, extrinsic(TrueRepresentation, t.data)), \
                "Expected %s, got: %r" % (t.data, src)
        apps = t.appTypes and app_map(t.data, t.appTypes)
        return clone_structured(src, apps)
    elif cls is TArray:
        assert isinstance(src, list)
        return [clone_by_type(s, t.elemType) for s in src]
    else:
        assert False, "Bad type to clone: %r" % (t,)
def instance_ctor(obj):
    """Return the ctor form matching obj's runtime variant."""
    ctors = t_DT(type(obj)).data.ctors
    if len(ctors) > 1:
        return ctors[obj._ctor_ix]
    return ctors[0]
def transmuted_ctor(obj, destData):
    """Find the ctor in `destData` at obj's variant index."""
    ctors = destData.ctors
    index = obj._ctor_ix if len(ctors) > 1 else 0
    assert index < len(ctors), "Don't know how to transmute %s!" % (obj,)
    return ctors[index]
# Update an object's weak references to point at new clones from this vat
def rewrite(obj):
    """Repoint weak refs inside `obj` using this vat's replacement map."""
    t = t_DT(type(obj))
    return rewrite_by_type(obj, t)
def rewrite_by_type(obj, t):
    """Recursively repoint weak references inside `obj` at this vat's clones.

    Strong references are recursed into; weak references whose target
    appears in the vat's replacement map are swapped in place.
    """
    cls = t.__class__
    if cls is TVar:
        # BUGFIX: isinstance() arguments were reversed in the original.
        assert isinstance(obj, Structured), \
                "Can't rewrite unstructured %r without type info" % (obj,)
        rewrite_by_type(obj, t_DT(type(obj)))
    elif cls in (TPrim, TFunc):
        pass
    elif cls is TTuple:
        assert isinstance(obj, tuple)
        for v, tt in ezip(obj, t.tupleTypes):
            # Weak refs in tuples can't be replaced in place (tuples are immutable).
            assert not isinstance(tt, TWeak), "TODO"
            rewrite_by_type(v, tt)
    elif cls is TData:
        assert isinstance(obj, extrinsic(TrueRepresentation, t.data)), \
                "Expected %s, found %s %s" % (t.data, type(obj), obj)
        apps = t.appTypes and app_map(t.data, t.appTypes)
        ctor = instance_ctor(obj)
        repls = env(VAT).replacements
        for field in ctor.fields:
            fnm = extrinsic(Name, field)
            ft = field.type
            if apps:
                ft = subst(apps, ft)
            val = getattr(obj, fnm)
            if isinstance(ft, TWeak):
                # Weak field: swap for the clone if its target was cloned.
                if val in repls:
                    setattr(obj, fnm, repls[val])
            else:
                rewrite_by_type(val, ft)
    elif cls is TArray:
        et = t.elemType
        assert isinstance(obj, list)
        if isinstance(et, TWeak):
            repls = env(VAT).replacements
            for i, w in enumerate(obj):
                if w in repls:
                    obj[i] = repls[w]
        else:
            for s in obj:
                rewrite_by_type(s, et)
    elif cls is TWeak:
        assert False, "Shouldn't get here (should be rewritten in other cases)"
    else:
        assert False, "Bad type to rewrite: %r" % (t,)
# Clone a structured object, changing its type in the process
def transmute(obj, mapping, extrinsics):
    """Clone `obj`, converting datatypes per `mapping` (src form -> dest form)."""
    vat = env(VAT)
    vat.copiedExtrinsics = extrinsics
    table = {}
    for src, dest in mapping.iteritems():
        table[src.data] = dest.data
    vat.transmute = table
    result = clone_structured(obj)
    vat.transmute = False
    return result
# AST visitor&mutator (not really vat)
# Env+class is redundant; could just put this all in the class.
# But this is plumbing anyway

# Holds the active Visitor instance for the duration of a visit() traversal.
VISIT = new_env('VISIT', None)
def visit(visitor, obj, t):
    """Traverse `obj` (of type `t`, or a type string) with a Visitor subclass."""
    inst = visitor()
    inst.obj = None
    inst.t = None
    inst.fts = None
    t = parse_type(t) if isinstance(t, basestring) else t
    in_env(VISIT, inst, lambda: visit_by_type(obj, t))
class Visitor(object):
    """Base class for custom AST visitors used with visit().

    A custom handler (named after a ctor or 't_<datatype>') may call
    self.visit('fieldName', ...) to descend into specific fields, or
    self.visit() with no path to recurse into all fields.
    """
    def visit(self, *path):
        obj, t = self.obj, self.t
        # Walk the path: ints index into arrays, strings select ctor fields.
        for field in path:
            if isinstance(field, int):
                assert isinstance(t, TArray), "Can't index %s" % (t,)
                obj = obj[field]
                t = t.elemType
                continue
            assert field in self.fts, \
                    "%s is not a field {%s}" % (field, ', '.join(self.fts))
            t = self.fts[field]
            # Catch some stupidity
            if len(path) == 1:
                # Single-field visits mark the field consumed (set to None)
                # so double-visiting the same field is caught.
                assert t is not None, "Already visited this field!"
                self.fts[field] = None
            assert not isinstance(t, TWeak), \
                    "%s is weak and won't be visited" % (field,)
            obj = getattr(obj, field)
        # Empty path => recurse with custom handlers disabled to avoid loops.
        return visit_by_type(obj, t, bool(path))
def visit_by_type(obj, t, customVisitors=True):
    """Recursively visit `obj` (of type `t`) via the active VISIT Visitor.

    Weak references, primitives, functions and type variables are skipped.
    When `customVisitors` is true, a handler named after the ctor (or
    't_<datatype>') on the Visitor instance takes over for TData nodes.
    """
    cls = t.__class__
    if cls in (TVar, TPrim, TFunc, TWeak):
        pass
    elif cls is TTuple:
        assert isinstance(obj, tuple)
        for v, tt in ezip(obj, t.tupleTypes):
            visit_by_type(v, tt)
    elif cls is TData:
        data = t.data
        assert isinstance(obj, extrinsic(TrueRepresentation, data)), \
                "Expected %s, got %s %s" % (data, type(obj), obj)
        apps = t.appTypes and app_map(data, t.appTypes)
        visitor = env(VISIT)
        ctor = extrinsic(FormSpec, type(obj))
        # Field name -> (possibly substituted) field type.
        fts = dict((extrinsic(Name, f), subst(apps,f.type) if apps else f.type)
                for f in ctor.fields)
        if customVisitors:
            # Prefer a ctor-named handler, then a datatype-named one.
            custom = getattr(visitor, extrinsic(Name, ctor), None)
            if custom is None:
                custom = getattr(visitor, 't_'+extrinsic(Name, data), None)
            if custom is not None:
                # Scope field types for recursive visiting
                old = visitor.obj, visitor.t, visitor.fts
                visitor.obj, visitor.t, visitor.fts = obj, t, fts
                custom(obj)
                visitor.obj, visitor.t, visitor.fts = old
                return

        # Default to recursive visits
        for field in ctor.fields:
            fnm = extrinsic(Name, field)
            ft = fts[fnm]
            if not isinstance(ft, TWeak):
                visit_by_type(getattr(obj, fnm), ft)
    elif cls is TArray:
        assert isinstance(obj, list)
        if not isinstance(t.elemType, TWeak):
            for o in obj:
                visit_by_type(o, t.elemType)
    else:
        assert False, "Bad type to visit: %r" % (t,)
# Holds the active Mutator instance for the duration of a mutate() traversal.
MUTATE = new_env('MUTATE', None)
def mutate(mutator, obj, t):
    """Rewrite `obj` (of type `t`, or a type string) with a Mutator subclass."""
    inst = mutator()
    inst.obj = None
    inst.t = None
    inst.fts = None
    t = parse_type(t) if isinstance(t, basestring) else t
    return in_env(MUTATE, inst, lambda: mutate_by_type(obj, t))
class Mutator(object):
    """Base class for custom AST mutators used with mutate().

    A custom handler (named after a ctor or 't_<datatype>') may call
    self.mutate('fieldName', ...) to transform specific fields, or
    self.mutate() with no path to recurse into all fields; handlers
    return the replacement object.
    """
    def mutate(self, *path):
        obj, t = self.obj, self.t
        # Walk the path: ints index into arrays, strings select ctor fields.
        for field in path:
            if isinstance(field, int):
                assert isinstance(t, TArray), "Can't index %s" % (t,)
                obj = obj[field]
                t = t.elemType
                continue
            assert field in self.fts, \
                    "%s is not a field {%s}" % (field, ', '.join(self.fts))
            t = self.fts[field]
            # Catch some stupidity
            if len(path) == 1:
                # Single-field mutations mark the field consumed so
                # double-mutating the same field is caught.
                assert t is not None, "Already mutated this field!"
                self.fts[field] = None
            assert not isinstance(t, TWeak), \
                    "%s is weak and won't be mutated" % (field,)
            obj = getattr(obj, field)
        # Empty path => recurse with custom handlers disabled to avoid loops.
        return mutate_by_type(obj, t, bool(path))
def mutate_by_type(obj, t, customMutators=True):
    """Recursively mutate `obj` (of type `t`) via the active MUTATE Mutator.

    Returns the (possibly replaced) object.  Weak references, primitives,
    functions and type variables are returned untouched.
    """
    cls = t.__class__
    if cls in (TVar, TPrim, TFunc, TWeak):
        return obj
    elif cls is TTuple:
        assert isinstance(obj, tuple)
        # BUGFIX: this branch previously called rewrite_by_type(), which
        # belongs to the vat weak-ref pass and returns None for members.
        return tuple(mutate_by_type(v, tt)
                for v, tt in ezip(obj, t.tupleTypes))
    elif cls is TData:
        data = t.data
        assert isinstance(obj, extrinsic(TrueRepresentation, data)), \
                "Expected %s, got %r: %r" % (data, type(obj), obj)
        apps = t.appTypes and app_map(data, t.appTypes)
        mutator = env(MUTATE)
        ctor = extrinsic(FormSpec, type(obj))
        # Field name -> (possibly substituted) field type.
        fts = dict((extrinsic(Name, f), subst(apps,f.type) if apps else f.type)
                for f in ctor.fields)
        if customMutators:
            # Prefer a ctor-named handler, then a datatype-named one.
            custom = getattr(mutator, extrinsic(Name, ctor), None)
            if custom is None:
                custom = getattr(mutator, 't_'+extrinsic(Name, data), None)
            if custom is not None:
                # Scope field types for recursive mutation
                old = mutator.obj, mutator.t, mutator.fts
                mutator.obj, mutator.t, mutator.fts = obj, t, fts
                obj = custom(obj)
                mutator.obj, mutator.t, mutator.fts = old
                return obj

        # Default to recursive mutation
        for field in ctor.fields:
            fnm = extrinsic(Name, field)
            ft = fts[fnm]
            if not isinstance(ft, TWeak):
                val = getattr(obj, fnm)
                setattr(obj, fnm, mutate_by_type(val, ft))
        return obj
    elif cls is TArray:
        et = t.elemType
        assert isinstance(obj, list)
        if isinstance(et, TWeak):
            return obj
        return [mutate_by_type(o, et) for o in obj]
    else:
        assert False, "Bad type to mutate: %r" % (t,)
# COMBINATORIAL EXPLOSIONS

# Cache of reified monotypes keyed by type_key().  NOTE(review): nothing in
# this file populates or reads it — cement_type consults CONCRETE_TYPES
# (presumably from globs) instead; confirm this isn't a stale rename.
REIFIED_MONOTYPES = {}

def cement_type(t):
    # Look up the concrete (fully monomorphic) type registered for t;
    # the key must already be present.
    key = type_key(t)
    if key in CONCRETE_TYPES:
        return CONCRETE_TYPES[key]
    else:
        assert False
def type_key(t):
    """Build a canonical string key uniquely describing monotype `t`."""
    m = match(t)
    if m('TPrim(prim)'):
        return prim_key(m.prim)
    elif m('TData(dt, ts)'):
        nm = extrinsic(Name, dt)
        return '%s(%s)' % (nm, ', '.join(type_key(a) for a in m.ts))
    elif m('TArray(t, _)'): # kind?
        return '[%s]' % (type_key(m.t),)
    elif m('TWeak(t)'):
        return '*%s' % (type_key(m.t),)
    elif m('TTuple(ts)'):
        return 't(%s)' % (', '.join(type_key(t) for t in m.ts),)
    elif m('TVar(tv)'):
        # Type variables have no stable name; key on object identity.
        return 'ax%x' % (id(tv),)
    elif m('TFunc(ps, r, _)'):
        # BUGFIX: the map() arguments were swapped (was map(m.ps, type_key)).
        ts = map(type_key, m.ps)
        ts.append(match(m.r, ('Ret(t)', type_key),
                             ('Void()', lambda: 'void'),
                             ('Bottom()', lambda: 'bottom')))
        return 'f(%s)' % (' -> '.join(ts),)
    else:
        assert False
def prim_key(p):
    """One-word key for a primitive type form."""
    m = match(p)
    if m('PInt()'):
        return 'int'
    elif m('PFloat()'):
        return 'float'
    elif m('PStr()'):
        return 'str'
    elif m('PChar()'):
        return 'char'
    elif m('PBool()'):
        return 'bool'
    assert False
# vi: set sw=4 ts=4 sts=4 tw=79 ai et nocindent:
|
pshc/archipelago
|
vat.py
|
Python
|
mit
| 12,572
|
[
"VisIt"
] |
ddb76256476173e39acad81c1ee8a1cbd83f229f1e4c19872b582d0f327f7d61
|
#!/usr/bin/env python3
##########################################################################
#
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
'''Run two retrace instances in parallel, comparing generated snapshots.
'''
import math
import optparse
import os.path
import subprocess
import platform
import sys
from PIL import Image
from snapdiff import Comparer
from highlight import AutoHighlighter
import jsondiff
# Null file, to use when we're not interested in subprocesses output
# (kept open for the lifetime of the process by design).
NULL = open(os.path.devnull, 'wb')
class RetraceRun:
    """Wraps a running retrace subprocess that emits PNM snapshots on stdout."""

    def __init__(self, process):
        # subprocess.Popen with stdout piped (see Retracer.snapshot).
        self.process = process

    def nextSnapshot(self):
        """Read the next snapshot; return (image, callNo) or (None, None) at EOF."""
        image, comment = read_pnm(self.process.stdout)
        if image is None:
            return None, None
        # The PNM comment carries the trace call number of the snapshot.
        callNo = int(comment.strip())
        return image, callNo

    def terminate(self):
        # Best-effort termination of the retrace subprocess.
        try:
            self.process.terminate()
        except OSError:
            # Avoid http://bugs.python.org/issue14252
            pass
class Retracer:
    """Launches the retrace executable with per-run arguments and environment."""

    def __init__(self, retraceExe, args, env=None):
        self.retraceExe = retraceExe
        self.args = args
        self.env = env

    def _retrace(self, args, stdout=subprocess.PIPE):
        """Spawn the retrace process; `args` go before the common self.args."""
        cmd = [
            self.retraceExe,
        ] + args + self.args
        # Echo the effective environment overrides and command line to stderr.
        if self.env:
            for name, value in self.env.items():
                sys.stderr.write('%s=%s ' % (name, value))
        sys.stderr.write(' '.join(cmd) + '\n')
        try:
            return subprocess.Popen(cmd, env=self.env, stdout=stdout, stderr=NULL)
        except OSError as ex:
            sys.stderr.write('error: failed to execute %s: %s\n' % (cmd[0], ex.strerror))
            sys.exit(1)

    def retrace(self, args):
        """Run a plain retrace and return its exit code.

        NOTE(review): `args` is accepted but not forwarded
        (self._retrace([]) ignores it) — confirm whether intended.
        """
        p = self._retrace([])
        p.wait()
        return p.returncode

    def snapshot(self, call_nos):
        """Start a retrace that streams snapshots for `call_nos` to stdout."""
        process = self._retrace([
            '-s', '-',
            '-S', call_nos,
        ])
        return RetraceRun(process)

    def dump_state(self, call_no):
        '''Get the state dump at the specified call no.'''
        p = self._retrace([
            '-D', str(call_no),
        ])
        state = jsondiff.load(p.stdout)
        p.wait()
        return state.get('parameters', {})

    def diff_state(self, ref_call_no, src_call_no, stream):
        '''Compare the state between two calls.'''
        ref_state = self.dump_state(ref_call_no)
        src_state = self.dump_state(src_call_no)
        stream.flush()
        differ = jsondiff.Differ(stream)
        differ.visit(ref_state, src_state)
        stream.write('\n')
def read_pnm(stream):
    '''Read a PNM from the stream, and return the image object, and the comment.

    8-bit formats (P5/P6) yield a PIL Image; float formats (Pf/PF/PX)
    yield a numpy float32 array of shape (height, width, channels).
    Returns (None, None) at end of stream.
    '''
    magic = stream.readline()
    if not magic:
        return None, None
    magic = magic.rstrip()
    if magic == b'P5':
        channels = 1
        bytesPerChannel = 1
        mode = 'L'
    elif magic == b'P6':
        channels = 3
        bytesPerChannel = 1
        mode = 'RGB'
    elif magic == b'Pf':
        channels = 1
        bytesPerChannel = 4
        mode = 'R'
    elif magic == b'PF':
        channels = 3
        bytesPerChannel = 4
        mode = 'RGB'
    elif magic == b'PX':
        channels = 4
        bytesPerChannel = 4
        mode = 'RGB'
    else:
        raise Exception('Unsupported magic %r' % magic)
    # Accumulate '#' comment lines (used to carry the trace call number).
    comment = b''
    line = stream.readline()
    while line.startswith(b'#'):
        comment += line[1:]
        line = stream.readline()
    width, height = list(map(int, line.strip().split()))
    maximum = int(stream.readline().strip())
    if bytesPerChannel == 1:
        assert maximum == 255
    else:
        assert maximum == 1
    data = stream.read(height * width * channels * bytesPerChannel)
    if bytesPerChannel == 4:
        # Image magic only supports single channel floating point images, so
        # represent the image as numpy arrays
        import numpy
        # numpy.fromstring is deprecated/removed; frombuffer is read-only,
        # so copy before resizing into the final shape.
        pixels = numpy.frombuffer(data, dtype=numpy.float32).copy()
        pixels.resize((height, width, channels))
        return pixels, comment
    image = Image.frombuffer(mode, (width, height), data, 'raw', mode, 0, 1)
    return image, comment
def dumpNumpyImage(output, pixels, filename):
    """Save a float numpy image (height x width x channels, values in [0, 1])
    to *filename* as 8-bit via PIL.

    `output` is kept for interface compatibility (it was only used by a
    disabled debug dump in the original).
    """
    height, width, channels = pixels.shape

    import numpy

    # Quantize [0, 1] floats to 8-bit.
    pixels = (pixels*255).clip(0, 255).astype('uint8')

    # Image.fromarray did not work here; go through a raw buffer instead.
    # http://code.activestate.com/recipes/577591-conversion-of-pil-image-and-numpy-array/
    pixels = pixels.reshape(height*width, channels)
    if channels == 4:
        mode = 'RGBA'
    else:
        if channels < 3:
            # BUGFIX: this branch referenced undefined names `arr` and
            # `heigth` (NameError) and then asserted channels == 3, which
            # could never hold; pad the missing channels instead.
            pixels = numpy.c_[pixels, 255*numpy.ones((height * width, 3 - channels), numpy.uint8)]
        mode = 'RGB'
    im = Image.frombuffer(mode, (width, height), pixels.tostring(), 'raw', mode, 0, 1)
    im.save(filename)
def parse_env(optparser, entries):
    '''Translate a list of NAME=VALUE entries into an environment dictionary.'''
    if not entries:
        return None
    env = os.environ.copy()
    for entry in entries:
        if '=' not in entry:
            optparser.error('invalid environment entry %r' % entry)
        name, _, var = entry.partition('=')
        env[name] = var
    return env
def main():
    '''Main program.

    Runs the reference and source retraces in lockstep, comparing snapshot
    pairs call-by-call, highlighting mismatches and optionally dumping
    difference images and state diffs.
    '''

    global options

    # Parse command line options
    optparser = optparse.OptionParser(
        usage='\n\t%prog [options] -- [glretrace options] <trace>',
        version='%%prog')
    optparser.add_option(
        '-r', '--retrace', metavar='PROGRAM',
        type='string', dest='retrace', default='glretrace',
        help='retrace command [default: %default]')
    optparser.add_option(
        '--ref-driver', metavar='DRIVER',
        type='string', dest='ref_driver', default=None,
        help='force reference driver')
    optparser.add_option(
        '--src-driver', metavar='DRIVER',
        type='string', dest='src_driver', default=None,
        help='force source driver')
    optparser.add_option(
        '--ref-arg', metavar='OPTION',
        type='string', action='append', dest='ref_args', default=[],
        help='pass argument to reference retrace')
    optparser.add_option(
        '--src-arg', metavar='OPTION',
        type='string', action='append', dest='src_args', default=[],
        help='pass argument to source retrace')
    optparser.add_option(
        '--ref-env', metavar='NAME=VALUE',
        type='string', action='append', dest='ref_env', default=[],
        help='add variable to reference environment')
    optparser.add_option(
        '--src-env', metavar='NAME=VALUE',
        type='string', action='append', dest='src_env', default=[],
        help='add variable to source environment')
    optparser.add_option(
        '--diff-prefix', metavar='PATH',
        type='string', dest='diff_prefix', default='.',
        help='prefix for the difference images')
    optparser.add_option(
        '-t', '--threshold', metavar='BITS',
        type="float", dest="threshold", default=12.0,
        help="threshold precision [default: %default]")
    optparser.add_option(
        '-S', '--snapshot-frequency', metavar='CALLSET',
        type="string", dest="snapshot_frequency", default='draw',
        help="calls to compare [default: %default]")
    optparser.add_option(
        '--diff-state',
        action='store_true', dest='diff_state', default=False,
        help='diff state between failing calls')
    optparser.add_option(
        '-o', '--output', metavar='FILE',
        type="string", dest="output",
        help="output file [default: stdout]")

    (options, args) = optparser.parse_args(sys.argv[1:])
    ref_env = parse_env(optparser, options.ref_env)
    src_env = parse_env(optparser, options.src_env)
    if not args:
        optparser.error("incorrect number of arguments")
    if options.ref_driver:
        options.ref_args.insert(0, '--driver=' + options.ref_driver)
    if options.src_driver:
        options.src_args.insert(0, '--driver=' + options.src_driver)

    refRetracer = Retracer(options.retrace, options.ref_args + args, ref_env)
    srcRetracer = Retracer(options.retrace, options.src_args + args, src_env)

    if options.output:
        output = open(options.output, 'wt')
    else:
        output = sys.stdout

    highligher = AutoHighlighter(output)

    highligher.write('call\tprecision\n')

    # Track the most recent good/bad call numbers for --diff-state.
    last_bad = -1
    last_good = 0

    refRun = refRetracer.snapshot(options.snapshot_frequency)
    try:
        srcRun = srcRetracer.snapshot(options.snapshot_frequency)
        try:
            # Consume snapshots pairwise until either retrace runs out.
            while True:
                # Get the reference image
                refImage, refCallNo = refRun.nextSnapshot()
                if refImage is None:
                    break

                # Get the source image
                srcImage, srcCallNo = srcRun.nextSnapshot()
                if srcImage is None:
                    break

                assert refCallNo == srcCallNo
                callNo = refCallNo

                # Compare the two images
                if isinstance(refImage, Image.Image) and isinstance(srcImage, Image.Image):
                    # Using PIL
                    numpyImages = False
                    comparer = Comparer(refImage, srcImage)
                    precision = comparer.precision()
                else:
                    # Using numpy (for floating point images)
                    # TODO: drop PIL when numpy path becomes general enough
                    import numpy
                    assert not isinstance(refImage, Image.Image)
                    assert not isinstance(srcImage, Image.Image)
                    numpyImages = True
                    assert refImage.shape == srcImage.shape
                    diffImage = numpy.square(srcImage - refImage)
                    height, width, channels = diffImage.shape
                    square_error = numpy.sum(diffImage)
                    # Avoid log(0) below.
                    square_error += numpy.finfo(numpy.float32).eps
                    rel_error = square_error / float(height*width*channels)
                    bits = -math.log(rel_error)/math.log(2.0)
                    precision = bits

                mismatch = precision < options.threshold

                if mismatch:
                    highligher.color(highligher.red)
                    highligher.bold()
                highligher.write('%u\t%f\n' % (callNo, precision))
                if mismatch:
                    highligher.normal()

                if mismatch:
                    if options.diff_prefix:
                        # Dump ref/src (and diff, in the PIL path) images.
                        prefix = os.path.join(options.diff_prefix, '%010u' % callNo)
                        prefix_dir = os.path.dirname(prefix)
                        if not os.path.isdir(prefix_dir):
                            os.makedirs(prefix_dir)
                        if numpyImages:
                            dumpNumpyImage(output, refImage, prefix + '.ref.png')
                            dumpNumpyImage(output, srcImage, prefix + '.src.png')
                        else:
                            refImage.save(prefix + '.ref.png')
                            srcImage.save(prefix + '.src.png')
                            comparer.write_diff(prefix + '.diff.png')
                    # Only diff state for the first failure after a success.
                    if last_bad < last_good and options.diff_state:
                        srcRetracer.diff_state(last_good, callNo, output)
                    last_bad = callNo
                else:
                    last_good = callNo

                highligher.flush()
        finally:
            srcRun.terminate()
    finally:
        refRun.terminate()
|
apitrace/apitrace
|
scripts/retracediff.py
|
Python
|
mit
| 13,177
|
[
"VisIt"
] |
0e8760791362b1f5e75eb8df4759c3772e0280281b71dbb52230c484388a1271
|
# encoding: utf-8
"""
Enable pygtk to be used interacive by setting PyOS_InputHook.
Authors: Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import gtk
import gobject
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
    # io_add_watch callback: quit the gtk mainloop when stdin is readable.
    # Returning False removes the watch source after it fires.
    gtk.main_quit()
    return False
def inputhook_gtk():
    """Run the gtk main loop until input arrives on stdin, then return 0.

    Intended to be installed as PyOS_InputHook (see module docstring).
    """
    gobject.io_add_watch(sys.stdin, gobject.IO_IN, _main_quit)
    gtk.main()
    return 0
|
mattvonrocketstein/smash
|
smashlib/ipy3x/lib/inputhookgtk.py
|
Python
|
mit
| 1,023
|
[
"Brian"
] |
c37248861fdddac5cf7a0139f11095771f9a31385bb766ac484950897dbdfef7
|
from DIRAC import S_OK
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.private.Plotters.BaseReporter import BaseReporter
class DataOperationPlotter(BaseReporter):
_typeName = "DataOperation"
_typeKeyFields = [dF[0] for dF in DataOperation().definitionKeyFields]
def _translateGrouping(self, grouping):
if grouping == "Channel":
return ("%s, %s", ['Source', 'Destination'], "CONCAT( %s, ' -> ', %s )")
else:
return ("%s", [grouping])
    # Display name for the "succeeded" report ("Suceeded" spelling preserved
    # for API compatibility).
    _reportSuceededTransfersName = "Successful transfers"

    def _reportSuceededTransfers(self, reportRequest):
        # Successful-transfer report; failed counts are stripped into slot 0.
        return self.__reportTransfers(reportRequest, 'Suceeded', ('Failed', 0))
    # Display name for the "failed" report.
    _reportFailedTransfersName = "Failed transfers"

    def _reportFailedTransfers(self, reportRequest):
        # Failed-transfer report; succeeded counts are stripped into slot 1.
        return self.__reportTransfers(reportRequest, 'Failed', ('Suceeded', 1))
def __reportTransfers(self, reportRequest, titleType, togetherFieldsToPlot):
selectFields = (
self._getSelectStringForGrouping(
reportRequest['groupingFields']) + ", %s, %s, SUM(%s), SUM(%s)-SUM(%s)",
reportRequest['groupingFields'][1] + [
'startTime',
'bucketLength',
'TransferOK',
'TransferTotal',
'TransferOK',
])
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
strippedData = self.stripDataField(dataDict, togetherFieldsToPlot[1])
if strippedData:
dataDict[togetherFieldsToPlot[0]] = strippedData[0]
dataDict, maxValue = self._divideByFactor(dataDict, granularity)
dataDict = self._fillWithZero(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
dataDict, self._getAccumulationMaxValue(dataDict), "files")
return S_OK({'data': baseDataDict, 'graphDataDict': graphDataDict,
'granularity': granularity, 'unit': unitName})
def _plotSuceededTransfers(self, reportRequest, plotInfo, filename):
return self.__plotTransfers(reportRequest, plotInfo, filename, 'Suceeded', ('Failed', 0))
def _plotFailedTransfers(self, reportRequest, plotInfo, filename):
return self.__plotTransfers(reportRequest, plotInfo, filename, 'Failed', ('Suceeded', 1))
def __plotTransfers(self, reportRequest, plotInfo, filename, titleType, togetherFieldsToPlot):
metadata = {'title': '%s Transfers by %s' % (titleType, reportRequest['grouping']),
'ylabel': plotInfo['unit'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity']}
return self._generateTimedStackedBarPlot(filename, plotInfo['graphDataDict'], metadata)
_reportQualityName = "Efficiency by protocol"
def _reportQuality(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", %s, %s, SUM(%s), SUM(%s)",
reportRequest['groupingFields'][1] + ['startTime', 'bucketLength',
'TransferOK', 'TransferTotal'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{'checkNone': True,
'convertToGranularity': 'sum',
'calculateProportionalGauges': False,
'consolidationFunction': self._efficiencyConsolidation})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
self.stripDataField(dataDict, 0)
if len(dataDict) > 1:
# Get the total for the plot
selectFields = ("'Total', %s, %s, SUM(%s),SUM(%s)",
['startTime', 'bucketLength',
'TransferOK', 'TransferTotal'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{'checkNone': True,
'convertToGranularity': 'sum',
'calculateProportionalGauges': False,
'consolidationFunction': self._efficiencyConsolidation})
if not retVal['OK']:
return retVal
totalDict = retVal['Value'][0]
self.stripDataField(totalDict, 0)
for key in totalDict:
dataDict[key] = totalDict[key]
return S_OK({'data': dataDict, 'granularity': granularity})
def _plotQuality(self, reportRequest, plotInfo, filename):
metadata = {'title': 'Transfer quality by %s' % reportRequest['grouping'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity']}
return self._generateQualityPlot(filename, plotInfo['data'], metadata)
_reportTransferedDataName = "Cumulative transferred data"
def _reportTransferedData(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", %s, %s, SUM(%s)",
reportRequest['groupingFields'][1] + ['startTime', 'bucketLength',
'TransferSize'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
self.stripDataField(dataDict, 0)
dataDict = self._fillWithZero(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
dataDict = self._accumulate(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableUnit(dataDict,
self._getAccumulationMaxValue(dataDict),
"bytes")
return S_OK({'data': baseDataDict, 'graphDataDict': graphDataDict,
'granularity': granularity, 'unit': unitName})
def _plotTransferedData(self, reportRequest, plotInfo, filename):
metadata = {'title': 'Transfered data by %s' % reportRequest['grouping'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity'],
'ylabel': plotInfo['unit'],
'sort_labels': 'last_value'}
return self._generateCumulativePlot(filename, plotInfo['graphDataDict'], metadata)
def _reportThroughput(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", %s, %s, SUM(%s)",
reportRequest['groupingFields'][1] + ['startTime', 'bucketLength',
'TransferSize'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
self.stripDataField(dataDict, 0)
dataDict, maxValue = self._divideByFactor(dataDict, granularity)
dataDict = self._fillWithZero(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
dataDict, self._getAccumulationMaxValue(dataDict), "bytes")
return S_OK({'data': baseDataDict, 'graphDataDict': graphDataDict,
'granularity': granularity, 'unit': unitName})
def _plotThroughput(self, reportRequest, plotInfo, filename):
metadata = {'title': 'Throughput by %s' % reportRequest['grouping'],
'ylabel': plotInfo['unit'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity']}
return self._generateTimedStackedBarPlot(filename, plotInfo['graphDataDict'], metadata)
_reportDataTransferedName = "Pie chart of transferred data"
def _reportDataTransfered(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", SUM(%s)",
reportRequest['groupingFields'][1] + ['TransferSize'
]
)
retVal = self._getSummaryData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict = retVal['Value']
for key in dataDict:
dataDict[key] = int(dataDict[key])
return S_OK({'data': dataDict})
def _plotDataTransfered(self, reportRequest, plotInfo, filename):
metadata = {'title': 'Total data transfered by %s' % reportRequest['grouping'],
'ylabel': 'bytes',
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime']
}
return self._generatePiePlot(filename, plotInfo['data'], metadata)
|
andresailer/DIRAC
|
AccountingSystem/private/Plotters/DataOperationPlotter.py
|
Python
|
gpl-3.0
| 10,852
|
[
"DIRAC"
] |
e21714dc1a51b33d621d6f025b5365e3369481feac4ead20556325573313482c
|
#!/usr/bin/env python
from __future__ import print_function
import optparse
import os
import os.path
import re
import shutil
import sys
# Command line: -d/--export_dir -p/--dir_prefix, followed by positional
# arguments in (dataset_path, dataset_name, dataset_extension) triples.
parser = optparse.OptionParser()
parser.add_option('-d', '--export_dir', help='Directory where to export the datasets')
parser.add_option('-p', '--dir_prefix', help='How the export dir should start')
(options, args) = parser.parse_args()
if not options.export_dir:
    parser.error('Export directory cannot be empty')
if not options.dir_prefix:
    parser.error('Directory prefix cannot be empty')
# At least one full (path, name, extension) triple is required.
if len(args) < 3:
    parser.error('Require at least three arguments')
if len(args) % 3 != 0:
    parser.error('Require the number of arguments to be a multiple of three')
real_export_dir = os.path.realpath(options.export_dir)
dir_prefix = options.dir_prefix.rstrip(os.sep)
# Resolve symlinks first so the prefix check cannot be escaped with a link
# pointing outside the allowed tree.
if not real_export_dir.startswith(dir_prefix):
    raise Exception("'%s' must be a subdirectory of '%s'" % (options.export_dir, dir_prefix))
if not os.path.exists(real_export_dir):
    raise Exception("'%s' directory does not exist or it is not accessible by the Galaxy user" % options.export_dir)
if not os.path.isdir(real_export_dir):
    raise Exception("'%s' is not a directory" % options.export_dir)
dataset_paths = args[::3]
dataset_names = args[1::3]
dataset_exts = args[2::3]
exit_code = 0
for dp, dn, de in zip(dataset_paths, dataset_names, dataset_exts):
    # Sanitize the destination file name, keeping only word characters,
    # dashes and dots. Adapted from Django's get_valid_filename:
    # https://github.com/django/django/blob/master/django/utils/text.py
    dn_de = "%s.%s" % (dn, de)
    dn_de_safe = re.sub(r'(?u)[^-\w.]', '', dn_de.strip().replace(' ', '_'))
    dest = os.path.join(real_export_dir, dn_de_safe)
    try:
        shutil.copy2(dp, dest)
        print("Dataset '%s' copied to '%s'" % (dn, dest))
    except Exception as e:
        # Report the failure but keep copying the remaining datasets;
        # remember the failure for the process exit status.
        msg = "Error copying dataset '%s' to '%s', %s" % (dn, dest, e)
        print(msg, file=sys.stderr)
        exit_code = 1
sys.exit(exit_code)
|
TGAC/tgac-galaxytools
|
tools/export_to_cluster/export_to_cluster.py
|
Python
|
mit
| 1,921
|
[
"Galaxy"
] |
12ddd4f4fb8fd98af8bcf396f5c462a734aebe189ddbafe73080aa8accd27743
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2015 MUJIN Inc
from . import json
from . import planningclient
import logging
log = logging.getLogger(__name__)
class RealtimeRobotControllerClient(planningclient.PlanningControllerClient):
"""Mujin controller client for realtimerobot task
"""
_robotname = None # Optional name of the robot selected
_robotspeed = None # Speed of the robot, e.g. 0.4
_robotaccelmult = None # Current robot accel mult
_envclearance = None # Environment clearance in millimeters, e.g. 20
_robotBridgeConnectionInfo = None # dict holding the connection info for the robot bridge.
def __init__(self, robotname, robotspeed=None, robotaccelmult=None, envclearance=10.0, robotBridgeConnectionInfo=None, **kwargs):
"""
:param robotname: (Optional) Name of the robot selected
:param robotspeed: Speed of the robot, e.g. 0.4. Default: None
:param robotaccelmult: Current robot acceleration multiplication. Default: None
:param envclearance: Environment clearance in millimeter, e.g. 20
:param robotBridgeConnectionInfo: dict holding the connection info for the robot bridge. Default: None
"""
super(RealtimeRobotControllerClient, self).__init__(**kwargs)
self._robotname = robotname
self._robotspeed = robotspeed
self._robotaccelmult = robotaccelmult
self._envclearance = envclearance
self._robotBridgeConnectionInfo = robotBridgeConnectionInfo
    def GetRobotConnectionInfo(self):
        """Return the robot bridge connection info dict (may be None)."""
        return self._robotBridgeConnectionInfo
    def SetRobotConnectionInfo(self, robotBridgeConnectionInfo):
        """Set the robot bridge connection info dict used by subsequent commands."""
        self._robotBridgeConnectionInfo = robotBridgeConnectionInfo
    def GetRobotName(self):
        """Return the currently selected robot name (may be None)."""
        return self._robotname
    def SetRobotName(self, robotname):
        """Select the robot used by subsequent commands."""
        self._robotname = robotname
    def SetRobotSpeed(self, robotspeed):
        """Set the default robot speed ratio (e.g. 0.4) for subsequent commands."""
        self._robotspeed = robotspeed
    def SetRobotAccelMult(self, robotaccelmult):
        """Set the default robot acceleration multiplier for subsequent commands."""
        self._robotaccelmult = robotaccelmult
    def ExecuteCommand(self, taskparameters, robotname=None, toolname=None, robotspeed=None, robotaccelmult=None, envclearance=None, usewebapi=None, timeout=10, fireandforget=False, respawnopts=None):
        """Wrapper to ExecuteCommand with robot info set up in taskparameters

        Executes A command on the task.

        Resolution order for each robot parameter: an explicit value already in
        taskparameters wins, then the keyword argument, then the client default
        stored on self.

        :return: A dictionary that contains:
        - robottype: robot type, string
        - currentjointvalues: current joint values, DOF floats
        - elapsedtime: elapsed time in seconds, float
        - numpoints: the number of points, int
        - error: optional error info, dictionary
          - desc: error message, string
          - type: error type, string
          - errorcode: error code, string
        """
        if robotname is None:
            robotname = self._robotname
        # caller wants to use a different tool
        if toolname is not None:
            # set at the first level
            taskparameters['toolname'] = toolname
        if robotname is not None:
            taskparameters['robotname'] = robotname
        # Only fill in speed/accel/clearance when the caller did not already
        # put them into taskparameters.
        if 'robotspeed' not in taskparameters:
            if robotspeed is None:
                robotspeed = self._robotspeed
            if robotspeed is not None:
                taskparameters['robotspeed'] = robotspeed
        if 'robotaccelmult' not in taskparameters:
            if robotaccelmult is None:
                robotaccelmult = self._robotaccelmult
            if robotaccelmult is not None:
                taskparameters['robotaccelmult'] = robotaccelmult
        if self._robotBridgeConnectionInfo is not None:
            taskparameters['robotBridgeConnectionInfo'] = self._robotBridgeConnectionInfo
        if 'envclearance' not in taskparameters or taskparameters['envclearance'] is None:
            if envclearance is None:
                envclearance = self._envclearance
            if envclearance is not None:
                taskparameters['envclearance'] = envclearance
        return super(RealtimeRobotControllerClient, self).ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget, respawnopts=respawnopts)
def ExecuteTrajectory(self, trajectoryxml, robotspeed=None, timeout=10, **kwargs):
"""Executes a trajectory on the robot from a serialized Mujin Trajectory XML file.
"""
taskparameters = {'command': 'ExecuteTrajectory',
'trajectory': trajectoryxml,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def GetJointValues(self, timeout=10, **kwargs):
"""Gets the current robot joint values
:return: Current joint values in a json dictionary with
- currentjointvalues: [0,0,0,0,0,0]
"""
taskparameters = {'command': 'GetJointValues'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def MoveJointStraight(self, deltagoalvalue, jointName, timeout=10, robotspeed=None, **kwargs):
"""Moves a single joint by a given amount
:param jointName: Name of the joint to move
:param deltagoalvalue: How much to move joint (delta)
"""
taskparameters = {'command': 'MoveJointStraight',
'deltagoalvalue': deltagoalvalue,
'jointName': jointName,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def MoveToolLinear(self, goaltype, goals, toolname=None, timeout=10, robotspeed=None, **kwargs):
"""Moves the tool linear
:param goaltype: Type of the goal, e.g. translationdirection5d
:param goals: Flat list of goals, e.g. two 5D ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
:param toolname: Name of the manipulator, default is self.toolname
:param maxdeviationangle: How much the tool tip can rotationally deviate from the linear path
:param plannername:
:param workspeed: [anglespeed, transspeed] in deg/s and mm/s
:param workaccel: [angleaccel, transaccel] in deg/s^2 and mm/s^2
:param worksteplength: Discretization for planning MoveHandStraight, in seconds.
:param workminimumcompletetime: Set to trajduration - 0.016s. EMU_MUJIN example requires at least this much
:param workminimumcompleteratio: In case the duration of the trajectory is now known, can specify in terms of [0,1]. 1 is complete everything
:param numspeedcandidates: If speed/accel are not specified, the number of candiates to consider
:param workignorefirstcollisionee: time, necessary in case initial is in collision, has to be multiples of step length?
:param workignorelastcollisionee: time, necessary in case goal is in collision, has to be multiples of step length?
:param workignorefirstcollision:
"""
taskparameters = {'command': 'MoveToolLinear',
'goaltype': goaltype,
'goals': goals,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, toolname=toolname, timeout=timeout)
def MoveToHandPosition(self, goaltype, goals, toolname=None, envclearance=None, closegripper=0, robotspeed=None, robotaccelmult=None, timeout=10, **kwargs):
"""Computes the inverse kinematics and moves the manipulator to any one of the goals specified.
:param goaltype: Type of the goal, e.g. translationdirection5d
:param goals: Flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
:param toolname: Name of the manipulator. Default: self.toolname
:param envclearance: Clearance in millimeter. Default: self.envclearances
:param closegripper: Whether to close gripper once the goal is reached. Default: 0
"""
taskparameters = {'command': 'MoveToHandPosition',
'goaltype': goaltype,
'goals': goals,
'closegripper': closegripper,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, robotaccelmult=robotaccelmult, envclearance=envclearance, toolname=toolname, timeout=timeout)
def UpdateObjects(self, envstate, targetname=None, state=None, unit="mm", timeout=10, **kwargs):
"""Updates objects in the scene with the envstate
:param envstate: A list of dictionaries for each instance object in world frame. Quaternion is specified in w,x,y,z order. e.g. [{'name': 'target_0', 'translation_': [1,2,3], 'quat_': [1,0,0,0], 'object_uri':'mujin:/asdfas.mujin.dae'}, {'name': 'target_1', 'translation_': [2,2,3], 'quat_': [1,0,0,0]}]
:param unit: Unit of envstate. Default: mm
"""
taskparameters = {'command': 'UpdateObjects',
'envstate': envstate,
'unit': unit,
}
if targetname is not None:
taskparameters['objectname'] = targetname
taskparameters['object_uri'] = u'mujin:/%s.mujin.dae' % (targetname)
taskparameters.update(kwargs)
if state is not None:
taskparameters['state'] = json.dumps(state)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def Grab(self, targetname, toolname=None, timeout=10, **kwargs):
"""Grabs an object with tool
:param targetname: Name of the object
:param toolname: Name of the manipulator, Default: self.toolname
"""
taskparameters = {'command': 'Grab',
'targetname': targetname,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, toolname=toolname, timeout=timeout)
def Release(self, targetname, timeout=10, **kwargs):
"""Releases a grabbed object.
:param targetname: Name of the object
"""
taskparameters = {'command': 'Release',
'targetname': targetname,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetGrabbed(self, timeout=10, **kwargs):
"""Gets the names of the objects currently grabbed
:return: Names of the grabbed object in a json dictionary, e.g. {'names': ['target_0']}
"""
taskparameters = {'command': 'GetGrabbed',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetTransform(self, targetname, connectedBodyName='', linkName='', geometryName='', geometryPk='', unit='mm', timeout=10, **kwargs):
"""Gets the transform of an object
:param targetname: OpenRave kinbody name
:param connectedBodyName: OpenRave connected body name
:param linkName: OpenRave link name
:param geometryName: OpenRave geometry id name
:param geometryPk: OpenRave geometry primary key (pk)
:param unit: Unit of the result translation
:return: Transform of the object in a json dictionary, e.g. {'translation': [100,200,300], 'rotationmat': [[1,0,0],[0,1,0],[0,0,1]], 'quaternion': [1,0,0,0]}
"""
taskparameters = {'command': 'GetTransform',
'targetname': targetname,
'connectedBodyName': connectedBodyName,
'linkName': linkName,
'geometryName': geometryName,
'geometryPk': geometryPk,
'unit': unit,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def SetTransform(self, targetname, translation, unit='mm', rotationmat=None, quaternion=None, timeout=10, **kwargs):
"""Sets the transform of an object
:param targetname: Name of the object
:param translation: List of x,y,z value of the object in millimeters
:param unit: Unit of translation
:param rotationmat: List specifying the rotation matrix in row major format, e.g. [1,0,0,0,1,0,0,0,1]
:param quaternion: List specifying the quaternion in w,x,y,z format, e.g. [1,0,0,0]
"""
taskparameters = {'command': 'SetTransform',
'targetname': targetname,
'unit': unit,
'translation': translation,
}
taskparameters.update(kwargs)
if rotationmat is not None:
taskparameters['rotationmat'] = rotationmat
if quaternion is not None:
taskparameters['quaternion'] = quaternion
if rotationmat is None and quaternion is None:
taskparameters['quaternion'] = [1, 0, 0, 0]
log.warn('no rotation is specified, using identity quaternion')
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetOBB(self, targetname, unit='mm', timeout=10, **kwargs):
"""Get the oriented bounding box (OBB) of object.
:param targetname: Name of the object
:param linkname: Name of link to target for AABB. If not specified, uses entire target.
:param unit: Unit of the OBB. Default: mm
:return: A dict describing the OBB of the object with keys: extents, boxLocalTranslation, originalBodyTranslation, quaternion, rotationmat, translation
"""
taskparameters = {'command': 'GetOBB',
'targetname': targetname,
'unit': unit,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetInnerEmptyRegionOBB(self, targetname, linkname=None, unit='mm', timeout=10, **kwargs):
""" Get the inner empty oriented bounding box (OBB) of a container.
:param targetname: Name of the object
:param linkname: Can target a specific link
:param unit: Unit of the OBB. Default: mm
:return: OBB of the object
"""
taskparameters = {'command': 'GetInnerEmptyRegionOBB',
'targetname': targetname,
'unit': unit,
}
if linkname is not None:
taskparameters['linkname'] = linkname
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetInstObjectAndSensorInfo(self, instobjectnames=None, sensornames=None, unit='mm', timeout=10, **kwargs):
"""Returns information about the inst objects and sensors that are a part of those inst objects.
"""
taskparameters = {'command': 'GetInstObjectAndSensorInfo', 'unit':unit}
if instobjectnames is not None:
taskparameters['instobjectnames'] = instobjectnames
if sensornames is not None:
taskparameters['sensornames'] = sensornames
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetInstObjectInfoFromURI(self, instobjecturi=None, unit='mm', timeout=10, **kwargs):
"""Opens a URI and returns info about the internal/external and geometry info from it.
"""
taskparameters = {'command': 'GetInstObjectInfoFromURI', 'unit':unit}
if instobjecturi is not None:
taskparameters['objecturi'] = instobjecturi
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetAABB(self, targetname, unit='mm', timeout=10, **kwargs):
"""Gets the axis-aligned bounding box (AABB) of an object.
:param targetname: Name of the object
:param linkname: Name of link to target for AABB. If not specified, uses entire target
:param unit: Unit of the AABB. Default: mm
:return: AABB of the object, e.g. {'pos': [1000,400,100], 'extents': [100,200,50]}
"""
taskparameters = {'command': 'GetAABB',
'targetname': targetname,
'unit': unit,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def SetLocationTracking(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Resets the tracking of specific containers
:param cycleIndex: The cycle index to track the locations for
:param locationReplaceInfos: A dict that should have the keys: name, containerDynamicProperties, rejectContainerIds, uri, pose, cycleIndex
:param removeLocationNames:
:param doRemoveOnlyDynamic:
"""
taskparameters = {'command': 'SetLocationTracking'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ResetLocationTracking(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Resets tracking updates for locations
:param resetAllLocations: If True, then will reset all the locations
:param resetLocationName: Resets only the location with matching name
:param resetLocationNames: Resets only locations with matching name
:param checkIdAndResetLocationName: (locationName, containerId) - only reset the location if the container id matches
:return: clearedLocationNames
"""
taskparameters = {'command': 'ResetLocationTracking' }
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)['clearedLocationNames']
def GetLocationTrackingInfos(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Gets the active tracked locations
:return: activeLocationTrackingInfos
"""
taskparameters = {'command': 'GetLocationTrackingInfos' }
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)['activeLocationTrackingInfos']
def UpdateLocationContainerIdType(self, locationName, containerName, containerId, containerType, updateTimeStampMS=None, trackingCycleIndex=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Resets the tracking of specific containers
:param updateTimeStampMS if specified then setup updatetimestamp on container (time when container arrives and becomes valid for usage)
:param trackingCycleIndex if specified then cycle with same cycleIndex will update location tracking in the same call
"""
taskparameters = {
'command': 'UpdateLocationContainerIdType',
'locationName': locationName,
'containerName': containerName,
'containerId': containerId,
'containerType': containerType,
}
if updateTimeStampMS is not None:
taskparameters['updateTimeStampMS'] = updateTimeStampMS
if trackingCycleIndex is not None:
taskparameters['trackingCycleIndex'] = trackingCycleIndex
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ResetLocationTrackingContainerId(self, locationName, checkContainerId, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Resets the containerId of self._activeLocationTrackingInfos if it matches checkContainerId.
:param checkContainerId: if checkContainerId is specified and not empty and it matches the current containerId of the tracking location, then reset the current tracking location
:return isTrackingUpdated: True if the containerId was updated
"""
taskparameters = {
'command': 'ResetLocationTrackingContainerId',
'locationName': locationName,
'checkContainerId': checkContainerId,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def RemoveObjectsWithPrefix(self, prefix=None, removeNamePrefixes=None, timeout=10, usewebapi=None, fireandforget=False, removeLocationNames=None, **kwargs):
"""Removes objects with prefix.
:param removeNamePrefixes: Names of prefixes to match with when removing items
:param doRemoveOnlyDynamic: If True, then remove objects that were added through dynamic means like UpdateObjects/UpdateEnvironmentState
:return: dict with key 'removedBodyNames' for the removed object names
"""
taskparameters = {'command': 'RemoveObjectsWithPrefix',
}
taskparameters.update(kwargs)
if prefix is not None:
log.warn('prefix is deprecated')
taskparameters['prefix'] = prefix
if removeNamePrefixes is not None:
taskparameters['removeNamePrefixes'] = removeNamePrefixes
if removeLocationNames is not None:
taskparameters['removeLocationNames'] = removeLocationNames
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def GetTrajectoryLog(self, timeout=10, **kwargs):
"""Gets the recent trajectories executed on the binpicking server. The internal server keeps trajectories around for 10 minutes before clearing them.
:param startindex: int, Start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.
:param num: int, Number of trajectories from startindex to return. If 0 will return all the trajectories starting from startindex
:param includejointvalues: bool, If True will include timedjointvalues, if False will just give back the trajectories. Default: False
:return:
total: 10
trajectories: [
{
"timestarted": 12345215
"name": "movingtodest",
"numpoints": 100,
"duration": 0.8,
"timedjointvalues": [0, 0, 0, .....]
},
{ ... }
]
Where timedjointvalues is a list joint values and the trajectory time. For a 3DOF robot sampled at 0.008s, this is
[J1, J2, J3, 0, J1, J2, J3, 0.008, J1, J2, J3, 0.016, ...]
"""
taskparameters = {'command': 'GetTrajectoryLog',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def ChuckGripper(self, robotname=None, grippername=None, timeout=10, usewebapi=None, **kwargs):
"""Chucks the manipulator
:param grippername: Name of the gripper
"""
taskparameters = {'command': 'ChuckGripper', 'grippername':grippername}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def UnchuckGripper(self, robotname=None, grippername=None, timeout=10, usewebapi=None, **kwargs):
"""Unchucks the manipulator and releases the target
:param grippername: Name of the gripper
:param targetname: Name of the target
"""
taskparameters = {'command': 'UnchuckGripper', 'grippername':grippername}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def CalibrateGripper(self, robotname=None, grippername=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Goes through the gripper calibration procedure
:param grippername: Name of the gripper
"""
taskparameters = {'command': 'CalibrateGripper', 'grippername':grippername}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def StopGripper(self, robotname=None, grippername=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""
:param grippername: Name of the gripper
"""
taskparameters = {'command': 'StopGripper', 'grippername':grippername}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def MoveGripper(self, grippervalues, robotname=None, grippername=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Moves the chuck of the manipulator to a given value.
:param grippername: Name of the manipulator.
:param grippervalues: Target value of the chuck
"""
taskparameters = {
'command': 'MoveGripper',
'grippername':grippername,
'grippervalues': grippervalues,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ExecuteRobotProgram(self, robotProgramName, robotname=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""Execute a robot specific program by name
"""
taskparameters = {
'command': 'ExecuteRobotProgram',
'robotProgramName': robotProgramName,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def SaveScene(self, timeout=10, **kwargs):
"""Saves the current scene to file
:param filename: e.g. /tmp/testscene.mujin.dae, if not specified, it will be saved with an auto-generated filename
:param preserveexternalrefs: If True, any bodies that are currently being externally referenced from the environment will be saved as external references.
:param externalref: If '*', then each of the objects will be saved as externally referencing their original filename. Otherwise will force saving specific bodies as external references.
:param saveclone: If 1, will save the scenes for all the cloned environments
:param saveReferenceUriAsHint: If True, use save the reference uris as referenceUriHint so that webstack does not get confused and deletes content
:return: The filename the scene is saved to in a json dictionary, e.g. {'filename': '2013-11-01-17-10-00-UTC.dae'}
"""
taskparameters = {'command': 'SaveScene'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def SaveGripper(self, timeout=10, robotname=None, **kwargs):
    """Separates the gripper from a robot in the scene and saves it.

    :param filename: str. File name to save on the file system, e.g. /tmp/robotgripper/mujin.dae
    :param robotname: str. Name of the robot the hand is extracted from
    :param manipname: str. Name of the manipulator
    """
    return self.ExecuteCommand(dict({'command': 'SaveGripper'}, **kwargs), robotname=robotname, timeout=timeout)
def ResetRobotBridges(self, timeout=10, usewebapi=True, **kwargs):
    """Resets the robot bridge states."""
    return self.ExecuteCommand(dict({'command': 'ResetRobotBridges'}, **kwargs), timeout=timeout, usewebapi=usewebapi)
def MoveJointsToJointConfigurationStates(self, jointConfigurationStates, robotname=None, robotspeed=None, robotaccelmult=None, execute=1, startJointConfigurationStates=None, envclearance=None, timeout=10, usewebapi=True, **kwargs):
    """Moves the robot to the desired joint angles specified as joint configuration states.

    :param jointConfigurationStates: List[{'jointName':str, 'jointValue':float}], sent as goalJointConfigurationStates
    :param startJointConfigurationStates: Optional starting configuration states
    :param robotspeed: Value in (0,1] setting the percentage of robot speed to move at
    :param robotaccelmult: Value in (0,1] setting the percentage of robot acceleration to move at
    :param envclearance: Environment clearance in millimeters
    """
    base = {
        'command': 'MoveJointsToJointConfigurationStates',
        'goalJointConfigurationStates': jointConfigurationStates,
        'execute': execute,
    }
    # Optional parameters are only sent when explicitly provided.
    if envclearance is not None:
        base['envclearance'] = envclearance
    if startJointConfigurationStates is not None:
        base['startJointConfigurationStates'] = startJointConfigurationStates
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, robotspeed=robotspeed, robotaccelmult=robotaccelmult, timeout=timeout, usewebapi=usewebapi)
def MoveJoints(self, jointvalues, jointindices=None, robotname=None, robotspeed=None, robotaccelmult=None, execute=1, startvalues=None, envclearance=None, timeout=10, usewebapi=True, **kwargs):
    """Moves the robot to desired joint angles specified in jointvalues.

    :param jointvalues: List of joint values
    :param jointindices: List of corresponding joint indices, default is range(len(jointvalues))
    :param robotspeed: Value in (0,1] setting the percentage of robot speed to move at
    :param robotaccelmult: Value in (0,1] setting the percentage of robot acceleration to move at
    :param envclearance: Environment clearance in millimeters
    """
    if jointindices is None:
        # Default to addressing the first len(jointvalues) joints. Materialize
        # the list so the log message shows actual indices (a py3 range repr
        # would only show "range(0, n)").
        jointindices = list(range(len(jointvalues)))
        # logging.Logger.warn is a deprecated alias; use warning instead
        log.warning(u'No jointindices specified. Moving joints with default jointindices: %s', jointindices)
    taskparameters = {
        'command': 'MoveJoints',
        'goaljoints': list(jointvalues),
        'jointindices': list(jointindices),
        'execute': execute,
    }
    if envclearance is not None:
        taskparameters['envclearance'] = envclearance
    if startvalues is not None:
        taskparameters['startvalues'] = list(startvalues)
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, robotname=robotname, robotspeed=robotspeed, robotaccelmult=robotaccelmult, timeout=timeout, usewebapi=usewebapi)
def MoveJointsToPositionConfiguration(self, positionConfigurationName=None, positionConfigurationCandidateNames=None, robotname=None, robotspeed=None, robotaccelmult=None, execute=1, startvalues=None, envclearance=None, timeout=10, usewebapi=True, **kwargs):
    """Moves the robot to the position configuration specified by name.

    :param positionConfigurationName: (optional) If specified, the name of the position configuration to move to. If it does not exist, an error is raised.
    :param positionConfigurationCandidateNames: (optional) If specified, goes to the first position that is defined for the robot. If no positions exist, returns without moving the robot.
    :param robotspeed: Value in (0,1] setting the percentage of robot speed to move at
    :param robotaccelmult: Value in (0,1] setting the percentage of robot acceleration to move at
    :param envclearance: Environment clearance in millimeters
    """
    base = {
        'command': 'MoveJointsToPositionConfiguration',
        'execute': execute,
    }
    # Only forward the optional selectors/overrides that were actually given.
    if positionConfigurationName:
        base['positionConfigurationName'] = positionConfigurationName
    if positionConfigurationCandidateNames:
        base['positionConfigurationCandidateNames'] = positionConfigurationCandidateNames
    if envclearance is not None:
        base['envclearance'] = envclearance
    if startvalues is not None:
        base['startvalues'] = list(startvalues)
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, robotspeed=robotspeed, robotaccelmult=robotaccelmult, timeout=timeout, usewebapi=usewebapi)
def MoveToDropOff(self, dropOffInfo, robotname=None, robotspeed=None, robotaccelmult=None, execute=1, startvalues=None, envclearance=None, timeout=10, usewebapi=True, **kwargs):
    """Moves the robot to the drop-off described by dropOffInfo.

    :param dropOffInfo: Drop-off description forwarded to the planner
    :param robotspeed: Value in (0,1] setting the percentage of robot speed to move at
    :param robotaccelmult: Value in (0,1] setting the percentage of robot acceleration to move at
    :param envclearance: Environment clearance in millimeters
    """
    base = {
        'command': 'MoveToDropOff',
        'dropOffInfo': dropOffInfo,
        'execute': execute,
    }
    if envclearance is not None:
        base['envclearance'] = envclearance
    if startvalues is not None:
        base['startvalues'] = list(startvalues)
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, robotspeed=robotspeed, robotaccelmult=robotaccelmult, timeout=timeout, usewebapi=usewebapi)
def GetRobotBridgeIOVariables(self, ioname=None, ionames=None, robotname=None, timeout=10, usewebapi=None, **kwargs):
    """Returns the data of the IO as a string.

    :param ioname: One IO name to read
    :param ionames: A list of IO names to read
    """
    base = {'command': 'GetRobotBridgeIOVariables'}
    # Only non-empty selectors are forwarded.
    if ioname is not None and len(ioname) > 0:
        base['ioname'] = ioname
    if ionames is not None and len(ionames) > 0:
        base['ionames'] = ionames
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def SetRobotBridgeIOVariables(self, iovalues, robotname=None, timeout=10, usewebapi=None, **kwargs):
    """Sets IO variables on the robot bridge.

    :param iovalues: Iterable of IO values to set; forwarded as a list
    """
    base = {
        'command': 'SetRobotBridgeIOVariables',
        'iovalues': list(iovalues),
    }
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def SetRobotBridgeIOVariableAsciiHex16(self, ioname, iovalue, robotname=None, timeout=20, usewebapi=None, **kwargs):
    """Sets one IO variable as ascii hex 16.

    :param ioname: Name of the IO to write
    :param iovalue: Value to write
    """
    base = {
        'command': 'SetRobotBridgeIOVariableAsciiHex16',
        'ioname': ioname,
        'iovalue': iovalue,
    }
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def GetRobotBridgeIOVariableAsciiHex16(self, ioname=None, ionames=None, robotname=None, timeout=10, usewebapi=None, **kwargs):
    """Returns the data of the IO in ascii hex as a string.

    :param ioname: One IO name to read
    :param ionames: A list of IO names to read
    """
    base = {'command': 'GetRobotBridgeIOVariableAsciiHex16'}
    if ioname is not None and len(ioname) > 0:
        base['ioname'] = ioname
    if ionames is not None and len(ionames) > 0:
        base['ionames'] = ionames
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def GetRobotBridgeIOVariableString(self, ioname=None, ionames=None, robotname=None, timeout=10, usewebapi=None, **kwargs):
    """Returns the data of the IO as a string.

    :param ioname: One IO name to read
    :param ionames: A list of IO names to read
    """
    base = {'command': 'GetRobotBridgeIOVariableString'}
    if ioname is not None and len(ioname) > 0:
        base['ioname'] = ioname
    if ionames is not None and len(ionames) > 0:
        base['ionames'] = ionames
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def ComputeIkParamPosition(self, name, robotname=None, timeout=10, usewebapi=None, **kwargs):
    """Computes the position of the named ik parameter.

    :param name: Name of the ik parameter
    """
    base = {
        'command': 'ComputeIkParamPosition',
        'name': name,
    }
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, timeout=timeout, usewebapi=usewebapi)
def ComputeIKFromParameters(self, toolname=None, timeout=10, **kwargs):
    """Computes inverse kinematics solutions from ik parameters.

    :param toolname: string, Tool name
    :param limit: int, Number of solutions to return
    :param ikparamnames: The ikparameter names, also contain information about the grasp like the preshape
    :param targetname: The target object name that the ikparamnames belong to
    :param freeincvalue: float, The discretization of the free joints of the robot when computing ik
    :param filteroptionslist: A list of filter option strings. Can be: CheckEnvCollisions, IgnoreCustomFilters, IgnoreEndEffectorCollisions, IgnoreEndEffectorEnvCollisions, IgnoreEndEffectorSelfCollisions, IgnoreJointLimits, IgnoreSelfCollisions
    :param filteroptions: OpenRAVE IkFilterOptions bitmask. By default this is 1, which means all collisions are checked, int
    :return: A dictionary of:
        - solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit
    """
    return self.ExecuteCommand(dict({'command': 'ComputeIKFromParameters'}, **kwargs), toolname=toolname, timeout=timeout)
def ReloadModule(self, timeout=10, **kwargs):
    """Sends the ReloadModule command."""
    return self.ExecuteCommand(dict({'command': 'ReloadModule'}, **kwargs), timeout=timeout)
def ShutdownRobotBridge(self, timeout=10, **kwargs):
    """Sends the ShutdownRobotBridge command."""
    return self.ExecuteCommand(dict({'command': 'ShutdownRobotBridge'}, **kwargs), timeout=timeout)
def GetRobotBridgeState(self, timeout=10, **kwargs):
    """Queries the robot bridge state."""
    return self.ExecuteCommand(dict({'command': 'GetRobotBridgeState'}, **kwargs), timeout=timeout)
def ClearRobotBridgeError(self, timeout=10, usewebapi=None, **kwargs):
    """Clears any error state on the robot bridge."""
    return self.ExecuteCommand(dict({'command': 'ClearRobotBridgeError'}, **kwargs), timeout=timeout, usewebapi=usewebapi)
def SetRobotBridgePause(self, timeout=10, **kwargs):
    """Pauses the robot bridge."""
    return self.ExecuteCommand(dict({'command': 'SetRobotBridgePause'}, **kwargs), timeout=timeout)
def SetRobotBridgeResume(self, timeout=10, **kwargs):
    """Resumes the robot bridge."""
    return self.ExecuteCommand(dict({'command': 'SetRobotBridgeResume'}, **kwargs), timeout=timeout)
#
# jogging related
#
def SetJogModeVelocities(self, movejointsigns, robotname=None, toolname=None, robotspeed=None, robotaccelmult=None, canJogInCheckMode=None, usewebapi=False, timeout=1, fireandforget=False, **kwargs):
    """Sets the jog velocities/signs for jog mode.

    :param movejointsigns: Signs/velocity directions for the jogged joints
    :param jogtype: One of 'joints', 'world', 'robot', 'tool'
    :param canJogInCheckMode: If true, allows jogging even in check mode. Defaults to false.
    :param robotspeed: Value in (0,1] setting the percentage of robot speed to move at
    :param robotaccelmult: Value in (0,1] setting the percentage of robot acceleration to move at
    """
    base = {
        'command': 'SetJogModeVelocities',
        'movejointsigns': movejointsigns,
    }
    # None means "use the server default"; only forward explicit values.
    if canJogInCheckMode is not None:
        base['canJogInCheckMode'] = canJogInCheckMode
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, toolname=toolname, robotspeed=robotspeed, robotaccelmult=robotaccelmult, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def EndJogMode(self, usewebapi=False, timeout=1, fireandforget=False, **kwargs):
    """Ends jog mode."""
    return self.ExecuteCommand(dict({'command': 'EndJogMode'}, **kwargs), usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def SetRobotBridgeServoOn(self, servoon, robotname=None, timeout=3, fireandforget=False):
    """Turns the robot servo on or off via the robot bridge.

    :param servoon: Desired servo state, sent as 'isservoon'
    """
    return self.ExecuteCommand({
        'command': 'SetRobotBridgeServoOn',
        'isservoon': servoon,
    }, robotname=robotname, timeout=timeout, fireandforget=fireandforget)
def SetRobotBridgeLockMode(self, islockmode, robotname=None, timeout=3, fireandforget=False):
    """Sets the lock mode on the robot bridge.

    :param islockmode: Desired lock mode state
    """
    return self.ExecuteCommand({
        'command': 'SetRobotBridgeLockMode',
        'islockmode': islockmode,
    }, robotname=robotname, timeout=timeout, fireandforget=fireandforget)
def ResetSafetyFault(self, timeout=3, fireandforget=False):
    """Resets a safety fault."""
    return self.ExecuteCommand({'command': 'ResetSafetyFault'}, timeout=timeout, fireandforget=fireandforget)
def SetRobotBridgeControlMode(self, controlMode, timeout=3, fireandforget=False):
    """Sets the control mode on the robot bridge.

    :param controlMode: Desired control mode
    """
    return self.ExecuteCommand({
        'command': 'SetRobotBridgeControlMode',
        'controlMode': controlMode,
    }, timeout=timeout, fireandforget=fireandforget)
def GetDynamicObjects(self, usewebapi=False, timeout=1, **kwargs):
    """Gets a list of dynamically added objects in the scene, from vision detection and physics simulation."""
    return self.ExecuteCommand(dict({'command': 'GetDynamicObjects'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
def ComputeRobotConfigsForGraspVisualization(self, targetname, graspname, robotname=None, toolname=None, unit='mm', usewebapi=False, timeout=10, **kwargs):
    """Returns robot configs for grasp visualization.

    :param targetname: Name of the target object
    :param graspname: Name of the grasp
    :param unit: Unit for the returned values; omitted from the request when None
    """
    base = {
        'command': 'ComputeRobotConfigsForGraspVisualization',
        'targetname': targetname,
        'graspname': graspname,
    }
    if unit is not None:
        base['unit'] = unit
    return self.ExecuteCommand(dict(base, **kwargs), robotname=robotname, toolname=toolname, usewebapi=usewebapi, timeout=timeout)
def ResetCacheTemplates(self, usewebapi=False, timeout=1, fireandforget=False, **kwargs):
    """Resets any cached templates."""
    return self.ExecuteCommand(dict({'command': 'ResetCacheTemplates'}, **kwargs), usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def SetRobotBridgeExternalIOPublishing(self, enable, usewebapi=False, timeout=2, fireandforget=False, **kwargs):
    """Enables or disables publishing collision data to the robotbridge.

    :param enable: Truthy to enable publishing; coerced to bool before sending
    """
    base = {
        'command': 'SetRobotBridgeExternalIOPublishing',
        'enable': bool(enable),
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def RestoreSceneInitialState(self, usewebapi=None, timeout=1, **kwargs):
    """Restores the scene to the state on the filesystem."""
    return self.ExecuteCommand(dict({'command': 'RestoreSceneInitialState'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
#
# Motor test related.
#
def RunMotorControlTuningStepTest(self, jointName, amplitude, timeout=10, usewebapi=False, **kwargs):
    """Runs a step response test on the specified joint and returns the result.

    :param jointName: Name of the joint to test
    :param amplitude: Amplitude of the step input
    """
    taskparameters = {
        'command': 'RunMotorControlTuningStepTest',
        'jointName': jointName,
        'amplitude': amplitude,
    }
    taskparameters.update(kwargs)
    # logging.Logger.warn is a deprecated alias for warning
    log.warning('sending taskparameters=%r', taskparameters)
    return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)
def RunMotorControlTuningMaximulLengthSequence(self, jointName, amplitude, timeout=10, usewebapi=False, **kwargs):
    """Runs a maximum length sequence test on the specified joint and returns the result.

    NOTE: the "Maximul" spelling matches the server command name and cannot be
    changed without breaking the protocol.

    :param jointName: Name of the joint to test
    :param amplitude: Amplitude of the test signal
    """
    base = {
        'command': 'RunMotorControlTuningMaximulLengthSequence',
        'jointName': jointName,
        'amplitude': amplitude,
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout)
def RunMotorControlTuningDecayingChirp(self, jointName, amplitude, freqMax, timeout=120, usewebapi=False, **kwargs):
    """Runs a decaying chirp test on the specified joint and returns the result.

    :param jointName: Name of the joint to test
    :param amplitude: Amplitude of the chirp
    :param freqMax: Maximum frequency of the chirp
    """
    base = {
        'command': 'RunMotorControlTuningDecayingChirp',
        'jointName': jointName,
        'freqMax': freqMax,
        'amplitude': amplitude,
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout)
def RunMotorControlTuningGaussianImpulse(self, jointName, amplitude, timeout=20, usewebapi=False, **kwargs):
    """Runs a Gaussian impulse test on the specified joint and returns the result.

    :param jointName: Name of the joint to test
    :param amplitude: Amplitude of the impulse
    """
    base = {
        'command': 'RunMotorControlTuningGaussianImpulse',
        'jointName': jointName,
        'amplitude': amplitude,
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout)
def RunMotorControlTuningBangBangResponse(self, jointName, amplitude, timeout=60, usewebapi=False, **kwargs):
    """Runs a bangbang trajectory in acceleration or jerk space and returns the result.

    :param jointName: Name of the joint to test
    :param amplitude: Amplitude of the trajectory
    """
    base = {
        'command': 'RunMotorControlTuningBangBangResponse',
        'jointName': jointName,
        'amplitude': amplitude,
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout)
def RunDynamicsIdentificationTest(self, timeout, usewebapi=False, **kwargs):
    """Runs the dynamics identification test.

    :param timeout: Required timeout in seconds (no default; the test can be long-running)
    """
    return self.ExecuteCommand(dict({'command': 'RunDynamicsIdentificationTest'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
def GetTimeToRunDynamicsIdentificationTest(self, usewebapi=False, timeout=10, **kwargs):
    """Queries the expected duration of the dynamics identification test."""
    return self.ExecuteCommand(dict({'command': 'GetTimeToRunDynamicsIdentificationTest'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
def GetInertiaChildJointStartValues(self, usewebapi=False, timeout=10, **kwargs):
    """Queries the inertia child joint start values."""
    return self.ExecuteCommand(dict({'command': 'GetInertiaChildJointStartValues'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
def CalculateTestRangeFromCollision(self, usewebapi=False, timeout=10, **kwargs):
    """Calculates the test range from collision."""
    return self.ExecuteCommand(dict({'command': 'CalculateTestRangeFromCollision'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
# def RunDynamicsIdentificationInertiaTest(self):
# # TODO
# pass
# def RunDynamicsIdentificationCenterOfMassTest(self):
# # TODO
# pass
def GetMotorControlParameterSchema(self, usewebapi=False, timeout=10, **kwargs):
    """Gets the motor control parameter schema."""
    return self.ExecuteCommand(dict({'command': 'GetMotorControlParameterSchema'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
def GetMotorControlParameter(self, jointName, parameterName, usewebapi=False, timeout=10, **kwargs):
    """Gets a motor control parameter for one joint.

    :param jointName: Name of the joint
    :param parameterName: Name of the parameter to read
    """
    base = {
        'command': 'GetMotorControlParameter',
        'jointName': jointName,
        'parameterName': parameterName,
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout)
def GetMotorControlParameters(self, usewebapi=False, timeout=10, **kwargs):
    """Gets cached motor control parameters as a name-value dict."""
    return self.ExecuteCommand(dict({'command': 'GetMotorControlParameters'}, **kwargs), usewebapi=usewebapi, timeout=timeout)
def SetMotorControlParameter(self, jointName, parameterName, parameterValue, timeout=10, usewebapi=False, **kwargs):
    """Sets a motor control parameter for one joint.

    :param jointName: Name of the joint
    :param parameterName: Name of the parameter to set
    :param parameterValue: New value of the parameter
    """
    base = {
        'command': 'SetMotorControlParameter',
        'jointName': jointName,
        'parameterName': parameterName,
        'parameterValue': parameterValue,
    }
    return self.ExecuteCommand(dict(base, **kwargs), usewebapi=usewebapi, timeout=timeout)
def IsProfilingRunning(self, timeout=10, usewebapi=False):
    """Queries whether profiling is running on planning."""
    taskparameters = {'command': 'IsProfilingRunning'}
    return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)
def StartProfiling(self, clocktype='cpu', timeout=10, usewebapi=False):
    """Starts profiling planning.

    :param clocktype: Clock to profile with, defaults to 'cpu'
    """
    taskparameters = {'command': 'StartProfiling', 'clocktype': clocktype}
    return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)
def StopProfiling(self, timeout=10, usewebapi=False):
    """Stops profiling planning."""
    taskparameters = {'command': 'StopProfiling'}
    return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)
def ReplaceBodies(self, bodieslist, timeout=10, **kwargs):
    """Replaces bodies in the environment with new uris.

    :param bodieslist: list of dicts with keys: name, uri, containerDynamicProperties; sent both as 'bodieslist' (back compatibility) and 'replaceInfos'
    :param testLocationName: If specified, tests whether the container in this location matches testLocationContainerId, and only executes the replace if it matches and testLocationContainerId is not empty
    :param testLocationContainerId: containerId used for testing logic with testLocationName
    :param removeNamePrefixes:
    :param removeLocationNames:
    :param doRemoveOnlyDynamic:
    """
    base = {
        'command': 'ReplaceBodies',
        'bodieslist': bodieslist,  # for back compatibility for now
        'replaceInfos': bodieslist,
    }
    return self.ExecuteCommand(dict(base, **kwargs), timeout=timeout)
def GetState(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
    """Queries the current task state."""
    return self.ExecuteCommand(dict({'command': 'GetState'}, **kwargs), timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def EnsureSyncWithRobotBridge(self, syncTimeStampUS, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
    """Ensures that planning has synchronized with robotbridge data newer than syncTimeStampUS.

    :param syncTimeStampUS: us (linux time) of the timestamp
    """
    base = {
        'command': 'EnsureSyncWithRobotBridge',
        'syncTimeStampUS': syncTimeStampUS,
    }
    return self.ExecuteCommand(dict(base, **kwargs), timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
|
mujin/mujincontrollerclientpy
|
python/mujincontrollerclient/realtimerobotclient.py
|
Python
|
apache-2.0
| 53,535
|
[
"Gaussian"
] |
580a48a567b94336333c49c1e01e6ebe8c6b76a474a91f81c542486bd995f293
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
# Author: Platinhom; Last Updated: 2015-10-1
# NOTE: Python 2 script (uses print statements).
# Sums the surface area per atom type from a .pqrta file.
# Usage: python ESES_AtomicArea.py file.pqrta gaff
#
# Note: Only for PQRTA format input.
# Custom: ESES parameters, atom type set
import os,sys
# Modify the ESES program parameter here.
# You can modify to command line input parameter as you like.
probe=1.4
grid=0.2
solbuffer=4.0
# 5 sets of atom type names; the second command-line argument selects one.
gaff=["br", "c", "c1", "c2", "c3", "ca", "cc", "cd", "ce", "cf", "cg", "cl", "cp", "cx", "cy", "f", "h1", "h2", "h3", "h4", "h5", "ha", "hc", "hn", "ho", "hs", "i", "n", "n1", "n2", "n3", "na", "nb", "nc", "nd", "ne", "nf", "nh", "no", "o", "oh", "os", "p5", "s", "s4", "s6", "sh", "ss", "sy"]
amber=["Br", "C", "C*", "CA", "CB", "CD", "CK", "Cl", "CM", "CN", "CQ", "CR", "CT", "CV", "CW", "CZ", "DU", "F", "H", "H1", "H2", "H3", "H4", "H5", "HA", "HC", "HO", "HS", "I", "N", "N*", "N1", "N2", "NA", "NB", "NC", "NO", "NT", "O", "O2", "OH", "OS", "OW", "P", "S", "SH", "SO"]
sybyl=["Br", "C.1", "C.2", "C.3", "C.ar", "Cl", "F", "H", "I", "N.1", "N.2", "N.3", "N.am", "N.ar", "N.pl3", "O.2", "O.3", "P.3", "S.2", "S.3", "S.o", "S.o2"]
bcc=["11", "12", "13", "14", "15", "16", "17", "21", "22", "23", "24", "25", "31", "32", "33", "42", "51", "52", "53", "71", "72", "73", "74", "91"]
gas=["br", "c1", "c2", "c3", "cl", "f", "h", "i", "n1", "n2", "n3", "na", "o2", "o3", "os", "p", "s2", "s3", "so", "so2"]
if (__name__ == '__main__'):
    # argv[1]: input file; must have the .pqrta extension.
    fname=sys.argv[1]
    fnamelist=os.path.splitext(fname)
    if (fnamelist[1].lower()!='.pqrta'):
        print 'Input should be pqrta format!'
        exit(1)
    # Identifier prefix taken from the file name (text before the first '_').
    fnum=fnamelist[0].split('_')[0];
    # argv[2] selects which atom type set to start from.
    settype=[]
    if (sys.argv[2].lower()=='gaff'):settype=gaff
    elif (sys.argv[2].lower()=='amber'):settype=amber
    elif (sys.argv[2].lower()=='sybyl'):settype=sybyl
    elif (sys.argv[2].lower()=='bcc'):settype=bcc
    elif (sys.argv[2].lower()=='gas'):settype=gas
    else:
        print 'Atom type set should be given, gaff, amber, sybyl, bcc or gas!'
        exit(1)
    # areatype maps atom type -> accumulated surface area, initialized to 0.
    areatype={}
    for t in settype:
        areatype[t]=0.0
    fr=open(fname)
    for line in fr:
        # Each ATOM/HETATM record: second-to-last column is the atom type,
        # last column is that atom's surface area (assumed PQRTA layout).
        if (line[:4]=="ATOM" or line[:6]=="HETATM"):
            tmp=line.split();
            atype=tmp[-2].strip();
            aarea=float(tmp[-1].strip());
            # Types not present in the chosen set are appended so they still
            # appear in the output.
            if (atype not in settype): settype.append(atype);
            areatype[atype]=areatype.setdefault(atype,0.0)+aarea;
    fr.close()
    # Emit two aligned space-separated rows: type names, then summed areas.
    outputatype=fnum+' '
    outputaarea=fnum+' '
    for atype in settype:
        outputatype=outputatype+atype+' '
        outputaarea=outputaarea+str(areatype[atype])+" ";
    print outputatype+' : '+outputaarea;
#end main
|
platinhom/DailyTools
|
scripts/ESES_AtomicArea.py
|
Python
|
gpl-2.0
| 2,573
|
[
"Amber"
] |
7450d7e30774a8c623db024ac6c7c2c89e76f25b78fff44f69f084f137edb4f2
|
""" Rough implementation of Harris Corner Detector and Shi-Tomasi
More faster than Harris.py (>10x)

NOTE: Python 2 script (uses a print statement) with the legacy OpenCV
cv2.findContours signature that returns (contours, hierarchy).
"""
import cv2,time,sys
import numpy as np
from matplotlib import pyplot as plt
# Unused stub; always returns 0.
def det(a,b,c,d):
    return 0
# Load the test image as grayscale.
img = cv2.imread('sofsign.jpg',0)
#img = cv2.equalizeHist(img)
rows,cols = img.shape
#img = cv2.GaussianBlur(img,(5,5),1.4)
t = time.time()
# Find Ix, Iy (first-order image derivatives via Sobel).
Ix = cv2.Sobel(img,5,1,0)
Iy = cv2.Sobel(img,5,0,1)
# Find Ix2, Iy2, IxIy (elements of the structure tensor).
Ix2 = Ix*Ix
Iy2 = Iy*Iy
IxIy = Ix*Iy
# Convolve them with a larger Gaussian Window to aggregate per-pixel tensors.
a = Ix2 = cv2.GaussianBlur(Ix2,(5,5),1)
d = Iy2 = cv2.GaussianBlur(Iy2,(5,5),1)
b = c = IxIy = cv2.GaussianBlur(IxIy,(5,5),1)
# Corner response from the 2x2 structure tensor [[a, b], [c, d]].
Trace = a+d
Det = a*d - np.square(b)
#R = np.abs(Trace-0.04*np.square(Det))
# +1 in the denominator avoids division by zero on flat regions.
R = Det/(Trace+1)
# Normalize response to [0, 1] and keep only strong corners as a binary mask.
cv2.normalize(R,R,0,1,cv2.NORM_MINMAX)
R = np.where(R>0.1,255,0)
R = np.uint8(R)
t2 = time.time()
print " time taken :", t2 - t
img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
# One contour per connected corner blob; draw its centroid on the image.
contours, hierarchy = cv2.findContours(R,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    m = cv2.moments(cnt)
    if m['m00'] != 0:
        # Centroid from spatial moments.
        x = int(m['m10']/m['m00'])
        y = int(m['m01']/m['m00'])
    else:
        # Degenerate (zero-area) contour: fall back to its first point.
        x,y = cnt.flatten()[:2]
    cv2.circle(img,(x,y),2,255,-1)
cv2.imshow("edges",R)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
abidrahmank/MyRoughWork
|
roughnote/corner_detectors/Harris_2.py
|
Python
|
mit
| 1,304
|
[
"Gaussian"
] |
9640a7796fb8e41bbe42b10e434cbd6dc39125952700cfb3e22525b862c1a84b
|
#!/usr/bin/env python3

from setuptools import setup

setup(name='visipfix',
      version='0.1.0',
      description='IPFIX message visualizer for Python 3.3',
      author='Brian Trammell',
      author_email='brian@trammell.ch',
      url='http://github.com/britram/python-ipfix',
      packages=['visipfix'],
      # 'requires' metadata expects a sequence of requirement strings, not a
      # bare string (a bare string would be iterated character by character).
      requires=['ipfix'],
      classifiers=["Development Status :: 3 - Alpha",
                   "Intended Audience :: Developers",
                   "License :: OSI Approved :: "
                   "GNU Lesser General Public License v3 or later (LGPLv3+)",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python :: 3.3",
                   "Topic :: System :: Networking"]
      )
|
britram/python-ipfix
|
vis-package/setup.py
|
Python
|
lgpl-3.0
| 756
|
[
"Brian"
] |
d964ccb326b5150029113743e2d4458dbd6cbe139caa5005603e555ec1f09aee
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains the set of passes for Relay, which exposes an interface for
configuring the passes and scripting them in Python.
"""
from . import _analysis
from . import _make
from .expr import Expr
from .ty import Type
from .module import Module
from .feature import Feature
def post_order_visit(expr, fvisit):
    """Recursively visit the IR in post-DFS order, applying fvisit.
    Each node is guaranteed to be visited only once.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    fvisit : function
        The visitor function to be applied.
    """
    result = _analysis.post_order_visit(expr, fvisit)
    return result
def well_formed(expr):
    """Check that each Var is only bound once (well formed).

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    well_form : bool
        Whether the input expression is well formed.
    """
    is_well_formed = _analysis.well_formed(expr)
    return is_well_formed
def check_kind(t, mod=None):
    """Check that the type is well kinded and return the kind.
    For example, this means a type cannot have tensor of tensor, or be a tuple
    type of 2 shapes.

    Parameters
    ----------
    t : tvm.relay.Type
        The type to check.

    mod : Optional[tvm.relay.Module]
        The global module.

    Returns
    -------
    kind : Kind
        The kind of t.

    Examples
    --------
    .. code:: python

        assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Shape)])) == Shape
        assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Type)])) == Type
    """
    if mod is None:
        return _analysis.check_kind(t)
    return _analysis.check_kind(t, mod)
def free_vars(expr):
    """Get free Vars from expression expr in post-DFS order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of free variables in post-DFS order.

    Note
    ----
    The fact that Vars are post-DFS ordered is useful in
    neural networks: usually this means weights of previous
    layers are ordered first.
    """
    result = _analysis.free_vars(expr)
    return result
def bound_vars(expr):
    """Get bound vars from expression expr in post-DFS order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of bound variables in post-DFS order.
    """
    result = _analysis.bound_vars(expr)
    return result
def all_vars(expr):
    """Get all vars from expression expr in post-DFS order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of all variables in post-DFS order.
    """
    result = _analysis.all_vars(expr)
    return result
def free_type_vars(expr, mod=None):
    """Get free type variables from expression/type e.

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type.

    mod : Optional[tvm.relay.Module]
        The global module; a fresh Module is used when omitted.

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of free type variables in post-DFS order.
    """
    if mod is None:
        mod = Module()
    return _analysis.free_type_vars(expr, mod)
def bound_type_vars(expr, mod=None):
    """Get bound type variables from expression/type e.

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type.

    mod : Optional[tvm.relay.Module]
        The global module; a fresh Module is used when omitted.

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of bound type variables in post-DFS order.
    """
    if mod is None:
        mod = Module()
    return _analysis.bound_type_vars(expr, mod)
def all_type_vars(expr, mod=None):
    """Get all type variables from expression/type e.

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type.

    mod : Optional[tvm.relay.Module]
        The global module; a fresh Module is used when omitted.

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of all type variables in post-DFS order.
    """
    if mod is None:
        mod = Module()
    return _analysis.all_type_vars(expr, mod)
def alpha_equal(lhs, rhs):
    """Test two Relay expressions for structural (alpha) equivalence.

    Parameters
    ----------
    lhs : tvm.relay.Expr
        One of the input Expression.
    rhs : tvm.relay.Expr
        One of the input Expression.

    Returns
    -------
    result : bool
        True iff lhs is alpha equal to rhs.
    """
    equal = _make._alpha_equal(lhs, rhs)
    return bool(equal)
def graph_equal(lhs, rhs):
    """Test two Relay expressions for data-flow equivalence.

    Unlike alpha-equality, variables in lhs and rhs are not required
    to match by name: they are treated as data-flow sources and a
    correspondence between them is established instead.

    Parameters
    ----------
    lhs : tvm.relay.Expr
        One of the input Expression.
    rhs : tvm.relay.Expr
        One of the input Expression.

    Returns
    -------
    result : bool
        True iff lhs is data-flow equivalent to rhs.
    """
    equal = _make._graph_equal(lhs, rhs)
    return bool(equal)
def collect_device_info(expr):
    """Collect the device allocation map for the given expression. The device
    ids are propagated from the `device_copy` operators.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    ret : Dict[tvm.relay.expr, int]
        A dictionary mapping tvm.relay.Expr to device type.
    """
    return _analysis.CollectDeviceInfo(expr)
def collect_device_annotation_ops(expr):
    """Collect the device annotation ops for the given expression.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    ret : Dict[tvm.relay.expr, int]
        A dictionary mapping tvm.relay.Expr to device type where the keys are
        annotation expressions.
    """
    return _analysis.CollectDeviceAnnotationOps(expr)
def get_total_mac_number(expr):
    """
    Count the number of MACs (multiply-accumulate) of a model

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    result : int64
        The number of MACs (multiply-accumulate) of a model
    """
    return _analysis.GetTotalMacNumber(expr)
def unmatched_cases(match, mod=None):
    """
    Finds cases that the match expression does not catch, if any.

    Parameters
    ----------
    match : tvm.relay.Match
        The match expression

    mod : Optional[tvm.relay.Module]
        The module (defaults to an empty module)

    Returns
    -------
    missing_patterns : [tvm.relay.Pattern]
        Patterns that the match expression does not catch.
    """
    # None is forwarded unchanged; the backend substitutes an empty module.
    return _analysis.unmatched_cases(match, mod)
def detect_feature(a, b=None):
    """
    Detect the features used in a relay program.

    Parameters
    ----------
    a : Union[tvm.relay.Expr, tvm.relay.Module]
        The input expression or module.
    b : Optional[Union[tvm.relay.Expr, tvm.relay.Module]]
        The input expression or module.
        The two arguments cannot both be expression or module.

    Returns
    -------
    features : Set[Feature]
        Features used in the program.
    """
    # Normalize argument order so the expression comes first.
    if isinstance(a, Module):
        a, b = b, a
    return {Feature(int(x)) for x in _analysis.detect_feature(a, b)}
def structural_hash(value):
    """Hash a Relay expression or type structurally.

    Parameters
    ----------
    value : Union[tvm.relay.Expr, tvm.relay.Type]
        The expression or type to hash.

    Returns
    -------
    result : int
        The hash value

    Raises
    ------
    TypeError
        If ``value`` is neither a relay.Expr nor a relay.Type.
    """
    if isinstance(value, Expr):
        return int(_analysis._expr_hash(value))
    elif isinstance(value, Type):
        return int(_analysis._type_hash(value))
    else:
        # Fixed: the original concatenated "expected" + "relay.Expr" with no
        # separating space, producing "expectedrelay.Expr" in the message.
        msg = ("found value of type {0} expected "
               "relay.Expr or relay.Type").format(type(value))
        raise TypeError(msg)
|
mlperf/training_results_v0.7
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/relay/analysis.py
|
Python
|
apache-2.0
| 9,038
|
[
"VisIt"
] |
98476352c6dcb8a336484d656802033de4817372d5e9fc8d6dcbb035af0e4478
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMzid(RPackage):
    """An mzIdentML parser for R

    A parser for mzIdentML files implemented using the XML package. The
    parser tries to be general and able to handle all types of mzIdentML
    files with the drawback of having less 'pretty' output than a vendor
    specific parser. Please contact the maintainer with any problems and
    supply an mzIdentML file so the problems can be fixed quickly."""

    homepage = "https://bioconductor.org/packages/mzID"
    git = "https://git.bioconductor.org/packages/mzID.git"

    # Each release is pinned to the Bioconductor git commit for that
    # version, so installs are reproducible.
    version('1.28.0', commit='cd006631c8222ce5b4af0577a7401b39cc58fd9c')
    version('1.22.0', commit='382d9cf11f0cba996911a9d79e193d28f3ac6042')
    version('1.20.1', commit='819582646944440ddd9ed3724ae964841573e54c')
    version('1.18.0', commit='7d8924ae95585eb8cf472d21619a7603d291d652')
    version('1.16.0', commit='fc203832a4cbbbe20f6dd826c6bf2128f2c271c4')
    version('1.14.0', commit='1c53aa6523ae61d3ebb13381381fc119d6cc6115')

    # R package dependencies, required both to build and to run.
    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-doparallel', type=('build', 'run'))
    depends_on('r-foreach', type=('build', 'run'))
    depends_on('r-iterators', type=('build', 'run'))
    depends_on('r-protgenerics', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-mzid/package.py
|
Python
|
lgpl-2.1
| 1,523
|
[
"Bioconductor"
] |
75422a51eaed918d58b44a29a9886ec17848c04a9d0e880687fe58d0e2637ed1
|
# This is part of DEPTH.
# DEPTH (Version: 2.0) computes the closest distance of a residue/atom to bulk solvent and predicts small molecule binding site of a protein.
# Copyright (C) 2013, Kuan Pern Tan, Nguyen Thanh Binh, Raghavan Varadarajan and M.S. Madhusudhan
#
# DEPTH is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# DEPTH is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with DEPTH. If not, see <http://www.gnu.org/licenses/>.
# this script automate the process of getting entropy values for a pdb file
import commands
import sys
# Positional command-line arguments (example values shown in comments).
fname = sys.argv[1]  # input PDB file
final_entropy_fname = sys.argv[2]  # combined entropy output file
blosum62 = sys.argv[3] #'../database/BLOSUM62'
bin_dir = sys.argv[4] #'../'
blast_db = sys.argv[5] #'../database/uniref90.db'
blast_exe = sys.argv[6] #'../blast-2.2.22/bin/blastpgp'
blast_iter_n = sys.argv[7] #'5'
e_thr = sys.argv[8] #'0.0001'
max_seq_n = sys.argv[9] #'1000'
# 1. get fasta sequence for every chain
# pdb2fasta.py writes one fasta file per chain and prints their names,
# one per line, on stdout.
inputs = ['python', bin_dir+'/pdb2fasta.py', fname, fname]
cmd = ' '.join(inputs)
print cmd
fastas = commands.getoutput(cmd).strip().split('\n')
# 2. blast for all chains
# copy blosum62 table here (blastpgp expects it in the working directory)
inputs = ['cp', blosum62, '.']
cmd = ' '.join(inputs)
commands.getoutput(cmd)
out_lines = []
for fasta_fname in fastas:
	# PSI-BLAST the chain sequence against the database.
	msa_fname = fasta_fname+'.msa'
	inputs = [blast_exe, '-i ', fasta_fname, '-b', max_seq_n, '-d', blast_db, '-j', blast_iter_n, '-o', msa_fname+' -m 0', '-e', e_thr]
	cmd = ' '.join(inputs)
	print cmd
	commands.getoutput(cmd)
	# 3. msa2block: convert the BLAST alignment into block format
	block_fname = fasta_fname+'.blk'
	inputs = ['python', bin_dir+'/msa2block.py', msa_fname, fasta_fname, bin_dir, block_fname]
	cmd = ' '.join(inputs)
	print cmd
	commands.getoutput(cmd)
	# 4. msa_entropy.py: per-position entropy from the alignment blocks
	entropy_fname = msa_fname+'.entropy'
	inputs = ['python', bin_dir+'/get_msa_entropy.py', block_fname, entropy_fname]
	cmd = ' '.join(inputs)
	print cmd
	commands.getoutput(cmd)
	# 5. combine: keep the body lines; the header is taken from the last
	# chain processed (assumes all chains share an identical header --
	# NOTE(review): confirm; also fails if no chain was produced).
	lines = open(entropy_fname).read().strip().split('\n')
	header, content = [lines[0], lines[1:]]
	out_lines.extend(content)
# end for
# write output: one shared header followed by all chains' entropy rows
fout = open(final_entropy_fname, 'w')
fout.writelines(header+'\n')
fout.writelines('\n'.join(out_lines)+'\n')
fout.close()
|
asford/depth
|
bin/get_blast_entropy.py
|
Python
|
gpl-3.0
| 2,691
|
[
"BLAST"
] |
e9c1f6578272ddbaf0c91e31d51b8fc7ea2088886d322280887c6b4b078cefbc
|
#!/usr/bin/env python
#By: Guruprasad Ananda
"""
Fetch closest up/downstream interval from features corresponding to every interval in primary
usage: %prog primary_file features_file out_file direction
-1, --cols1=N,N,N,N: Columns for start, end, strand in first file
-2, --cols2=N,N,N,N: Columns for start, end, strand in second file
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys, traceback, fileinput
from warnings import warn
from bx.cookbook import doc_optparse
from galaxy.tools.util.galaxyops import *
from bx.intervals.io import *
from bx.intervals.operations import quicksect
assert sys.version_info[:2] >= ( 2, 4 )
def get_closest_feature (node, direction, threshold_up, threshold_down, report_func_up, report_func_down):
    """Recursively walk an interval-tree node reporting candidate
    closest features on one side of an interval.

    #direction=1 for +ve strand upstream and -ve strand downstream cases; and it is 0 for +ve strand downstream and -ve strand upstream cases
    #threhold_up is equal to the interval start for +ve strand, and interval end for -ve strand
    #threhold_down is equal to the interval end for +ve strand, and interval start for -ve strand

    report_func_up/report_func_down are callbacks that collect matching
    nodes; only the one matching `direction` is used on a given call.
    """
    if direction == 1:
        # Upstream search: follow subtree max-end bookkeeping to find the
        # feature ending closest below threshold_up.
        if node.maxend < threshold_up:
            if node.end == node.maxend:
                report_func_up(node)
            elif node.right and node.left:
                if node.right.maxend == node.maxend:
                    get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
                elif node.left.maxend == node.maxend:
                    get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
            elif node.right and node.right.maxend == node.maxend:
                get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
            elif node.left and node.left.maxend == node.maxend:
                get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
        elif node.minend < threshold_up:
            # Some descendant still ends below the threshold: report this
            # node if it qualifies and descend into both candidate subtrees.
            if node.end < threshold_up:
                report_func_up(node)
            if node.left and node.right:
                if node.right.minend < threshold_up:
                    get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
                if node.left.minend < threshold_up:
                    get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
            elif node.left:
                if node.left.minend < threshold_up:
                    get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
            elif node.right:
                if node.right.minend < threshold_up:
                    get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
    elif direction == 0:
        # Downstream search: first node starting above threshold_down is a
        # candidate; keep descending left for an earlier one, else go right.
        if node.start > threshold_down:
            report_func_down(node)
            if node.left:
                get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
        else:
            if node.right:
                get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
def proximal_region_finder(readers, region, comments=True):
    """Yield primary intervals annotated with their closest up/downstream
    feature.

    readers: [primary_reader, features_reader]; region is 'Upstream',
    'Downstream', or anything else for both directions. Headers (and
    Comments, when `comments` is true) are passed through unchanged.
    """
    primary = readers[0]
    features = readers[1]
    # Translate the requested region into per-direction flags.
    if region == 'Upstream':
        up, down = True, False
    elif region == 'Downstream':
        up, down = False, True
    else:
        up, down = True, True
    # Read features into memory:
    rightTree = quicksect.IntervalTree()
    for item in features:
        if type( item ) is GenomicInterval:
            rightTree.insert( item, features.linenum, item.fields )
    for interval in primary:
        if type( interval ) is Header:
            yield interval
        # NOTE(review): plain `if` (not elif) below -- harmless because an
        # interval cannot be both Header and Comment.
        if type( interval ) is Comment and comments:
            yield interval
        elif type( interval ) == GenomicInterval:
            chrom = interval.chrom
            start = int(interval.start)
            end = int(interval.end)
            strand = interval.strand
            if chrom not in rightTree.chroms:
                continue
            else:
                root = rightTree.chroms[chrom] #root node for the chrom tree
                result_up = []
                result_down = []
                if (strand == '+' and up) or (strand == '-' and down):
                    #upstream +ve strand and downstream -ve strand cases
                    get_closest_feature (root, 1, start, None, lambda node: result_up.append( node ), None)
                if (strand == '+' and down) or (strand == '-' and up):
                    #downstream +ve strand and upstream -ve strand case
                    get_closest_feature (root, 0, None, end, None, lambda node: result_down.append( node ))
                if result_up:
                    outfields = list(interval)
                    if len(result_up) > 1: #The results_up list has a list of intervals upstream to the given interval.
                        ends = []
                        for n in result_up:
                            ends.append(n.end)
                        res_ind = ends.index(max(ends)) #fetch the index of the closest interval i.e. the interval with the max end from the results_up list
                    else:
                        res_ind = 0
                    # Python-2 idiom: map used for its side effect of appending.
                    map(outfields.append, result_up[res_ind].other)
                    yield outfields
                if result_down:
                    outfields = list(interval)
                    map(outfields.append, result_down[-1].other) #The last element of result_down will be the closest element to the given interval
                    yield outfields
def main():
    """Parse CLI options, wire up the two interval readers, and write the
    annotated intervals produced by proximal_region_finder to out_file."""
    options, args = doc_optparse.parse( __doc__ )
    try:
        chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
        chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
        in_fname, in2_fname, out_fname, direction = args
    except:
        # doc_optparse.exception() prints usage and exits.
        doc_optparse.exception()
    g1 = NiceReaderWrapper( fileinput.FileInput( in_fname ),
                            chrom_col=chr_col_1,
                            start_col=start_col_1,
                            end_col=end_col_1,
                            strand_col=strand_col_1,
                            fix_strand=True )
    g2 = NiceReaderWrapper( fileinput.FileInput( in2_fname ),
                            chrom_col=chr_col_2,
                            start_col=start_col_2,
                            end_col=end_col_2,
                            strand_col=strand_col_2,
                            fix_strand=True )
    # NOTE(review): out_file is never explicitly closed; relies on process
    # exit to flush -- confirm acceptable.
    out_file = open( out_fname, "w" )
    try:
        for line in proximal_region_finder([g1,g2], direction):
            if type( line ) is list:
                out_file.write( "%s\n" % "\t".join( line ) )
            else:
                out_file.write( "%s\n" % line )
    except ParseError, exc:
        fail( "Invalid file format: %s" % str( exc ) )
    print "Direction: %s" %(direction)
    if g1.skipped > 0:
        print skipped( g1, filedesc=" of 1st dataset" )
    if g2.skipped > 0:
        print skipped( g2, filedesc=" of 2nd dataset" )

if __name__ == "__main__":
    main()
|
dbcls/dbcls-galaxy
|
tools/new_operations/flanking_features.py
|
Python
|
mit
| 7,557
|
[
"Galaxy"
] |
10b18dfc4845f81a04116ccb8632624d67f4e075a793142bbb9b087ed4a297f2
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Locate the documentation driver: an optional single CLI argument points at
# the directory holding the doc scripts; cwd is put on sys.path so the
# apply_relpath helper (which lives next to this script) can be imported.
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    sys.path.insert(0, os.path.abspath(os.getcwd()))
import apply_relpath
# Relative path from the objdir sphinx sources back to the source tree.
IncludePath = apply_relpath.get_topsrcdir_asrelativepathto_objdirsfnxsource()[1]
def pts(category, pyfile):
    """Print a progress line for the file currently being auto-documented."""
    message = 'Auto-documenting %s file %s' % (category, pyfile)
    print(message)
# helper fn
def sphinxify_comment(text):
    """Convert option-comment markup into Sphinx/reST form.

    ``@@`` becomes ``_`` and ``$...$`` math delimiters become
    ``:math:`...``` roles. Replacement order matters: opening
    delimiters must be rewritten before closing ones.
    """
    replacements = (
        ('@@', '_'),
        (' $', ' :math:`'),
        ('($', r'(\ :math:`'),
        ('$ ', '` '),
        ('$.', '`.'),
        ('$,', '`,'),
        ('$)', r'`\ )'),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text
# helper fn
# including the options abbr substitutions file in every SSSOUT option file slows
# compilation by a factor of ten. so, back-translate |%s__%s| into :term:`%s`
def substitute_comment(cmnt):
    """Back-translate ``|module__option|`` abbreviations into
    ``:term:`OPTION <OPTION (MODULE)>``` references.

    Including the options-abbreviation substitutions file in every option
    page slows the Sphinx build badly, so the abbreviations are expanded
    inline here instead. Repeats until no abbreviation remains.
    """
    pattern = re.compile(r'^(.*?)[\s\(]\|(\w+)__(\w+)\|[\s\).,](.*?)$')
    m = pattern.match(cmnt)
    while m:
        module = m.group(2).upper()
        keyword = m.group(3).upper()
        cmnt = '%s :term:`%s <%s (%s)>` %s' % (m.group(1), keyword, keyword, module, m.group(4))
        m = pattern.match(cmnt)
    return cmnt
# helper fn
def determine_options(cfilename):
    """Extract documented options from a plugin C++ source file.

    Scans *cfilename* line by line for psi4's ``/*- ... -*/`` comment
    markers (module descriptions, subsections, option comments) and the
    ``add_*()`` option registrations that follow them, then writes the
    extracted documentation to the module-level file handles ``fmodule``,
    ``fglossary`` and ``fabbr``, plus one rst file per option.

    NOTE(review): depends on globals ``fmodule``/``fglossary``/``fabbr``
    being open, and on ``currentmodule`` being set by a prior
    ``name == "..."`` line before any option is seen -- confirm callers
    guarantee this.
    """
    # Structural markers in the C++ source.
    module = re.compile(r'^(.*)name\s*==\s*"(.*)"(.*?)$', re.IGNORECASE)
    modulecomment = re.compile(r'^(\s*?)\/\*-\s*MODULEDESCRIPTION\s*(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    modulecommentstart = re.compile(r'^(\s*?)\/\*-\s*MODULEDESCRIPTION\s*(.*?)(\s*?)$', re.IGNORECASE)
    subsection = re.compile(r'^(\s*?)\/\*-\s*SUBSECTION\s*(.*?)\s*-\*\/(\s*?)$', re.IGNORECASE)
    comment = re.compile(r'^(\s*?)\/\*-\s*(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    commentend = re.compile(r'^(\s*)(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    commentstart = re.compile(r'^(\s*?)\/\*-\s*(.*)(\s*?)$', re.IGNORECASE)
    # Option registration calls (options.add_*).
    kw_string_def_opt = re.compile(r'add_str\(\s*"(.*)"\s*,\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def_opt_2 = re.compile(r'add_str_i\(\s*"(.*)"\s*,\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def = re.compile(r'add_str\(\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def_2 = re.compile(r'add_str_i\(\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_bool_def = re.compile(r'add_bool\(\s*"(.*)"\s*,\s*("?)([-\w]+)("?)\s*\)')
    kw_double_def = re.compile(r'add_double\(\s*"(.*)"\s*,\s*("?)([-/\.\w]+)("?)\s*\)')
    kw_generic_def = re.compile(r'add_(\w+)\(\s*"(\w+)"\s*,\s*("?)([-\w]+)("?)\s*\)') # untested
    kw_complicated = re.compile(r'add\(\s*"(\w*)"\s*,\s*new\s+(\w+)\(\)\s*\)') # untested
    fcfile = open(cfilename)
    contents = fcfile.readlines()
    fcfile.close()
    ii = 0
    while (ii < len(contents)):
        line = contents[ii]
        if module.match(line):
            # A `name == "MODULE"` test starts a new module section.
            currentmodule = module.match(line).group(2).upper()
            fmodule.write('.. toctree::\n :hidden:\n :glob:\n\n %s__*\n\n' % (currentmodule.lower()))
        elif modulecommentstart.match(line):
            # Accumulate a (possibly multi-line) MODULEDESCRIPTION comment.
            tag = ''
            while 1:
                if (not commentend.match(line)):
                    if modulecommentstart.match(line):
                        tag += modulecommentstart.match(line).group(2)
                    else:
                        tag += ' ' + line.strip()
                    ii += 1
                    line = contents[ii]
                    continue
                else:
                    if modulecomment.match(line):
                        # Single-line form: open and close markers on one line.
                        tag += modulecomment.match(line).group(2)
                        break
                    else:
                        tag += ' ' + commentend.match(line).group(2)
                        break
            fglossary.write('**%s**: %s\n\n' % (currentmodule, tag))
        elif subsection.match(line):
            currentsubsection = subsection.match(line).group(2)
            fglossary.write('\n%s\n%s\n\n' % (currentsubsection, '^' * len(currentsubsection)))
            fglossary.write('.. glossary::\n :sorted:\n\n')
        elif commentstart.match(line):
            # Accumulate a (possibly multi-line) option comment.
            tag = ''
            while 1:
                if (not commentend.match(line)):
                    if commentstart.match(line):
                        tag += commentstart.match(line).group(2)
                    else:
                        tag += ' ' + line.strip()
                    ii += 1
                    line = contents[ii]
                    continue
                else:
                    if comment.match(line):
                        tag += comment.match(line).group(2)
                        break
                    else:
                        tag += ' ' + commentend.match(line).group(2)
                        break
            tag = sphinxify_comment(tag)
            # capture option immediately after comment
            kw_name = ''
            kw_default = 'No Default'
            kw_type = ''
            kw_possible = ''
            ii += 1
            line = contents[ii]
            # Allow a single blank line between comment and registration.
            if (not line or line.isspace()):
                ii += 1
                line = contents[ii]
            # Identify which add_*() form registered the option.
            if kw_string_def_opt.search(line):
                m = kw_string_def_opt.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
                kw_possible = m.group(3)
            elif kw_string_def_opt_2.search(line):
                m = kw_string_def_opt_2.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
                kw_possible = m.group(3)
            elif kw_string_def.search(line):
                m = kw_string_def.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
            elif kw_string_def_2.search(line):
                m = kw_string_def_2.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
            elif kw_bool_def.search(line):
                m = kw_bool_def.search(line)
                kw_name = m.group(1)
                kw_type = 'bool'
                if not (not m.group(3) or m.group(3).isspace()):
                    kw_default = m.group(3).lower()
                    # Normalize C-style 1/0 defaults to true/false.
                    if kw_default == '1':
                        kw_default = 'true'
                    if kw_default == '0':
                        kw_default = 'false'
            elif kw_double_def.search(line):
                m = kw_double_def.search(line)
                kw_name = m.group(1)
                kw_type = 'double'
                if not (not m.group(3) or m.group(3).isspace()):
                    kw_default = m.group(3).lower()
            elif kw_generic_def.search(line):
                m = kw_generic_def.search(line)
                kw_name = m.group(2)
                kw_type = m.group(1)
                if not (not m.group(4) or m.group(4).isspace()):
                    kw_default = m.group(4).lower()
            elif kw_complicated.search(line):
                m = kw_complicated.search(line)
                kw_name = m.group(1)
                kw_type = m.group(2)
                if kw_type == 'ArrayType':
                    kw_type = 'array'
                elif kw_type == 'MapType':
                    kw_type = 'map'
                elif kw_type == 'PythonDataType':
                    kw_type = 'python'
                else:
                    print('ERROR: unrecognized type %s for %s' % (kw_type, kw_name))
                    sys.exit()
            # Map internal type tags to user-facing names.
            if kw_type == 'str': kw_type = 'string'
            elif kw_type == 'int': kw_type = 'integer'
            elif kw_type == 'bool': kw_type = 'boolean'
            elif kw_type == 'double': pass
            elif kw_type == 'array': pass
            elif kw_type == 'map': pass
            elif kw_type == 'python': pass
            else:
                print('ERROR: unrecognized type2 %s for %s' % (kw_type, kw_name))
                sys.exit()
            #print 'kw_name = \t', kw_name
            #print 'kw_type = \t', kw_type
            #print 'kw_dflt = \t', kw_default
            #print 'kw_poss = \t', kw_possible
            #print 'kw_tagl = \t', tag
            #print '\n'
            # substitution list file
            fabbr.write('.. |%s__%s| replace:: :term:`%s <%s (%s)>`\n' %
                        (currentmodule.lower(), kw_name.lower(), kw_name.upper(), kw_name.upper(), currentmodule.upper()))
            # individual option file for plugin options. rather pointless but consistent w/regular module options
            fsssdoc = open('source/autodir_plugins/'+currentmodule.lower()+'__'+kw_name.lower()+'.rst', 'w')
            div = '"' * (14 + len(currentmodule) + 2 * len(kw_name))
            fsssdoc.write(':term:`%s <%s (%s)>`\n%s\n\n' % (kw_name.upper(), kw_name.upper(), currentmodule.upper(), div))
            fsssdoc.write(' %s\n\n' % (substitute_comment(tag)))
            fglossary.write(' %s (%s)\n %s\n\n' % (kw_name.upper(), currentmodule.upper(), tag))
            # Type/possible-values/default lines, specialized for a few cases.
            if kw_type == 'boolean':
                fglossary.write(' * **Type**: :ref:`boolean <op_c_boolean>`\n')
                fsssdoc.write(' * **Type**: :ref:`boolean <op_c_boolean>`\n')
            elif (kw_type == 'double') and ((kw_name.lower().find('conv') > -1) or (kw_name.lower().find('tol') > -1)):
                fglossary.write(' * **Type**: :ref:`conv double <op_c_conv>`\n')
                fsssdoc.write(' * **Type**: :ref:`conv double <op_c_conv>`\n')
            elif (kw_type == 'string') and ((kw_name.lower() == 'basis') or (kw_name.lower().startswith('df_basis'))):
                fglossary.write(' * **Type**: %s\n' % kw_type)
                fsssdoc.write(' * **Type**: %s\n' % kw_type)
                fglossary.write(' * **Possible Values**: :ref:`basis string <apdx:basisElement>`\n')
                fsssdoc.write(' * **Possible Values**: :ref:`basis string <apdx:basisElement>`\n')
            else:
                fglossary.write(' * **Type**: %s\n' % kw_type)
                fsssdoc.write(' * **Type**: %s\n' % kw_type)
                if not (not kw_possible or kw_possible.isspace()):
                    sline = kw_possible.split()
                    fglossary.write(' * **Possible Values**: %s\n' % (', '.join(sline)))
                    fsssdoc.write(' * **Possible Values**: %s\n' % (', '.join(sline)))
            fglossary.write(' * **Default**: %s\n\n' % kw_default)
            fsssdoc.write(' * **Default**: %s\n\n' % kw_default)
            fsssdoc.close()
        # Stop at the plugin entry point: options are declared before it.
        if (line.find('extern "C" PsiReturnType') > -1):
            break
        ii += 1
# Objective #3
# Plugin directories in psi4/plugin/
# Build the "available plugins" index page plus, for each plugin directory,
# a module page, a glossary of its options, and abbreviation substitutions.
fdriver = open('source/autodoc_available_plugins.rst', 'w')
fdriver.write('\n.. index:: plugins; available\n')
fdriver.write('.. _`sec:availablePlugins`:\n\n')
fdriver.write('====================================================\n')
fdriver.write('Emerging Theoretical Methods: Plugins DFADC to RQCHF\n')
fdriver.write('====================================================\n\n')
fdriver.write('.. toctree::\n :maxdepth: 1\n\n')
fabbr = open('source/autodoc_abbr_options_plugins.rst', 'w')
# from each plugin directory ...
for pydir in glob.glob(DriverPath + '../../plugins/*'):
    dirname = os.path.split(pydir)[1]
    div = '=' * len(dirname)
    # Empty exclusion list kept as a hook for skipping broken plugins.
    if dirname not in []:
        pts('plugin', dirname)
        fdriver.write(' autodir_plugins/module__%s' % (dirname))
        fmodule = open('source/autodir_plugins/module__'+dirname+'.rst', 'w')
        fmodule.write('\n.. _`sec:%s`:\n' % (dirname.lower()))
        fmodule.write('.. index:: plugin; %s\n\n' % (dirname.lower()))
        fmodule.write(':srcplugin:`' + dirname.lower() + '`\n')
        fmodule.write(div + '=============' + '\n\n')
        #fmodule.write(dirname.lower() + '\n')
        #fmodule.write(div + '\n\n')
        #fmodule.write('.. toctree::\n :hidden:\n :glob:\n\n %s__*\n\n' % (dirname.lower()))
        fmodule.write('.. toctree::\n :hidden:\n\n /autodir_plugins/glossary__%s\n\n' % (dirname.lower()))
        fglossary = open('source/autodir_plugins/glossary__'+dirname+'.rst', 'w')
        fglossary.write('\n.. include:: /autodoc_abbr_options_c.rst\n')
        fglossary.write('.. include:: /autodoc_abbr_options_plugins.rst\n\n')
        fglossary.write('.. glossary::\n :sorted:\n\n')
        # ... include doc.rst file
        docfile = '%s/doc.rst' % (pydir)
        if os.path.isfile(docfile):
            fmodule.write('.. include:: %splugins/%s/doc.rst\n\n' % (IncludePath, dirname))
        # ... include docstrings from any *.py files
        pyfiles = glob.glob(pydir + '/*.py')
        if len(pyfiles) > 0:
            fmodule.write('Py-side Documentation\n')
            fmodule.write('---------------------\n\n')
            for pyfile in pyfiles:
                filename = os.path.split(pyfile)[1]
                basename = os.path.splitext(filename)[0]
                fmodule.write('.. automodule:: %s.%s\n' % (dirname, basename))
                fmodule.write(' :members:\n')
                fmodule.write(' :undoc-members:\n\n')
        # ... include keywords section from any *.cc files
        # todo: turn this into a fn and store in a dictionary
        cfiles = glob.glob(pydir + '/*.cc') + glob.glob(pydir + '/*.cc.in')
        if len(cfiles) > 0:
            fmodule.write('C-side Documentation\n')
            fmodule.write('--------------------\n\n')
            for cfile in cfiles:
                # determine_options writes into fmodule/fglossary/fabbr.
                determine_options(cfile)
            fmodule.write('.. include:: /autodir_plugins/glossary__%s.rst' % (dirname))
        fmodule.write('\n\n')
        fmodule.close()
        fglossary.write('\n\n')
        fglossary.close()
        fdriver.write('\n')
fdriver.write('\n')
fdriver.close()
fabbr.write('\n')
fabbr.close()
|
psi4/psi4
|
doc/sphinxman/document_plugins.py
|
Python
|
lgpl-3.0
| 15,223
|
[
"Psi4"
] |
768378bbc4c7aff207a7d8e3258edc593defdc6ea2315c2ccf805fca8038b8af
|
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from WebAppDIRAC.Lib.WebHandler import WebHandler, asyncGen
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import Time
from WebAppDIRAC.WebApp.handler.Palette import Palette
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.Core.Utilities import DictCache
import json
class JobMonitorHandler( WebHandler ):
AUTH_PROPS = "authenticated"
__dataCache = DictCache.DictCache()
  @asyncGen
  def web_getJobData( self ):
    """Fetch one page of the job summary table from the JobMonitoring
    service and finish the request with rows keyed by column name.

    The selection, sort order and paging are taken from the request
    arguments via self._request()."""
    RPC = RPCClient( "WorkloadManagement/JobMonitoring", timeout = 600 )
    req = self._request()
    result = yield self.threadTask( RPC.getJobPageSummaryWeb, req, self.globalSort , self.pageNumber, self.numberOfJobs )
    # Validate the service reply structure before rendering.
    if not result["OK"]:
      self.finish( {"success":"false", "result":[], "total":0, "error":result["Message"]} )
      return
    result = result["Value"]
    if not result.has_key( "TotalRecords" ):
      self.finish( {"success":"false", "result":[], "total":-1, "error":"Data structure is corrupted"} )
      return
    if not ( result["TotalRecords"] > 0 ):
      self.finish( {"success":"false", "result":[], "total":0, "error":"There were no data matching your selection"} )
      return
    if not ( result.has_key( "ParameterNames" ) and result.has_key( "Records" ) ):
      self.finish( {"success":"false", "result":[], "total":-1, "error":"Data structure is corrupted"} )
      return
    if not ( len( result["ParameterNames"] ) > 0 ):
      self.finish( {"success":"false", "result":[], "total":-1, "error":"ParameterNames field is missing"} )
      return
    if not ( len( result["Records"] ) > 0 ):
      self.finish( {"success":"false", "result":[], "total":0, "Message":"There are no data to display"} )
      return
    # Convert each row (list of values) to a dict keyed by column name.
    callback = []
    jobs = result["Records"]
    head = result["ParameterNames"]
    headLength = len( head )
    for i in jobs:
      tmp = {}
      for j in range( 0, headLength ):
        tmp[head[j]] = i[j]
      callback.append( tmp )
    total = result["TotalRecords"]
    if result.has_key( "Extras" ):
      # NOTE(review): __dict2string is called with {} so "request" is
      # always empty -- confirm intended.
      st = self.__dict2string( {} )
      extra = result["Extras"]
      timestamp = Time.dateTime().strftime( "%Y-%m-%d %H:%M [UTC]" )
      callback = {"success":"true", "result":callback, "total":total, "extra":extra, "request":st, "date":timestamp }
    else:
      callback = {"success":"true", "result":callback, "total":total, "date":None}
    self.finish( callback )
  def __dict2string( self, req ):
    """Serialize a selector dict as 'key: v1, v2; key2: v3' (values must be
    iterables of strings)."""
    result = ""
    try:
      for key, value in req.iteritems():
        result = result + str( key ) + ": " + ", ".join( value ) + "; "
    except Exception, x:
      pass
      # NOTE(review): exception is logged but otherwise swallowed; the
      # partial result built so far is returned.
      gLogger.info( "\033[0;31m Exception: \033[0m %s" % x )
    # Drop the trailing "; " left by the loop.
    result = result.strip()
    result = result[:-1]
    return result
@asyncGen
def web_getSelectionData( self ):
sData = self.getSessionData()
user = sData["user"]["username"]
if user == "Anonymous":
callback["prod"] = [["Insufficient rights"]]
else:
cacheKey = ( sData["user"].get( "group", "" ),
sData["setup"] )
callback = JobMonitorHandler.__dataCache.get( cacheKey )
if not callback:
callback = {}
RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
result = yield self.threadTask( RPC.getProductionIds )
if result["OK"]:
prod = []
prods = result["Value"]
if len( prods ) > 0:
prods.sort( reverse = True )
prod = [ [ i ] for i in prods if i.startswith('00')]
else:
prod = [["Nothing to display"]]
else:
gLogger.error( "RPC.getProductionIds() return error: %s" % result["Message"] )
prod = [["Error happened on service side"]]
callback["prod"] = prod
RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
result = yield self.threadTask( RPC.getSites )
if result["OK"]:
tier1 = gConfig.getValue( "/WebApp/PreferredSites", [] ) # Always return a list
site = []
if len( result["Value"] ) > 0:
s = list( result["Value"] )
for i in tier1:
site.append( [str( i )] )
for i in s:
if i not in tier1:
site.append( [str( i )] )
else:
site = [["Nothing to display"]]
else:
gLogger.error( "RPC.getSites() return error: %s" % result["Message"] )
site = [["Error happened on service side"]]
callback["site"] = site
# ##
result = yield self.threadTask( RPC.getStates )
if result["OK"]:
stat = []
if len( result["Value"] ) > 0:
for i in result["Value"]:
stat.append( [str( i )] )
else:
stat = [["Nothing to display"]]
else:
gLogger.error( "RPC.getStates() return error: %s" % result["Message"] )
stat = [["Error happened on service side"]]
callback["status"] = stat
# ##
result = yield self.threadTask( RPC.getMinorStates )
if result["OK"]:
stat = []
if len( result["Value"] ) > 0:
for i in result["Value"]:
stat.append( [i] )
else:
stat = [["Nothing to display"]]
else:
gLogger.error( "RPC.getMinorStates() return error: %s" % result["Message"] )
stat = [["Error happened on service side"]]
callback["minorstat"] = stat
# ##
result = yield self.threadTask( RPC.getApplicationStates )
if result["OK"]:
app = []
if len( result["Value"] ) > 0:
for i in result["Value"]:
app.append( [i] )
else:
app = [["Nothing to display"]]
else:
gLogger.error( "RPC.getApplicationstates() return error: %s" % result["Message"] )
app = [["Error happened on service side"]]
callback["app"] = app
# ##
result = yield self.threadTask( RPC.getJobTypes )
if result["OK"]:
types = []
if len( result["Value"] ) > 0:
for i in result["Value"]:
types.append( [i] )
else:
types = [["Nothing to display"]]
else:
gLogger.error( "RPC.getJobTypes() return error: %s" % result["Message"] )
types = [["Error happened on service side"]]
callback["types"] = types
# ##
# groupProperty = credentials.getProperties(group)
if user == "Anonymous":
callback["owner"] = [["Insufficient rights"]]
else:
result = yield self.threadTask( RPC.getOwners )
if result["OK"]:
owner = []
if len( result["Value"] ) > 0:
for i in result["Value"]:
owner.append( [str( i )] )
else:
owner = [["Nothing to display"]]
elif 'NormalUser' in sData['user']['properties']:
owner = [[user]]
callback["owner"] = owner
else:
gLogger.error( "RPC.getOwners() return error: %s" % result["Message"] )
owner = [["Error happened on service side"]]
callback["owner"] = owner
result = yield self.threadTask( RPC.getOwnerGroup )
if result['OK']:
callback['OwnerGroup'] = [ [group] for group in result['Value']]
JobMonitorHandler.__dataCache.add( cacheKey, 360, callback )
self.finish( callback )
def _request( self ):
    """Build a job-selection dictionary from the HTTP request arguments.

    Side effects: initialises self.pageNumber, self.numberOfJobs and
    self.globalSort from the "start", "limit" and "sort" arguments.

    :return: dict of selection criteria for the JobMonitoring service
    """
    self.pageNumber = 0
    self.numberOfJobs = 25
    self.globalSort = [["JobID", "DESC"]]
    req = {}
    arguments = self.request.arguments

    # Paging parameters.  Note: "in" replaces dict.has_key, which was
    # removed in Python 3; it behaves identically on Python 2.
    if "limit" in arguments and len( arguments["limit"][0] ) > 0:
        self.numberOfJobs = int( arguments["limit"][0] )
    if "start" in arguments and len( arguments["start"][0] ) > 0:
        self.pageNumber = int( arguments["start"][0] )
    else:
        self.pageNumber = 0

    # JSON-encoded list selectors: (request argument name, selection key).
    # Each argument holds a JSON list; empty lists add no constraint.
    listSelectors = [ ( "JobID", "JobID" ),
                      ( "jobGroup", "JobGroup" ),
                      ( "site", "Site" ),
                      ( "status", "Status" ),
                      ( "minorStatus", "MinorStatus" ),
                      ( "appStatus", "ApplicationStatus" ),
                      ( "jobType", "JobType" ),
                      ( "owner", "Owner" ),
                      ( "OwnerGroup", "OwnerGroup" ) ]
    for argName, reqKey in listSelectors:
        if argName in arguments:
            values = list( json.loads( arguments[ argName ][-1] ) )
            if len( values ) > 0:
                req[ reqKey ] = values

    # Time-window selectors; an optional time-of-day is appended to the date.
    if 'startDate' in arguments and len( arguments["startDate"][0] ) > 0:
        if 'startTime' in arguments and len( arguments["startTime"][0] ) > 0:
            req["FromDate"] = str( arguments["startDate"][0] + " " + arguments["startTime"][0] )
        else:
            req["FromDate"] = str( arguments["startDate"][0] )
    if 'endDate' in arguments and len( arguments["endDate"][0] ) > 0:
        if 'endTime' in arguments and len( arguments["endTime"][0] ) > 0:
            req["ToDate"] = str( arguments["endDate"][0] + " " + arguments["endTime"][0] )
        else:
            req["ToDate"] = str( arguments["endDate"][0] )
    if 'date' in arguments and len( arguments["date"][0] ) > 0:
        req["LastUpdate"] = str( arguments["date"][0] )

    # Sorting: "LastSignOfLife" entries are skipped (not a sortable column).
    if 'sort' in arguments:
        sort = json.loads( arguments['sort'][-1] )
        if len( sort ) > 0:
            self.globalSort = []
            for i in sort:
                if "LastSignOfLife" not in i['property']:
                    self.globalSort += [[str( i['property'] ), str( i['direction'] )]]
    else:
        self.globalSort = [["JobID", "DESC"]]
    gLogger.debug( "Request", str( req ) )
    return req
@asyncGen
def web_jobAction( self ):
    """Apply a bulk action (delete/kill/reschedule/reset) to a list of jobs.

    Request arguments: "JobID" (comma-separated job ids), "action".
    Finishes with {"success": "true"/"false", ...}.
    """
    ids = self.request.arguments["JobID"][0].split( "," )
    ids = [int( i ) for i in ids]
    action = self.request.arguments["action"][0]
    RPC = RPCClient( "WorkloadManagement/JobManager" )
    # Dispatch table instead of an if/elif chain.
    actionMap = { "delete": RPC.deleteJob,
                  "kill": RPC.killJob,
                  "reschedule": RPC.rescheduleJob,
                  "reset": RPC.resetJob }
    if action not in actionMap:
        # Fix: an unknown action previously fell through to an
        # UnboundLocalError on `result`; report it cleanly instead.
        self.finish( {"success": "false", "error": "Unknown action: %s" % action} )
        return
    result = yield self.threadTask( actionMap[action], ids )
    callback = {}
    if result["OK"]:
        callback = {"success": "true", "result": ""}
    else:
        if "InvalidJobIDs" in result:
            callback = {"success": "false", "error": "Invalid JobIDs: %s" % result["InvalidJobIDs"]}
        elif "NonauthorizedJobIDs" in result:
            # Message text kept byte-for-byte for clients that match on it.
            callback = {"success": "false", "error": "You are nonauthorized to %s jobs with JobID: %s" % ( action, result["NonauthorizedJobIDs"] )}
        else:
            callback = {"success": "false", "error": result["Message"]}
    self.finish( callback )
@asyncGen
def web_jobData( self ):
    """Return one kind of detailed information for a single job.

    Request arguments: "id" (job id), "data_kind" (what to fetch: JDL,
    basic info, parameters, logging info, stdout, pending requests,
    log URL, stager report, pilot output/error/logging).
    Finishes with {"success":"true", "result":...} on success or
    {"success":"false", "error":...} on failure (an unknown data_kind
    finishes with an empty dict, as before).
    """
    jobId = int( self.request.arguments["id"][0] )
    dataKind = self.request.arguments["data_kind"][0]
    callback = {}
    if dataKind == "getJDL":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobJDL, jobId, False )
        if result["OK"]:
            callback = {"success":"true", "result":result["Value"]}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getBasicInfo":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobSummary, jobId )
        if result["OK"]:
            items = []
            for key, value in result["Value"].items():
                items.append( [key, value] )
            callback = {"success":"true", "result":items}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getParams":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobParameters, jobId )
        if result["OK"]:
            attr = result["Value"]
            items = []
            for i in attr.items():
                if i[0] == "Log URL":  # the link has to be opened in a new tab.
                    items.append( [i[0], i[1].replace( '>', ' target="_blank">' )] )
                elif i[0] != "StandardOutput":  # stdout is fetched by a separate data_kind.
                    items.append( [i[0], i[1]] )
            callback = {"success":"true", "result":items}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getLoggingInfo":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobLoggingInfo, jobId )
        if result["OK"]:
            callback = {"success":"true", "result":result["Value"]}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getStandardOutput":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobParameters, jobId )
        if result["OK"]:
            # Fix: result["Value"] is now read only after the OK check;
            # the original read it first and raised KeyError on RPC failure.
            attr = result["Value"]
            if "StandardOutput" in attr:
                callback = {"success":"true", "result":attr["StandardOutput"]}
            else:
                callback = {"success":"false", "error":"Not accessible yet"}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getPending":
        RPC = RPCClient( "RequestManagement/ReqManager" )
        result = yield self.threadTask( RPC.readRequestsForJobs, [jobId] )
        if result["OK"]:
            if jobId in result['Value']['Successful']:
                items = {}
                req = Request( result['Value']['Successful'][jobId] ).getDigest()['Value']
                items["PendingRequest"] = req
                callback = {"success":"true", "result":items}
            elif jobId in result['Value']['Failed']:  # when no request associated to the job
                callback = {"success":"false", "error":result['Value']["Failed"][jobId]}
            else:
                callback = {"success":"false", "error":"No request found with unknown reason"}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getLogURL":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobParameters, jobId )
        if result["OK"]:
            attr = result["Value"]
            if "Log URL" in attr:
                url = attr["Log URL"].split( '"' )
                # we can not open non secured URL
                httpsUrl = url[1].replace( 'http', 'https' )
                callback = {"success":"true", "result":httpsUrl}
            else:
                callback = {"success":"false", "error":"No URL found"}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getStagerReport":
        RPC = RPCClient( "WorkloadManagement/JobMonitoring" )
        result = yield self.threadTask( RPC.getJobParameters, jobId )
        if result["OK"]:
            attr = result["Value"]
            if "StagerReport" in attr:
                callback = {"success":"true", "result":attr["StagerReport"]}
            else:
                callback = {"success":"false", "error":"StagerReport not available"}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getPilotStdOut":
        RPC = RPCClient( "WorkloadManagement/WMSAdministrator" )
        result = yield self.threadTask( RPC.getJobPilotOutput, jobId )
        if result["OK"]:
            if "StdOut" in result["Value"]:
                callback = {"success":"true", "result":result["Value"]["StdOut"]}
            else:
                # Fix: the original left callback as {} in this branch.
                callback = {"success":"false", "error":"Pilot standard output is not available"}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getPilotStdErr":
        RPC = RPCClient( "WorkloadManagement/WMSAdministrator" )
        result = yield self.threadTask( RPC.getJobPilotOutput, jobId )
        if result["OK"]:
            if "StdErr" in result["Value"]:
                callback = {"success":"true", "result":result["Value"]["StdErr"]}
            else:
                # Fix: the original left callback as {} in this branch.
                callback = {"success":"false", "error":"Pilot standard error is not available"}
        else:
            callback = {"success":"false", "error":result["Message"]}
    #--------------------------------------------------------------------------------
    elif dataKind == "getPilotLoggingInfo":
        PILOTRPC = RPCClient( "WorkloadManagement/WMSAdministrator" )
        retVal = yield self.threadTask( PILOTRPC.getPilots, int( jobId ) )
        if retVal['OK']:
            if retVal['Value']:
                # list(...)[0] works on Python 2 and 3 (keys()[0] is Py2-only).
                pilotReference = list( retVal['Value'] )[0]
                retVal = yield self.threadTask( PILOTRPC.getPilotLoggingInfo, pilotReference )
                if retVal["OK"]:
                    callback = {"success":"true", "result":retVal["Value"]}
                else:
                    callback = {"success":"false", "error":retVal["Message"]}
            else:
                # Fix: an empty pilot dict previously raised IndexError.
                callback = {"success":"false", "error":"No pilots found for this job"}
        else:
            callback = {"success":"false", "error":retVal["Message"]}
    self.finish( callback )
@asyncGen
def web_getStatisticsData( self ):
    """Return per-category job counts used by the statistics charts.

    The "statsField" argument (a UI label) selects the grouping
    attribute; the current selection filters (from _request) are
    applied.  For the "Site" selector, configured preferred sites are
    listed first, each tagged with a country code and a colour.
    """
    req = self._request()
    paletteColor = Palette()
    RPC = RPCClient( "WorkloadManagement/JobMonitoring" )

    # Map UI labels onto JobMonitoring attribute names; unknown labels
    # (e.g. "Site", "Status") pass through unchanged.
    selector = self.request.arguments["statsField"][0]
    selectorMap = { "Minor Status": "MinorStatus",
                    "Application Status": "ApplicationStatus",
                    "Job Group": "JobGroup",
                    "Owner Group": "OwnerGroup",
                    "Job Type": "JobType" }
    selector = selectorMap.get( selector, selector )

    result = yield self.threadTask( RPC.getJobStats, selector, req )
    if result["OK"]:
        callback = []
        result = dict( result["Value"] )
        keylist = sorted( result.keys() )

        def countryCodeOf( site ):
            # Site names end in a country suffix after the last dot.
            # Narrowed from a bare except: rsplit misses give IndexError,
            # non-string keys give AttributeError.
            try:
                return site.rsplit( ".", 1 )[1]
            except ( IndexError, AttributeError ):
                return "Unknown"

        tier1 = []
        if selector == "Site":
            tier1 = gConfig.getValue( "/WebApp/PreferredSites", [] )
            if len( tier1 ) > 0:
                # Preferred sites come first, alphabetically, coloured by country.
                tier1.sort()
                for i in tier1:
                    if i in result:
                        countryCode = countryCodeOf( i )
                        callback.append( {"key": i, "value": result[i], "code": countryCode, "color": paletteColor.getColor( countryCode )} )
        for key in keylist:
            if selector == "Site":
                if key in tier1:
                    continue  # already emitted in the preferred block above
                callback.append( {"key": key, "value": result[key], "code": countryCodeOf( key ), "color": paletteColor.getColor( key )} )
            else:
                callback.append( {"key": key, "value": result[key], "code": "", "color": paletteColor.getColor( key )} )
        callback = {"success": "true", "result": callback}
    else:
        callback = {"success": "false", "error": result["Message"]}
    self.finish( callback )
@asyncGen
def web_getSandbox( self ):
    """Stream a job's input or output sandbox to the browser as a tar file.

    Query arguments: jobID (required); sandbox ('Input'/'Output',
    defaults to 'Output'); check (if present, only verify availability).
    """
    # Guard: a job id is mandatory.
    if 'jobID' not in self.request.arguments:
        self.finish( {"success":"false", "error":"Maybe you forgot the jobID ?"} )
        return
    jobID = int( self.request.arguments['jobID'][0] )

    sbType = 'Output'
    if 'sandbox' in self.request.arguments:
        sbType = str( self.request.arguments['sandbox'][0] )

    # Act on behalf of the logged-in user via delegated credentials.
    sessionData = self.getSessionData()
    sbClient = SandboxStoreClient( useCertificates = True,
                                   delegatedDN = str( sessionData["user"]["DN"] ),
                                   delegatedGroup = str( sessionData["user"]["group"] ),
                                   setup = sessionData["setup"] )
    result = yield self.threadTask( sbClient.downloadSandboxForJob, jobID, sbType, inMemory = True )
    if not result['OK']:
        self.finish( {"success":"false", "error":"Error: %s" % result['Message']} )
        return

    # Availability probe only: the caller does not want the payload.
    if "check" in self.request.arguments:
        self.finish( {"success":"true"} )
        return

    tarData = result['Value']
    tarName = "%s_%sSandbox.tar" % ( str( jobID ), sbType )
    responseHeaders = ( ( 'Content-type', 'application/x-tar' ),
                        ( 'Content-Disposition', 'attachment; filename="%s"' % tarName ),
                        ( 'Content-Length', len( tarData ) ),
                        ( 'Cache-Control', "no-cache, no-store, must-revalidate, max-age=0" ),
                        ( 'Pragma', "no-cache" ) )
    for headerName, headerValue in responseHeaders:
        self.set_header( headerName, headerValue )
    self.finish( tarData )
|
zmathe/WebAppDIRAC
|
WebApp/handler/JobMonitorHandler.py
|
Python
|
gpl-3.0
| 22,745
|
[
"DIRAC"
] |
c4b02e122131eb277efe2f83b944c4e86270e51fa572052f330f806f1d55c79a
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time

# Problem size: a tall m x n matrix gives an overdetermined system.
m = 500
n = 250
display = True
worldRank = El.mpi.WorldRank()

def Rectang(height,width):
  # Build a distributed matrix filled with uniform random entries.
  A = El.DistMatrix()
  El.Uniform( A, height, width )
  return A

A = Rectang(m,n)
b = El.DistMatrix()
El.Gaussian( b, m, 1 )
if display:
  El.Display( A, "A" )
  El.Display( b, "b" )

# Solve the Least Absolute Value (1-norm) regression problem with the
# affine LP solver, printing Mehrotra IPM progress.
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
# NOTE(review): time.clock() was removed in Python 3.8 and its meaning
# (wall vs CPU time) is platform-dependent -- this script is Python 2.
startLAV = time.clock()
x = El.LAV( A, b, ctrl )
endLAV = time.clock()
if worldRank == 0:
  print "LAV time: ", endLAV-startLAV

if display:
  El.Display( x, "x" )

# Residual r = b - A x of the LAV solution, and reference norms of b.
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMatrix()
El.Copy( b, r )
El.Gemv( El.NORMAL, -1., A, x, 1., r )
if display:
  El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rOneNorm = El.EntrywiseNorm( r, 1 )
if worldRank == 0:
  print "|| b ||_2 =", bTwoNorm
  print "|| b ||_oo =", bInfNorm
  print "|| A x - b ||_2 =", rTwoNorm
  print "|| A x - b ||_1 =", rOneNorm

# The dense least squares overwrites A
ALS = El.DistMatrix()
El.Copy( A, ALS )
xLS = El.LeastSquares(ALS,b)
if display:
  El.Display( xLS, "x_{LS}" )

# Residual of the least-squares (2-norm) solution, for comparison
# against the LAV (1-norm) residual above.
rLS = El.DistMatrix()
El.Copy( b, rLS )
El.Gemv( El.NORMAL, -1., A, xLS, 1., rLS )
El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSOneNorm = El.EntrywiseNorm(rLS,1)
if worldRank == 0:
  print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
  print "|| A x_{LS} - b ||_1 =", rLSOneNorm

# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
  raw_input('Press Enter to exit')
|
sg0/Elemental
|
examples/interface/LAVDense.py
|
Python
|
bsd-3-clause
| 1,802
|
[
"Gaussian"
] |
ef28bde0b807f67230454afb5482793628f3e64e02678ac229174df3d9dd5f1f
|
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import copy
import numpy as np
from collections import OrderedDict
from .task import Task
class TaskGroup(object):
    """
    A task group is a group of tasks that share underlying data. The task group
    is responsible for storing all of this data, while the underlying tasks will
    give different views of the data.
    For now the only view type is that the tasks will by default not return data
    when the associated values are nan.
    """
    def __init__(self, tasks_config, variables_config):
        """Create one Task per entry of tasks_config, plus a 'dummy' task
        used only to delegate dimension/paramify queries."""
        self.tasks = {}
        for task_name, task_options in tasks_config.iteritems():  # Python 2 dict API
            self.tasks[task_name] = Task(task_name,
                                         task_options,
                                         variables_config)
        self.dummy_task = Task('dummy', {'type': 'dummy'}, variables_config)
        #TODO: Validate the data
        # Start with empty (0 x num_dims) arrays; real data arrives through
        # the inputs/pending/values setters below.
        # NOTE(review): _values and _costs being (0, num_dims)-shaped looks
        # suspicious (per-observation scalars would be 1-D) -- confirm intent.
        self._inputs = np.zeros((0,self.num_dims))#np.array([])
        self._pending = np.zeros((0,self.num_dims))#np.array([])
        self._values = np.zeros((0,self.num_dims))#np.array([])
        self._costs = np.zeros((0,self.num_dims))#np.array([])
        self.variables_config = copy.copy(variables_config)

    @property
    def num_dims(self):
        # All tasks share one input space; the dummy task knows its size.
        return self.dummy_task.num_dims

    @property
    def inputs(self):
        return self._inputs

    @inputs.setter
    def inputs(self, inputs):
        # Store centrally and propagate the same array to every task view.
        self._inputs = inputs
        for task in self.tasks.values():
            task.inputs = inputs

    @property
    def pending(self):
        return self._pending

    @pending.setter
    def pending(self, pending):
        # Store centrally and propagate the same array to every task view.
        self._pending = pending
        for task in self.tasks.values():
            task.pending = pending

    @property
    def values(self):
        """return a dictionary of the task values keyed by task name"""
        return {task_name : task.values for task_name, task in self.tasks.iteritems()}

    @values.setter
    def values(self, values):
        # `values` is a dict keyed by task name; each task receives only
        # its own entry.
        self._values = values
        for task_name in self.tasks:
            task = self.tasks[task_name]
            task.values = values[task_name]

    def add_nan_task_if_nans(self):
        """If any observation is NaN in any task, add a binary 'NaN'
        constraint task whose values flag which observations are valid."""
        # Summing across tasks yields NaN wherever any task's value is NaN.
        valids = np.vstack([vals for vals in self.values.values()]).sum(0)
        if np.any(np.isnan(valids)):
            # First, see if all the tasks currently in this group are noiseless
            # If so, we should make the NaN task noiseless also
            # This is important because if a NaN constraint unnecessarily
            # thinks it's non-deterministic it could take MUCH longer to pass
            # the confidence threshold
            all_noiseless = True
            for task_name in self.tasks:
                if self.tasks[task_name].options.get('likelihood', 'gaussian').lower() != 'noiseless':
                    all_noiseless = False
                    break

            nan_likelihood = 'STEP' if all_noiseless else 'BINOMIAL'

            nan_task = Task('NaN', {'type' : 'CONSTRAINT', 'likelihood' : nan_likelihood}, self.variables_config)
            nan_task.inputs = self._inputs
            nan_task.pending = self._pending
            # True where the observation is valid (not NaN).
            nan_task.values = np.logical_not(np.isnan(valids))
            self.tasks['NaN'] = nan_task

    def paramify_and_print(self, *args, **kwargs):
        # Delegate to the dummy task (shared variable configuration).
        return self.dummy_task.paramify_and_print(*args, **kwargs)

    def paramify(self, data_vector):
        """convert a data vector on the unit interval into a dict of dicts keyed by parameter names
        the values will be stored as, e.g. param["name"]["value"]
        """
        return self.dummy_task.paramify(data_vector)

    def vectorify(self, params):
        # Inverse of paramify, delegated to the dummy task.
        return self.dummy_task.vectorify(params)

    def from_unit(self, U):
        """remove the scaling for the parameters that keeps them in [0,1)"""
        return self.dummy_task.from_unit(U)
|
DavidMcDonald1993/ghsom
|
spearmint/spearmint/tasks/task_group.py
|
Python
|
gpl-2.0
| 13,533
|
[
"Gaussian"
] |
3f4a4f516038f012703d17a2aa79bf1206a976fe95962bf225288cc36546f941
|
# -*- coding: utf-8 -*-
"""This module provides the Expr class. This attempts to create a
consistent interface to SymPy's expressions.
Copyright 2014--2022 Michael Hayes, UCECE
"""
# TODO, propagate assumptions for arithmetic......... This may be
# tricky. At the moment only a limited propagation of assumptions are
# performed.
from __future__ import division
from .assumptions import Assumptions
from .cache import cached_property
from .domains import UndefinedDomain
from .quantity import UndefinedQuantity
from .ratfun import Ratfun
from .sym import sympify, symsimplify, j, omegasym, symdebug, AppliedUndef
from .sym import capitalize_name, tsym, miscsymbol, usersymbol, symbol_map, tausym, nusym, oo
from .sym import fsym, ssym, Fsym, Omegasym, symbol_delete, pi
from .sym import nsym, ksym, zsym
from .state import state
from .printing import pprint, pretty, print_str, latex
from .functions import sqrt, log10, atan2, gcd, exp, Function, Eq
from .units import units, u as uu, dB
from .utils import (as_N_D, as_sum, remove_images, pair_conjugates,
split_dirac_delta, expand_functions)
import sympy as sym
from sympy.utilities.lambdify import lambdify
from .sym import simplify
from .simplify import simplify_sin_cos, simplify_heaviside, simplify_dirac_delta
from .simplify import simplify_rect, simplify_unit_impulse, simplify_conjugates
from .simplify import expand_hyperbolic_trig
from .approximate import approximate_fractional_power, approximate_exp
from .approximate import approximate_hyperbolic_trig
from .config import heaviside_zero, unitstep_zero
from collections import OrderedDict
from warnings import warn
__all__ = ('expr', 'symbol', 'symbols', 'deg', 'rad', 'degrees',
'radians', 'equation', 'difference_equation')
class ExprPrint(object):
    """Mixin providing printing support (repr, pretty, latex) for Lcapy
    expressions and expression containers."""

    @property
    def _pexpr(self):
        """Return expression for printing (with units attached when the
        global printing state requests them)."""
        if not hasattr(self, 'expr'):
            # Containers without an `expr` attribute print themselves.
            return self
        if state.show_units:
            if state.canonical_units:
                return self.expr_with_canonical_units
            else:
                return self.expr_with_units
        else:
            return self.expr

    def __repr__(self):
        """This is called by repr(expr). It is used, e.g., when printing
        in the debugger."""
        return '%s(%s)' % (self.__class__.__name__, print_str(self._pexpr))

    def _repr_pretty_(self, p, cycle):
        """This is used by jupyter notebooks to display an expression using
        unicode. It is also called by IPython when displaying an
        expression."""
        p.text(pretty(self._pexpr))

    # Note, _repr_latex_ is handled at the end of this file.

    def pretty(self, **kwargs):
        """Make pretty string."""
        return pretty(self._pexpr, **kwargs)

    def prettyans(self, name, **kwargs):
        """Make pretty string with LHS name."""
        return pretty(sym.Eq(sympify(name), self._pexpr), **kwargs)

    def pprint(self, **kwargs):
        """Pretty print"""
        pprint(self._pexpr, **kwargs)

    def pprintans(self, name, **kwargs):
        """Pretty print string with LHS name."""
        print(self.prettyans(name, **kwargs))

    def latex(self, **kwargs):
        """Make latex string."""
        return latex(self._pexpr, **kwargs)

    def latex_with_units(self, eng_format=False, show_units=True,
                         evalf=True, num_digits=3, **kwargs):
        """Make latex string with optional units.

        `evalf` evaluates the expression to `num_digits` digits first;
        `eng_format` uses engineering notation for numeric values."""
        from .engformatter import EngFormatter

        expr = self
        if evalf:
            expr = expr.evalf(num_digits)

        if show_units:
            units = str(expr.units)
            # A dimensionless quantity prints without a unit suffix.
            if units == '1':
                units = ''
        else:
            units = ''

        value = expr.sympy
        if evalf and value.is_number and eng_format:
            return EngFormatter(num_digits=num_digits).latex(value, units)

        s = latex(value, **kwargs)
        if show_units and units != '':
            # Raw string: '\,' is an invalid escape sequence warning in
            # modern Python, although the bytes are identical.
            s += r'\,' + units
        return s

    def latex_math(self, **kwargs):
        """Make latex math-mode string."""
        return '$' + self.latex(**kwargs) + '$'

    def latexans(self, name, **kwargs):
        """Print latex string with LHS name."""
        return latex(sym.Eq(sympify(name), self._pexpr), **kwargs)

    def srepr(self):
        """Return the exact, reconstructable SymPy representation.

        Fix: the original called `sym.repr`, which does not exist in the
        SymPy module and raised AttributeError; `sym.srepr` is the
        intended function.
        """
        return sym.srepr(self._pexpr)
class ExprContainer(object):
    """Mixin for containers of expressions, mapping element-wise
    operations over the contained items."""

    @property
    def sympy(self):
        """Return the underlying SymPy expression."""
        return self.expr

    def evaluate(self):
        """Evaluate each element to convert to floating point.
        This may change..."""
        return self.__class__([element.evalf() for element in self])

    def evalf(self, n=15):
        """Evaluate each element to convert to floating point values.
        `n` is the number of decimal places."""
        return self.__class__([element.evalf(n) for element in self])

    def simplify(self, **kwargs):
        """Simplify each element."""
        return self.__class__([simplify(element, **kwargs) for element in self])

    @property
    def symbols(self):
        """Return dictionary of symbols in the expression keyed by name."""
        collected = {}
        for element in self:
            collected.update(element.symbols)
        return collected
class ExprMisc(object):
    """Mixin providing miscellaneous helper methods for expressions."""
    def pdb(self):
        """Enter the python debugger, returning self so calls can be
        chained."""
        import pdb
        pdb.set_trace()
        return self
class ExprDict(ExprPrint, ExprContainer, ExprMisc, OrderedDict):
    """Facade class for dictionary created by sympy.

    Element-wise methods apply best-effort to both keys and values;
    entries that do not support an operation are left unchanged."""
    def __getitem__(self, key):
        """Look up `key`, trying its string form first.
        This is used for nodal analysis to store results indexed by
        node name."""
        key2 = str(key)
        try:
            return super(ExprDict, self).__getitem__(key2)
        except KeyError:
            # Narrowed from a bare except: only a missing key should
            # trigger the fallback lookup.
            return super(ExprDict, self).__getitem__(key)
    def __call__(self, *args, **kwargs):
        """Perform substitution/transformation on each element."""
        new = {}
        for key, val in self.items():
            new[key] = expr(val)(*args, **kwargs)
        return self.__class__(new)
    def _map_items(self, func):
        """Apply `func` to every key and value, best effort; keys or
        values that do not support the operation are left unchanged."""
        new = self.__class__()
        for k, v in self.items():
            try:
                k = func(k)
            except Exception:
                pass
            try:
                v = func(v)
            except Exception:
                pass
            new[k] = v
        return new
    def evaluate(self):
        """Evaluate each element to convert to floating point.
        The keys are also converted if possible to handle
        dictionaries of poles/zeros."""
        return self._map_items(lambda e: e.evalf())
    def simplify(self, **kwargs):
        """Simplify each element but not the keys."""
        new = self.__class__()
        for k, v in self.items():
            new[k] = simplify(v, **kwargs)
        return new
    def solve(self, *symbols, **kwargs):
        """Solve system of equations, and return as ExprDict.
        See sympy.solve for usage."""
        symbols = delcapify(symbols)
        system = list(delcapify(self).values())
        solutions = sym.solve(system, *symbols, **kwargs)
        return ExprDict({key: expr(val) for key, val in solutions.items()})
    def evalf(self, n=15):
        """Evaluate each element to convert to floating point values.
        `n` is the number of decimal places.  Keys are converted too,
        where possible."""
        return self._map_items(lambda e: e.evalf(n))
    def subs(self, *args, **kwargs):
        """Substitute variables in expression, see sympy.subs for usage.
        Applied to both keys and values, best effort."""
        return self._map_items(lambda e: e.subs(*args, **kwargs))
    @property
    def expr(self):
        """Return a plain dict of SymPy expressions.
        The behaviour of this may change. Perhaps the keys
        should stay the same?"""
        new = {}
        for k, v in self.items():
            if isinstance(k, Expr):
                k = k.expr
            if isinstance(v, Expr):
                v = v.expr
            new[k] = v
        return new
class ExprList(ExprPrint, list, ExprContainer, ExprMisc):
    """Facade class for list created by sympy."""
    # Have ExprPrint first so that its _repr__pretty_ is called
    # in preference to list's one. Alternatively, add explicit
    # _repr_pretty_ method here.
    def __init__(self, iterable=None, evalf=False, **assumptions):
        """Wrap each element of `iterable` as an Lcapy expression.
        If `evalf` is True, elements are numerically evaluated
        (best effort) instead of wrapped."""
        if iterable is None:
            iterable = []
        elements = []
        for item in iterable:
            if evalf:
                try:
                    item = item.evalf()
                except Exception:
                    # Leave items that cannot be evaluated unchanged.
                    pass
            else:
                item = expr(item, **assumptions)
            elements.append(item)
        # Modern zero-argument super (the file is Python 3 only).
        super().__init__(elements)
    def __call__(self, *args, **kwargs):
        """Perform substitution/transformation on each element."""
        return self.__class__([elt(*args, **kwargs) for elt in self])
    def subs(self, *args, **kwargs):
        """Substitute variables in expression, see sympy.subs for usage."""
        return expr([e.subs(*args, **kwargs) for e in self])
    def solve(self, *symbols, **kwargs):
        """Solve system of equations and return as ExprDict.
        See sympy.solve for usage."""
        symbols = delcapify(symbols)
        system = delcapify(self)
        solutions = sym.solve(system, *symbols, **kwargs)
        return ExprDict({key: expr(val) for key, val in solutions.items()})
    @property
    def expr(self):
        """Return a plain list of SymPy expressions."""
        return [e.expr for e in self]
    @property
    def fval(self):
        """Evaluate expression and return as a list of python float values."""
        return [float(a.fval) for a in self]
    @property
    def cval(self):
        """Evaluate expression and return as a list of python complex values."""
        return [complex(a.cval) for a in self]
class ExprTuple(ExprPrint, tuple, ExprContainer, ExprMisc):
    """Facade class for tuple created by sympy."""
    # Tuples are immutable, need to use __new__
    def __new__(cls, iterable, **assumptions):
        """Wrap each element of `iterable` as an Lcapy expression."""
        elements = [expr(e, **assumptions) for e in iterable]
        # Modern zero-argument super (the file is Python 3 only).
        return super().__new__(cls, elements)
    def __call__(self, *args, **kwargs):
        """Perform substitution/transformation on each element."""
        return self.__class__([elt(*args, **kwargs) for elt in self])
    def subs(self, *args, **kwargs):
        """Substitute variables in expression, see sympy.subs for usage."""
        return expr(tuple(e.subs(*args, **kwargs) for e in self))
    def solve(self, *symbols, **kwargs):
        """Solve system of equations, and return as ExprDict.
        See sympy.solve for usage."""
        symbols = delcapify(symbols)
        system = delcapify(self)
        solutions = sym.solve(system, *symbols, **kwargs)
        return ExprDict({key: expr(val) for key, val in solutions.items()})
    @property
    def expr(self):
        """Return a plain tuple of SymPy expressions."""
        return tuple(e.expr for e in self)
    @property
    def fval(self):
        """Evaluate expression and return as a tuple of python float values."""
        return tuple(float(a.fval) for a in self)
    @property
    def cval(self):
        """Evaluate expression and return as a tuple of python complex values."""
        return tuple(complex(a.cval) for a in self)
class ExprDomain(object):
    """Mixin providing quantity (voltage, current, ...) and domain
    (time, Laplace, ...) conversions for expression classes."""
    # NOTE(review): presumably overridden by sequence-like classes;
    # confirm against the rest of the file.
    is_sequence = False
    def _class_by_quantity(self, quantity, domain=None):
        # Look up the expression class for `quantity` in `domain`,
        # defaulting to this expression's own domain.
        if domain is None:
            domain = self.domain
        return expressionclasses.get_quantity(domain, quantity)
    def _class_by_domain(self, domain):
        # Look up the expression class for this quantity in `domain`.
        return expressionclasses.get_quantity(domain, self.quantity)
    def as_quantity(self, quantity):
        """Convert to the expression class for the named quantity
        (voltage, current, impedance, admittance, transfer, power, or
        undefined).  Raises ValueError for an unknown quantity."""
        if quantity == 'voltage':
            return self.as_voltage()
        elif quantity == 'current':
            return self.as_current()
        elif quantity == 'impedance':
            return self.as_impedance()
        elif quantity == 'admittance':
            return self.as_admittance()
        elif quantity == 'transfer':
            return self.as_transfer()
        elif quantity == 'power':
            return self.as_power()
        elif quantity == 'undefined':
            return self.as_expr()
        raise ValueError('Unknown quantity %s for %s' % (quantity, self))
    def as_domain(self, domain):
        """Convert to the named domain (time, laplace, fourier, phasor,
        or angular fourier).  Raises ValueError for an unknown domain."""
        if domain == 'time':
            return self.as_time()
        elif domain == 'laplace':
            return self.as_laplace()
        elif domain == 'fourier':
            return self.as_fourier()
        elif domain == 'phasor':
            return self.as_phasor()
        elif domain == 'angular fourier':
            return self.as_angular_fourier()
        raise ValueError('Unknown domain %s for %s' % (domain, self))
    def as_voltage(self):
        """Convert to a voltage quantity in the same domain."""
        return self._class_by_quantity('voltage')(self)
    def as_current(self):
        """Convert to a current quantity in the same domain."""
        return self._class_by_quantity('current')(self)
    def as_admittance(self):
        """Convert to an admittance quantity in the same domain."""
        return self._class_by_quantity('admittance')(self)
    def as_impedance(self):
        """Convert to an impedance quantity in the same domain."""
        return self._class_by_quantity('impedance')(self)
    def as_transfer(self):
        """Convert to a transfer-function quantity in the same domain."""
        return self._class_by_quantity('transfer')(self)
    def as_power(self):
        """Convert to a power quantity in the same domain."""
        return self._class_by_quantity('power')(self)
    def as_expr(self):
        """Return self; already a generic expression."""
        return self
    def as_constant(self):
        """Convert to a constant of the same quantity.  Raises
        ValueError if the expression is not constant."""
        if not self.is_unchanging:
            raise ValueError('Expression %s is not constant' % self)
        # NOTE(review): the double call below (construct from self,
        # then call the result with cexpr(self)) looks suspicious;
        # confirm it is not meant to construct directly from cexpr(self).
        return self._class_by_quantity(self.quantity)(self)(cexpr(self))
    def as_superposition(self):
        """Convert a voltage or current to a superposition
        representation.  Raises ValueError for other quantities."""
        from .superpositionvoltage import SuperpositionVoltage
        from .superpositioncurrent import SuperpositionCurrent
        if self.is_voltage:
            return SuperpositionVoltage(self)
        elif self.is_current:
            return SuperpositionCurrent(self)
        raise ValueError('Can only convert voltage or current to superposition')
    def change(self, arg, domain=None, units_scale=None, **assumptions):
        """Change expression class, keeping the quantity but optionally
        switching domain.  `units_scale`, if given, multiplies the units
        of the result."""
        if domain is None:
            domain = self.domain
        if domain == 'constant':
            # Allow changing of constants, e.g., V1 to 5 * t
            domain = expr(arg).domain
        quantity = self.quantity
        cls = self._class_by_quantity(quantity, domain)
        ret = cls(arg, **assumptions)
        if units_scale is not None:
            ret.units = self.units * units_scale
        return ret
class Expr(UndefinedDomain, UndefinedQuantity, ExprPrint, ExprMisc, ExprDomain):
"""Facade class for sympy classes derived from sympy.Expr."""
var = None
# This provides a minor speed improvement for attribute lookup
# but prevents dynamic adding of new attributes.
__slots__ = ()
_mul_mapping = {('voltage', 'admittance'): 'current',
('current', 'impedance'): 'voltage',
('voltage', 'transfer'): 'voltage',
('current', 'transfer'): 'current',
('transfer', 'transfer'): 'transfer',
('voltage', 'constant'): 'voltage',
('current', 'constant'): 'current',
('admittance', 'constant'): 'admittance',
('impedance', 'constant'): 'impedance',
('transfer', 'constant'): 'transfer',
('constant', 'constant'): 'constant',
('voltage', 'voltage'): 'voltagesquared',
('current', 'current'): 'currentsquared',
('admittance', 'impedance'): 'transfer',
('admittance', 'admittance'): 'admittancesquared',
('impedance', 'impedance'): 'impedancesquared',
('voltage', 'current'): 'power',
('voltagesquared', 'admittance'): 'power',
('currentsquared', 'impedance'): 'power',
('impedancesquared', 'admittance'): 'impedance',
('admittancesquared', 'impedance'): 'admittance',
('power', 'impedance'): 'voltagesquared',
('power', 'admittance'): 'currentsquared',
('admittancesquared', 'constant'): 'admittancesquared',
('impedancesquared', 'constant'): 'impedancesquared',
('voltagesquared', 'constant'): 'voltagesquared',
('currentsquared', 'constant'): 'currentsquared',
('power', 'constant'): 'power'}
_div_mapping = {('voltage', 'impedance'): 'current',
('current', 'admittance'): 'voltage',
('voltage', 'transfer'): 'voltage',
('current', 'transfer'): 'current',
('transfer', 'transfer'): 'transfer',
('voltage', 'current'): 'impedance',
('current', 'voltage'): 'admittance',
('current', 'current'): 'transfer',
('voltage', 'voltage'): 'transfer',
('voltage', 'constant'): 'voltage',
('current', 'constant'): 'current',
('impedance', 'constant'): 'impedance',
('admittance', 'constant'): 'admittance',
('transfer', 'constant'): 'transfer',
('constant', 'impedance'): 'admittance',
('constant', 'admittance'): 'impedance',
('constant', 'transfer'): 'transfer',
('constant', 'constant'): 'constant',
('impedance', 'impedance'): 'transfer',
('admittance', 'admittance'): 'transfer',
('voltagesquared', 'voltage'): 'voltage',
('currentsquared', 'current'): 'current',
('admittancesquared', 'admittance'): 'admittance',
('impedancesquared', 'impedance'): 'impedance',
('power', 'current'): 'voltage',
('power', 'voltage'): 'current',
('power', 'admittance'): 'voltagesquared',
('power', 'voltagesquared'): 'admittance',
('power', 'impedance'): 'currentsquared',
('power', 'currentsquared'): 'impedance',
('impedance', 'admittance'): 'impedancesquared',
('admittance', 'impedance'): 'admittancesquared',
('voltagesquared', 'impedance'): 'power',
('currentsquared', 'admittance'): 'power',
('voltagesquared', 'power'): 'impedance',
('currentsquared', 'power'): 'admittance',
('admittancesquared', 'constant'): 'admittancesquared',
('impedancesquared', 'constant'): 'impedancesquared',
('voltagesquared', 'constant'): 'voltagesquared',
('currentsquared', 'constant'): 'currentsquared',
('admittancesquared', 'admittancesquared'): 'transfer',
('impedancesquared', 'impedancesquared'): 'transfer',
('voltagesquared', 'voltagesquared'): 'transfer',
('currentsquared', 'currentsquared'): 'transfer',
('power', 'power'): 'transfer',
('power', 'constant'): 'power'}
    # This needs to be larger than what sympy defines so
    # that the __rmul__, __radd__ methods get called.
    # Otherwise pi * t becomes a Mul rather than a TimeDomainExpression object.
    _op_priority = 1000
    @property
    def _pexpr(self):
        """Return expression for printing, including units when the
        global printing state requests them."""
        if not hasattr(self, 'expr'):
            # Not fully constructed yet; print the object itself.
            return self
        if state.show_units:
            if state.canonical_units:
                return self.expr_with_canonical_units
            else:
                return self.expr_with_units
        else:
            return self.expr
def __init__(self, arg, rational=True, **assumptions):
"""
There are three types of assumptions:
1. The sympy assumptions associated with symbols, for example,
real=True.
2. The expr assumptions such as dc, ac, causal. These are primarily
to help the inverse Laplace transform for LaplaceDomainExpression classes.
3. Additional parameters such as nid for NoiseDomain and omega
for PhasorDomain.
"""
if isinstance(arg, Expr):
ass = arg.assumptions.copy()
if self.is_always_causal:
ass.set('causal', True)
self.assumptions = ass.merge(**assumptions)
self.expr = arg.expr
try:
self._units = self._default_units
except:
self._units = sym.S.One
return
assumptions = Assumptions(assumptions)
# Perhaps could set dc?
#if arg == 0:
# assumptions.set('causal', True)
if self.is_always_causal:
assumptions.set('causal', True)
self.assumptions = assumptions
# Remove Lcapy assumptions from SymPy expr.
self.expr = sympify(arg, rational=rational, **self.assumptions.sympy_assumptions())
try:
self._units = self._default_units
except:
self._units = sym.S.One
    def as_time(self):
        """Convert to the time domain."""
        return self.time()
    def as_laplace(self):
        """Convert to the Laplace (s) domain."""
        return self.laplace()
    def as_phasor(self):
        """Convert to the phasor domain."""
        return self.phasor()
    def as_fourier(self):
        """Convert to the Fourier (f) domain."""
        return self.fourier()
    def as_angular_fourier(self):
        """Convert to the angular Fourier (omega) domain."""
        return self.angular_fourier()
    def __str__(self, printer=None):
        """String representation of expression."""
        return print_str(self._pexpr)
    def __repr__(self):
        """This is called by repr(expr). It is used, e.g., when printing
        in the debugger.  The format is ``ClassName(expr)``."""
        return '%s(%s)' % (self.__class__.__name__, print_str(self._pexpr))
    def _repr_pretty_(self, p, cycle):
        """This is used by jupyter notebooks to display an expression using
        unicode. It is also called by IPython when displaying an
        expression."""
        p.text(pretty(self._pexpr))
    def _repr_latex_(self):
        """This is used by jupyter notebooks to display an expression using
        LaTeX markup. However, this requires mathjax. If this method
        is not defined, jupyter falls back on _repr_pretty_ which
        outputs unicode."""
        # This is called for Expr but not ExprList
        return '$$' + latex(self._pexpr) + '$$'
    def _latex(self, *args, **kwargs):
        """Make latex string. This is called by sympy.latex when it
        encounters an Expr type."""
        # This works in conjunction with LatexPrinter._print
        # It is a hack to allow printing of _Matrix types
        # and its elements.
        # This also catches sym.latex(expr) where expr is
        # an Lcapy expr.
        return self.latex(**kwargs)
    def _pretty(self, *args, **kwargs):
        """Make pretty string. This is called by the SymPy pretty
        printer when it encounters an Expr type."""
        # This works in conjunction with Printer._print
        # It is a hack to allow printing of _Matrix types
        # and its elements.
        expr = self._pexpr
        printer = args[0]
        return printer._print(expr)
    @property
    def canonical_units(self):
        """Return the canonical units of the expression. This is a simplified
        form, so volt * ampere becomes watt.
        """
        return units.simplify_units(self._units)
    @property
    def units(self):
        """Return the units of the expression."""
        return self._units
    @units.setter
    def units(self, unit):
        """Set the units of the expression.  Note, the value is stored
        as given; use `canonical_units` for the simplified form."""
        self._units = unit
    @property
    def is_causal(self):
        """Return True if zero for t < 0."""
        # Lazily infer unspecified assumptions from the expression itself.
        if self.assumptions.has_unspecified:
            self.assumptions.infer_from_expr(self)
        return self.assumptions.is_causal
    @is_causal.setter
    def is_causal(self, value):
        """Set (or clear) the causal assumption."""
        self.assumptions.set('causal', value)
    @property
    def is_dc(self):
        """Return True if the expression is assumed DC."""
        if self.assumptions.has_unspecified:
            self.assumptions.infer_from_expr(self)
        return self.assumptions.is_dc
    @is_dc.setter
    def is_dc(self, value):
        """Set (or clear) the DC assumption."""
        self.assumptions.set('dc', value)
    @property
    def is_ac(self):
        """Return True if the expression is assumed AC."""
        if self.assumptions.has_unspecified:
            self.assumptions.infer_from_expr(self)
        return self.assumptions.is_ac
    @is_ac.setter
    def is_ac(self, value):
        """Set (or clear) the AC assumption."""
        self.assumptions.set('ac', value)
    @property
    def is_unknown(self):
        """Return True if behaviour is unknown for t < 0."""
        if self.assumptions.has_unspecified:
            self.assumptions.infer_from_expr(self)
        return self.assumptions.is_unknown
    @is_unknown.setter
    def is_unknown(self, value):
        """Set (or clear) the unknown assumption."""
        self.assumptions.set('unknown', value)
    @property
    def is_complex_signal(self):
        """Return True if time-domain signal is complex."""
        if 'complex_signal' not in self.assumptions:
            return False
        return self.assumptions['complex_signal'] == True
    @property
    def is_complex(self):
        """Return True if the expression is complex (contains s, z, or j)."""
        from .sym import ssym
        from .sym import zsym
        if self.has(ssym) or self.has(zsym):
            return True
        # Sometimes there is a lingering Re or Im operator
        # even though we know the result is real.
        # A labelled part (real, imaginary, magnitude, phase) is
        # real-valued by construction.
        if self.part != '':
            return False
        return self.has(j)
    @property
    def is_conditional(self):
        """Return True if expression has a condition, such as t >= 0."""
        expr = self.expr
        # Could be more specific, such as self.var >= 0, but might
        # have self.var >= t1.
        return expr.is_Piecewise
    @property
    def is_rational_function(self):
        """Return True if expression is a rational function."""
        return self.expr.is_rational_function(self.var)
    @property
    def is_strictly_proper(self):
        """Return True if the degree of the denominator is greater
        than the degree of the numerator.
        This will throw an exception if the expression is not a
        rational function."""
        # No rational-function representation; treat as not strictly proper.
        if self._ratfun is None:
            return False
        return self._ratfun.is_strictly_proper
    @property
    def is_phase(self):
        """Return True if expression is labelled as a phase."""
        return self.part == 'phase'
    @property
    def is_phase_radians(self):
        """Return True if expression is a phase in radians."""
        return self.part == 'phase' and self.units == uu.rad
    @property
    def is_phase_degrees(self):
        """Return True if expression is a phase in degrees."""
        return self.part == 'phase' and self.units == uu.deg
    @property
    def is_real_part(self):
        """Return True if expression is labelled as a real part."""
        return self.part == 'real'
    @property
    def is_imag_part(self):
        """Return True if expression is labelled as an imaginary part."""
        return self.part == 'imaginary'
    @property
    def is_magnitude(self):
        """Return True if expression is labelled as a magnitude."""
        return self.part == 'magnitude'
    @property
    def is_dB(self):
        """Return True if expression is a magnitude in decibels."""
        return self.part == 'magnitude' and self.units == dB
    @property
    def ac(self):
        """Return the AC components."""
        return self.as_superposition().ac
    @property
    def dc(self):
        """Return the DC component."""
        return self.as_superposition().dc
    @property
    def transient(self):
        """Return the transient component."""
        return self.as_superposition().transient
    @property
    def fval(self):
        """Evaluate expression and return as a python float value."""
        return float(self.val.expr)
    @property
    def cval(self):
        """Evaluate expression and return as a python complex value."""
        return complex(self.val.expr)
    @property
    def val(self):
        """Return floating point value of expression if it can be evaluated,
        otherwise the expression.
        This returns an Lcapy Expr object. If you want a numerical value
        use expr.fval for a float value or expr.cval for a complex value."""
        return self.evalf()
    def evalf(self, n=15, *args, **kwargs):
        """Convert constants in an expression to floats, evaluated to `n`
        decimal places. If the expression is a constant, return the
        floating point result.
        This returns an Lcapy Expr object. If you want a numerical value
        use expr.fval for a float value or expr.cval for a complex value.
        See sympy.evalf for more details.
        """
        # Copy so the result keeps this expression's class and assumptions.
        new = self.copy()
        # Don't create Expr since SymPy sympify will create Integers
        # rather than Floats if the truncated Float looks like an integer.
        new.expr = self.ratfloat().expr.evalf(n, *args, **kwargs)
        return new
    def __hash__(self):
        """Hash on the wrapped SymPy expression.
        This is needed for Python3 so can create a dict key,
        say for subs."""
        return hash(self.expr)
    def _to_class(self, cls, expr):
        # Wrap a raw result in the matching Lcapy container class,
        # falling back to `cls` for scalar results.
        if isinstance(expr, list):
            return ExprList(expr)
        elif isinstance(expr, tuple):
            return ExprTuple(expr)
        elif isinstance(expr, dict):
            return ExprDict(expr)
        return cls(expr)
    # This will allow sym.sympify to magically extract the sympy expression
    # but it will also bypass our __rmul__, __radd__, etc. methods that get called
    # when sympy punts. Thus pi * t becomes a Mul rather than TimeDomainExpression.
    #
    # def _sympy_(self):
    #     # This is called from sym.sympify
    #     return self.expr
def __getattr__(self, attr):
if False:
print(self.__class__.__name__, attr)
expr1 = self.expr
try:
a = getattr(expr1, attr)
except:
# Hack for ubuntu-20.04, python 3.7 and 3.8
if attr == 'abbrev':
return ''
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))
# This gets called if there is no explicit attribute attr for
# this instance. We call the method of the wrapped sympy
# class and rewrap the returned value if it is a sympy Expr
# object.
# FIXME. This propagates the assumptions. There is a
# possibility that the operation may violate them.
# If it is not callable, directly wrap it.
if not callable(a):
if not isinstance(a, sym.Expr):
return a
ret = a
if hasattr(self, 'assumptions'):
return self.__class__(ret, **self.assumptions)
return self._to_class(self.__class__, ret)
# If it is callable, create a function to pass arguments
# through and wrap its return value.
def wrap(*args, **kwargs):
"""This is quantity for a SymPy function.
For help, see the SymPy documentation."""
# Extract SymPy expressions from Lcapy expressions
newargs = []
for arg in args:
try:
newargs.append(arg.expr)
except AttributeError:
newargs.append(arg)
# Extract SymPy expressions from Lcapy expressions
newkwargs = {}
for key, arg in kwargs.items():
try:
newkwargs[key] = kwargs[key].expr
except AttributeError:
newkwargs[key] = kwargs[key]
ret = a(*newargs, **newkwargs)
if not isinstance(ret, sym.Expr):
# Hack for jupyter notebook printer returning
# png as a bytes object
if isinstance(ret, bytes):
return ret
# Wrap list, tuple, etc.
return expr(ret)
# Wrap the return value
cls = self.__class__
if hasattr(self, 'assumptions'):
return cls(ret, **self.assumptions)
return self._to_class(self.__class__, ret)
return wrap
    def debug(self):
        """Print the SymPy expression and the assumptions for all symbols in
        the expression. See also srepr."""
        name = self.__class__.__name__
        s = '%s(' % name
        # Indent continuation lines to align after the opening parenthesis.
        print(symdebug(self.expr, s, len(name) + 1))
def srepr(self):
"""Print the SymPy abstract syntax tree for the expression."""
sym.srepr(self.sympy)
    @property
    def sympy(self):
        """Return the wrapped SymPy expression."""
        return self.expr
@property
def expr_with_units(self):
"""Return SymPy expression with units."""
if self.units == 1:
return expr
# Don't evaluate otherwise 1 A gets printed as A.
return sym.Mul(self.expr, self.units, evaluate=False)
    @property
    def expr_with_canonical_units(self):
        """Return SymPy expression multiplied by its canonical units."""
        return self.expr * self.canonical_units
    @property
    def func(self):
        """Return the top-level function in the Sympy Expression.
        For example, this returns Mul for the expression `3 * s`.
        See also .args(), to return the args, in this case `(3, s)`"""
        return self.expr.func
    def __abs__(self):
        """Absolute value."""
        return self.__class__(self.abs, **self.assumptions)
    def __neg__(self):
        """Negation."""
        return self.__class__(-self.expr, **self.assumptions)
    def _incompatible(self, x, op, reason=''):
        # Raise a uniform error for an unsupported binary operation.
        raise ValueError("Cannot determine %s(%s) %s %s(%s)%s" %
                         (self.__class__.__name__, self, op,
                          x.__class__.__name__, x, reason))
    def _incompatible_domains(self, x, op):
        # The operands live in different domains (e.g., time vs Laplace).
        self._incompatible(x, op, ' since the domains are incompatible')
    def _incompatible_quantities(self, x, op):
        # The resulting quantity has no supported expression class.
        self._incompatible(x, op, """ since the units of the result are unsupported.
As a workaround use x.as_expr() %s y.as_expr()""" % op)
    def _dubious_quantities(self, x, op):
        # E.g., multiplying two time-domain signals where convolution
        # is almost certainly intended.
        self._incompatible(x, op, '; you probably should be using convolution')
def _add_compatible_domains(self, x):
return self.domain == x.domain
def _mul_compatible_domains(self, x):
if self.domain == x.domain:
return True
if (self.is_constant_domain or x.is_constant_domain):
return True
# Allow phasor(x) * omega, etc.
if (self.is_phasor_domain and x.is_angular_fourier_domain and
self.omega == x.var):
return True
return False
def _div_compatible_domains(self, x):
if self.domain == x.domain:
return True
if (self.is_constant_domain or x.is_constant_domain):
return True
return False
    def __compat_add__(self, x, op):
        """Determine the result class and coerced operands for an
        additive operation or comparison `op` between self and `x`.
        Returns (cls, self, x, assumptions); raises ValueError when the
        units, domains, or quantities are incompatible."""
        assumptions = self.assumptions.copy()
        if not isinstance(x, Expr):
            x = expr(x)
        if state.check_units:
            # Compare canonical (simplified) units; zero operands are
            # exempt so voltage(0) + 5 works.
            sunits = self.canonical_units
            xunits = x.canonical_units
            if (sunits != xunits and self.expr != 0 and x.expr != 0 and not
                (state.loose_units and x.is_undefined)):
                self._incompatible(x, op, ' since the units %s are incompatible with %s' % (self.units, x.units))
        cls = self.__class__
        xcls = x.__class__
        if x.is_constant_domain and x.quantity == 'undefined':
            if state.loose_units or x.expr == 0:
                # Allow voltage(1) + 2 etc.
                return cls, self, x, assumptions
            if self.is_transfer:
                # Allow transfer(1) == 1
                return cls, self, x, assumptions
        if (isinstance(self, (LaplaceDomainExpression, ZDomainExpression)) or
            isinstance(x, (LaplaceDomainExpression, ZDomainExpression))):
            # Transform-domain addition combines the operand assumptions.
            assumptions = self.assumptions.add(x)
        if self.is_constant_domain and self.quantity == 'undefined':
            return xcls, self, x, assumptions
        if self.quantity == x.quantity:
            if self.is_constant_domain:
                return xcls, self, x, assumptions
            if x.is_constant_domain:
                return cls, self, x, assumptions
            if self.domain == x.domain:
                return cls, self, x, assumptions
            # For phasor comparisons...
            if self.is_phasor_domain and x.is_angular_fourier_domain:
                return cls, self, cls(x), assumptions
            if self.is_angular_fourier_domain and x.is_phasor_domain:
                return xcls, cls(self), x, assumptions
            if not self._add_compatible_domains(x):
                self._incompatible_domains(x, op)
        # expr + voltage
        if self.quantity == 'undefined':
            if state.loose_units or x.is_transfer:
                return xcls, self, x, assumptions
        # voltage + expr
        if x.quantity == 'undefined':
            if state.loose_units or self.is_transfer:
                return cls, self, x, assumptions
        self._incompatible_quantities(x, op)
def __mul__(self, x):
"""Multiply."""
from .super import Superposition
if isinstance(x, Superposition):
return x.__mul__(self)
if not isinstance(x, Expr):
if isinstance(x, (tuple, list, dict)):
raise ValueError('Cannot multiply %s by a tuple, list, or dict %s' % (self, x))
x = expr(x)
# Handle omega * t
if (self.__class__ == AngularFourierDomainExpression and
x.__class__ == TimeDomainExpression):
return TimeDomainExpression(self.expr * x.expr)
if (x.__class__ == AngularFourierDomainExpression and
self.__class__ == TimeDomainExpression):
return TimeDomainExpression(self.expr * x.expr)
# Try to convert immittance to a constant so that can handle I(t) * Z
if x.is_immittance:
try:
xunits = x.units
x = x.as_constant()
x.units = xunits
except:
pass
if x.is_time_domain and self.is_time_domain:
if (self.is_signal and x.is_immittance or x.is_signal and self.is_immittance):
self._dubious_quantities(x, '/')
if not self._mul_compatible_domains(x):
self._incompatible_domains(x, '*')
if self.is_transform_domain:
assumptions = self.assumptions.convolve(x)
else:
assumptions = Assumptions()
xquantity, yquantity = x.quantity, self.quantity
# Maybe use undefined for voltage**2 etc.
if xquantity == 'undefined':
xquantity = 'constant'
if yquantity == 'undefined':
yquantity = 'constant'
key = (yquantity, xquantity)
if key not in self._mul_mapping:
key = (xquantity, yquantity)
if key not in self._mul_mapping:
# TODO: What about voltage**2. etc.
self._incompatible_quantities(x, '*')
quantity = self._mul_mapping[key]
if quantity == 'constant':
quantity = 'undefined'
if self.is_constant_domain:
cls = x._class_by_quantity(quantity)
else:
cls = self._class_by_quantity(quantity)
value = self.expr * x.expr
result = cls(value, **assumptions)
result.units = self.units * x.units
return result
def __rmul__(self, x):
"""Reverse multiply."""
return self.__mul__(x)
def __truediv__(self, x, floor=False):
"""True divide."""
if not isinstance(x, Expr):
x = expr(x)
# Try to convert immittance to a constant so that can handle V(t) / Z
if x.is_immittance:
try:
x = x.as_constant()
except:
pass
if x.is_time_domain and self.is_time_domain:
if (self.is_signal and x.is_immittance or x.is_signal and self.is_immittance):
self._dubious_quantities(x, '/')
if not self._div_compatible_domains(x):
self._incompatible_domains(x, '/')
assumptions = self.assumptions.convolve(x)
xquantity, yquantity = x.quantity, self.quantity
# Maybe use undefined for voltage**2 etc.
if xquantity == 'undefined':
xquantity = 'constant'
if yquantity == 'undefined':
yquantity = 'constant'
key = (yquantity, xquantity)
if key not in self._div_mapping:
# TODO: What about voltage**2. etc.
self._incompatible_quantities(x, '/')
quantity = self._div_mapping[key]
if quantity == 'constant':
quantity = 'undefined'
if self.is_constant_domain:
cls = x._class_by_quantity(quantity)
else:
cls = self._class_by_quantity(quantity)
if floor:
value = self.expr // x.expr
else:
value = self.expr / x.expr
result = cls(value, **assumptions)
result.units = self.units / x.units
return result
    def __rtruediv__(self, x, floor=False):
        """Reverse true divide (floor divide if `floor` is True)."""
        from .matrix import Matrix
        if isinstance(x, Matrix):
            # Let the matrix class handle element-wise division.
            if floor:
                return x // self.expr
            else:
                return x / self.expr
        if not isinstance(x, Expr):
            x = expr(x)
        return x.__truediv__(self, floor)
    def __floordiv__(self, x):
        """Floor divide."""
        return self.__truediv__(x, floor=True)
    def __rfloordiv__(self, x):
        """Reverse floor divide."""
        return self.__rtruediv__(x, floor=True)
    def __add__(self, x):
        """Add, checking quantity/domain compatibility."""
        from .matrix import Matrix
        if isinstance(x, Matrix):
            # Let the matrix class handle element-wise addition.
            return x + self.expr
        cls, self, x, assumptions = self.__compat_add__(x, '+')
        return cls(self.expr + x.expr, **assumptions)
    def __radd__(self, x):
        """Reverse add."""
        if not isinstance(x, Expr):
            x = expr(x)
        return x.__add__(self)
    def __sub__(self, x):
        """Subtract, checking quantity/domain compatibility."""
        from .super import Superposition
        if isinstance(x, Superposition):
            return -x + self
        cls, self, x, assumptions = self.__compat_add__(x, '-')
        return cls(self.expr - x.expr, **assumptions)
    def __rsub__(self, x):
        """Reverse subtract."""
        if not isinstance(x, Expr):
            x = expr(x)
        return x.__sub__(self)
    def __pow__(self, x):
        """Power.  Squaring and reciprocal are routed through multiply
        and divide so quantity/unit tracking applies; other powers are
        only allowed for undefined quantities."""
        if x == 2:
            return self.__mul__(self)
        elif x == -1:
            return self.__rtruediv__(1)
        elif self.quantity != 'undefined':
            raise ValueError('Cannot compute %s(%s) ** %s' % (self.__class__.__name__, self, x))
        if not isinstance(x, Expr):
            x = expr(x)
        result = self.expr.__pow__(x.expr)
        if not self.is_constant_domain:
            return self.__class__(result)
        return x.__class__(result)
    def __rpow__(self, x):
        """Reverse pow, x**self."""
        if not isinstance(x, Expr):
            x = expr(x)
        return x.__pow__(self)
    def __or__(self, x):
        """Parallel combination."""
        return self.parallel(x)
    def __eq__(self, x):
        """Test for mathematical equality as far as possible.
        This cannot be guaranteed since it depends on simplification.
        Note, SymPy comparison is for structural equality."""
        # Note, this is used by the in operator.
        if x is None:
            return False
        # Handle self == []
        if isinstance(x, list):
            return False
        # Disallow t == 't', etc.
        if isinstance(x, str):
            return False
        try:
            cls, self, x, assumptions = self.__compat_add__(x, '==')
        except ValueError:
            # Incompatible quantities/domains are simply unequal.
            return False
        x = cls(x)
        # This does not speed up the comparison.
        #if self.expr == x.expr:
        #    return True
        # This fails if one of the operands has the is_real attribute
        # and the other doesn't...
        return sym.simplify(self.expr - x.expr) == 0
    def __ne__(self, x):
        """Test for mathematical inequality as far as possible.
        This cannot be guaranteed since it depends on simplification.
        Note, SymPy comparison is for structural equality."""
        # NOTE(review): unlike __eq__, lists and strings are not
        # special-cased here; confirm this asymmetry is intended.
        if x is None:
            return True
        try:
            cls, self, x, assumptions = self.__compat_add__(x, '!=')
        except ValueError:
            return True
        x = cls(x)
        return sym.simplify(self.expr - x.expr) != 0
    def __gt__(self, x):
        """Greater than."""
        if x is None:
            return True
        cls, self, x, assumptions = self.__compat_add__(x, '>')
        x = cls(x)
        return self.expr > x.expr
    def __ge__(self, x):
        """Greater than or equal."""
        if x is None:
            return True
        cls, self, x, assumptions = self.__compat_add__(x, '>=')
        x = cls(x)
        return self.expr >= x.expr
    def __lt__(self, x):
        """Less than."""
        if x is None:
            return True
        cls, self, x, assumptions = self.__compat_add__(x, '<')
        x = cls(x)
        return self.expr < x.expr
    def __le__(self, x):
        """Less than or equal."""
        if x is None:
            return True
        cls, self, x, assumptions = self.__compat_add__(x, '<=')
        x = cls(x)
        return self.expr <= x.expr
def cancel_terms(self):
"""Simplify terms in expression individually by converting
each to rational functions."""
result = 0
for term in self.expr.as_ordered_terms():
result += sym.cancel(term)
return self.__class__(result, **self.assumptions)
    def convolve(self, x, commutate=False, **assumptions):
        """Convolve self with x.
        y(t) = int_{taumin}^{taumax} self(tau) x(t - tau) d tau
        If `commutate` is True, swap order of functions in integral.
        The result is an unevaluated integral.  It can be evaluated using
        the `doit()` method.
        Note, this method does not simplify the convolution integral if one
        of the functions contains a Dirac delta.  This can be done by
        calling the `simplify_dirac_delta()` method followed by the
        `simplify()` method.
        """
        if self.domain != x.domain:
            self._incompatible_domains(x, 'convolve')
        x = expr(x)
        f1 = self.expr
        f2 = x.expr
        if commutate:
            # Swap the roles of the two functions in the integrand.
            f1, f2 = f2, f1
        # Integration variable: tau in the time domain, nu otherwise.
        dummyvar = tausym if self.is_time_domain else nusym
        # Default to an infinite integration range, then tighten the
        # limits when either operand is known to be causal.
        taumin = -oo
        taumax = oo
        if commutate:
            if x.is_causal:
                taumax = self.var
            if self.is_causal:
                taumin = 0
        else:
            if x.is_causal:
                taumin = 0
            if self.is_causal:
                taumax = self.var
        if x.is_causal and self.is_causal:
            # The convolution of two causal signals is causal.
            assumptions['causal'] = True
        result = sym.Integral(f1.subs(self.var, self.var - dummyvar) *
                              f2.subs(self.var, dummyvar),
                              (dummyvar, taumin, taumax))
        ret = self.__class__(result, **assumptions)
        # Convolution scales the units by the domain units (e.g. seconds).
        ret.units = self.units * x.units * self.domain_units
        return ret
def parallel(self, x):
"""Parallel combination."""
cls, self, x, assumptions = self.__compat_add__(x, '|')
x = cls(x)
return cls(self.expr * x.expr / (self.expr + x.expr), **assumptions)
def copy(self):
"""Copy the expression."""
return self.__class__(self.expr, **self.assumptions)
@property
def conj(self):
"""Return complex conjugate."""
return self.__class__(sym.conjugate(self.expr), **self.assumptions)
def conjugate(self):
"""Return complex conjugate."""
return self.__class__(sym.conjugate(self.expr), **self.assumptions)
@property
def real(self):
"""Return real part.
Note, SymPy does not always extract the real part. For example,
`exp(t*(-1 - sqrt(5)*j))/(20*sqrt(5) - 20*j)` or
`exp(j * f) / (1 - exp(j * f))`
A work-around is to use `rationalize_denominator()` or
`expand(complex=True)` first.
"""
assumptions = self.assumptions.copy()
assumptions['real'] = True
expr = self.expr
# This can make operations such as abs really slow.
# Without it, we will sometimes get Re functions.
# expr = expr.expand(complex=True)
dst = self.__class__(symsimplify(sym.re(expr)), **assumptions)
dst.part = 'real'
return dst
@property
def re(self):
"""Return real part, see `real`"""
return self.real
@property
def imag(self):
"""Return imaginary part.
Note, SymPy does not always extract the imaginary part. For example,
`exp(t*(-1 - sqrt(5)*j))/(20*sqrt(5) - 20*j)` or
`exp(j * f) / (1 - exp(j * f))`
A work-around is to use `rationalize_denominator()` or
`expand(complex=True)` first.
"""
assumptions = self.assumptions.copy()
if self.is_real:
dst = self.__class__(0, **assumptions)
dst.part = 'imaginary'
return dst
assumptions['real'] = True
expr = self.expr
# This can make operations such as abs really slow.
# Without it, we will sometimes get Im functions.
# expr = expr.expand(complex=True)
dst = self.__class__(symsimplify(sym.im(expr)), **assumptions)
dst.part = 'imaginary'
return dst
    @property
    def im(self):
        """Return imaginary part, see `imag`"""
        return self.imag
@property
def real_imag(self):
"""Rewrite as x + j * y."""
return self.real + j * self.imag
@property
def _ratfun(self):
try:
return self.__ratfun
except:
pass
if (self.var is None or self.has(sym.Derivative) or
self.has(sym.Integral)):
self.__ratfun = None
else:
try:
# Note, this handles expressions that are products of
# rational functions and arbitrary delays.
self.__ratfun = Ratfun(self.expr, self.var)
except:
self.__ratfun = None
return self.__ratfun
@property
def ba(self):
"""Return lists of numerator and denominator coefficients."""
a = self.D.coeffs()
b = self.N.coeffs()
a0 = a[0]
if a0 != 1:
a = ExprList([ax / a0 for ax in a])
b = ExprList([bx / a0 for bx in b])
return b, a
@property
def a(self):
"""Return list of denominator coefficients."""
b, a = self.ba
return a
@property
def b(self):
"""Return list of numerator coefficients."""
b, a = self.ba
return b
@property
def K(self):
"""Return gain."""
return self.N.coeffs()[0] / self.D.coeffs()[0]
@property
def N(self):
"""Return numerator of rational function.
The denominator is chosen so that it is a polynomial."""
return self.numerator
@property
def D(self):
"""Return denominator of rational function.
The denominator is chosen so that it is a polynomial."""
return self.denominator
@property
def numerator(self):
"""Return numerator of rational function.
The denominator is chosen so that it is a polynomial."""
N, D = self.as_N_D()
return N
@property
def denominator(self):
"""Return denominator of rational function.
The denominator is chosen so that it is a polynomial."""
N, D = self.as_N_D()
return D
def as_numer_denom(self, monic_denominator=False, use_sympy=False):
return self.as_N_D(monic_denominator, use_sympy)
def rationalize_denominator(self):
"""Rationalize denominator by multiplying numerator and denominator by
complex conjugate of denominator."""
N = self.N
D = self.D
Dconj = D.conj
Nnew = (N * Dconj).simplify()
#Dnew = (D * Dconj).simplify()
Dnew = (D.real**2 + D.imag**2).simplify()
Nnew = Nnew.real_imag
return Nnew / Dnew
def divide_top_and_bottom(self, factor):
"""Divide numerator and denominator by common factor."""
N = (self.N / factor).expand()
D = (self.D / factor).expand()
return N / D
def factor_const(self):
from .utils import factor_const
c, r = factor_const(self.sympy, self.var)
return ConstantDomainExpression(c), self.__class__(r, **self.assumptions)
def term_const(self):
from .utils import term_const
c, r = term_const(self.sympy, self.var)
return ConstantDomainExpression(c), self.__class__(r, **self.assumptions)
def multiply_top_and_bottom(self, factor):
"""Multiply numerator and denominator by common factor."""
N = self.N.expr
D = self.D.expr
N = sym.Mul(N, factor, evaluate=False)
D = sym.Mul(D, factor, evaluate=False)
ID = sym.Pow(D, -1, evaluate=False)
expr = sym.Mul(N, ID, evaluate=False)
return self.__class__(expr)
@property
def magnitude(self):
"""Return magnitude."""
if self.is_real:
dst = expr(abs(self.expr))
dst.part = 'magnitude'
return dst
R = self.rationalize_denominator()
N = R.N
Dnew = R.D
Nnew = sqrt((N.real**2 + N.imag**2).simplify())
dst = Nnew / Dnew
dst = dst.as_quantity(self.quantity)
dst.part = 'magnitude'
return dst
@property
def abs(self):
"""Return magnitude."""
return self.magnitude
@property
def sign(self):
"""Return sign."""
return self.__class__(sym.sign(self.expr), **self.assumptions)
@property
def dB(self):
"""Return magnitude in dB. If squared voltage, squared current,
or power, this uses 10 * log10 otherwise 20 * log10."""
# Need to clip for a desired dynamic range?
# Assume reference is 1.
if self.is_power or self.is_squared:
dst = 10 * log10(self.magnitude)
else:
dst = 20 * log10(self.magnitude)
dst.part = 'magnitude'
dst.units = dB
return dst
@property
def phase(self):
"""Return phase in radians."""
if self.is_time_domain or self.is_discrete_time_domain:
raise ValueError('Cannot determine phase of time-domain expression %s' % self)
R = self.rationalize_denominator()
N = R.N
if N.imag == 0:
if N.real >= 0:
dst = expr(0)
else:
dst = expr(sym.pi)
else:
if N.real != 0:
G = gcd(N.real, N.imag)
N = N / G
dst = atan2(N.imag, N.real)
dst.part = 'phase'
dst.units = uu.rad
return dst
@property
def phase_degrees(self):
"""Return phase in degrees."""
dst = self.phase * 180.0 / sym.pi
dst.part = 'phase'
dst.units = uu.deg
return dst
@property
def angle(self):
"""Return phase angle (in radians)."""
return self.phase
@property
def polar(self):
"""Return in polar format."""
return self.abs * exp(j * self.phase)
@property
def cartesian(self):
"""Return in Cartesian format."""
return self.real + j * self.imag
@property
def is_number(self):
"""Returns True if expression is a number."""
return self.expr.is_number
@property
def is_constant(self):
"""Returns True if expression does not have any free symbols (compare with `is_unchanging`)."""
return self.expr.free_symbols == set()
@property
def is_unchanging(self):
"""Returns True if expression does not have a domain variable (compare with `is_constant`)."""
if self.var is None:
return True
return self.var not in self.expr.free_symbols
    def evaluate(self, arg=None):
        """Evaluate expression at arg. `arg` may be a scalar or a vector.
        The result is of type float or complex.
        If arg is iterable, a NumPy array is returned.
        There can be only one or fewer undefined variables in the expression.
        This is replaced by `arg` and then evaluated to obtain a result.

        Raises RuntimeError if the expression cannot be lambdified or
        evaluated, and ValueError if the expression has extra undefined
        symbols or no value is supplied for the domain variable.
        """
        import numpy as np
        # Causality is used by `func` below to short-circuit evaluation
        # and return 0 for negative time-domain arguments.
        is_time = self.is_time_domain or self.is_discrete_time_domain
        is_causal = is_time and self.is_causal
        def evaluate_expr(expr, var, arg):
            # Numerically evaluate `expr` at `arg` (scalar or iterable)
            # by lambdifying with custom numeric implementations of the
            # Lcapy/SymPy special functions defined below.
            # For some reason the new lambdify will convert a float
            # argument to complex
            def exp(arg):
                # Hack to handle exp(-a * t) * Heaviside(t) for t < 0
                # by trying to avoid inf when number overflows float.
                if isinstance(arg, complex):
                    if arg.real > 500:
                        arg = 500 + 1j * arg.imag
                elif arg > 500:
                    arg = 500;
                return np.exp(arg)
            def rect(arg):
                # Define in terms of Heaviside for consistency
                return heaviside(arg + 0.5) - heaviside(arg - 0.5)
            def sign(arg):
                # Define in terms of Heaviside for consistency
                return 2 * heaviside(arg) - 1
            def dtsign(arg):
                # Define in terms of unitstep for consistency
                return 2 * unitstep(arg) - 1
            def dtrect(arg):
                # Define in terms of UnitStep for consistency
                return unitstep(arg + 0.5) - unitstep(arg - 0.5)
            def sinc(arg):
                """SymPy sinc."""
                # This is used for sinc created by sympify, e.g., a =
                # expr('sinc(n)'). SymPy uses the unnormalized form
                # but NumPy (and Lcapy) use the normalized form.
                # Lambdify does some jiggery pokery and divides the
                # arg by pi since it is expecting that the NumPy sinc
                # function is going to be used. SymPy's choice is
                # unfortunate from a numerical accuracy point of view
                # since sincn(n) should be zero for integer n, n != 0.
                # Undo SymPy jiggery pokery.
                arg = arg * np.pi
                # NOTE(review): after undoing the pi scaling this computes
                # the normalized sinc sin(pi*x)/(pi*x); SymPy's unnormalized
                # sinc would be sin(x)/x - confirm the extra pi is intended.
                return 1.0 if arg == 0 else np.sin(np.pi * arg) / (np.pi * arg)
            def sincn(arg):
                """Normalized sinc."""
                # Note, if sincn is made to print sinc, then lambdify will
                # call sinc. Grrrr.
                return 1.0 if arg == 0 else np.sin(np.pi * arg) / (np.pi * arg)
            def sincu(arg):
                """Unnormalized sinc."""
                return 1.0 if arg == 0 else np.sin(arg) / arg
            def psinc(M, arg):
                """Periodic sinc."""
                D = np.sin(np.pi * arg)
                return 1.0 if D == 0 else np.sin(M * np.pi * arg) / (M * D)
            def trap(arg, alpha):
                # Trapezoid pulse: unit height, transition width alpha.
                absarg = abs(arg)
                foo = absarg - 0.5
                if alpha == 0:
                    # Degenerate case: a rectangle.
                    if arg < -0.5 or arg > 0.5:
                        return 0.0
                    return 1.0
                if foo >= 0.5 * alpha:
                    return 0.0
                elif foo <= -0.5 * alpha:
                    return 1.0
                else:
                    return 0.5 - foo / alpha
            def tri(arg):
                # Triangle pulse of unit height and width 2.
                if arg >= 1:
                    return 0.0
                elif arg <= -1:
                    return 0.0
                else:
                    return 1.0 - abs(arg)
            def ramp(arg):
                if arg >= 0:
                    return arg
                return 0
            def rampstep(arg):
                # Linear rise from 0 to 1 over [0, 1].
                if arg < 0:
                    return 0
                elif arg > 1:
                    return 1
                else:
                    return arg
            def dirac(arg):
                return np.inf if arg == 0.0 else 0.0
            def unitimpulse(arg):
                return 1.0 if arg == 0 else 0.0
            def unitstep(arg, zero=None):
                # `unitstep_zero` is presumably a module-level default for
                # the value at arg == 0 - defined outside this view.
                if arg == 0:
                    if zero is None:
                        zero = unitstep_zero
                    return zero
                return 1.0 if arg >= 0 else 0.0
            def heaviside(arg, zero=None):
                # `heaviside_zero` is presumably a module-level default for
                # the value at arg == 0 - defined outside this view.
                if arg == 0:
                    if zero is None:
                        zero = heaviside_zero
                    return zero
                return 1.0 if arg > 0.0 else 0.0
            def sqrt(arg):
                # Large numbers get converted to ints and int has no sqrt
                # attribute so convert to float.
                if isinstance(arg, int):
                    arg = float(arg)
                if not isinstance(arg, complex) and arg < 0:
                    arg = arg + 0j
                return np.sqrt(arg)
            # Distinguish scalar from iterable input via indexing.
            try:
                arg0 = arg[0]
                scalar = False
            except:
                arg0 = arg
                scalar = True
            # For negative arguments, np.sqrt will return Nan.
            # np.lib.scimath.sqrt converts to complex but cannot be used
            # for lamdification!
            func1 = lambdify(var, expr,
                             [{'DiracDelta' : dirac,
                               'Heaviside' : heaviside,
                               'UnitImpulse' : unitimpulse,
                               'UnitStep' : unitstep,
                               'dtrect' : dtrect, 'dtsign' : dtsign,
                               'sinc' : sinc, 'sincn' : sincn,
                               'sincu' : sincu, 'psinc' : psinc,
                               'rect' : rect, 'tri' : tri, 'trap' : trap,
                               'ramp' : ramp, 'rampstep' : rampstep,
                               'sqrt' : sqrt, 'exp' : exp, 'sign' : sign},
                              "scipy", "numpy", "math", "sympy"])
            def func(arg):
                # Lambdify barfs on (-1)**n if for negative values of n.
                # even if have (-1)**n * Heaviside(n)
                # So this function heads Lambdify off at the pass,
                # if the function is causal.
                if is_causal and arg < 0:
                    return 0
                try:
                    result = func1(arg)
                except ZeroDivisionError:
                    result = complex(expr.limit(var, arg))
                # If get NaN evaluate limit. This helps for sin(t) / t.
                if np.isnan(result):
                    result = complex(expr.limit(var, arg))
                # If inf, try the limit of the simplified expression
                # (e.g. a removable singularity hidden by the form).
                if np.isinf(result):
                    result = complex(sym.simplify(expr).limit(var, arg))
                return result
            try:
                # Try to flush out weirdness using first argument
                response = func(arg0)
            except NameError as e:
                raise RuntimeError('Cannot evaluate expression %s: %s' % (self, e))
            except AttributeError as e:
                # Could return NaN but this would take some jiggery
                # pokery. One solution is to find Piecewise with a
                # single clause, such as t >= 0 add another Piecewise
                # clause for t < 0 to return NaN. Adding Piecewise to
                # lambdify functions does not seem to work.
                if expr.is_Piecewise:
                    raise RuntimeError(
                        'Cannot evaluate expression %s,'
                        ' due to undetermined conditional result' % self)
                raise RuntimeError(
                    'Cannot evaluate expression %s,'
                    ' probably have a mysterious function: %s' % (self, e))
            except TypeError as e:
                raise RuntimeError('Cannot evaluate expression %s: %s' % (self, e))
            if scalar:
                # Drop a (numerically) zero imaginary part.
                if np.allclose(response.imag, 0.0):
                    response = response.real
                return response
            try:
                response = np.array([complex(func(arg0)) for arg0 in arg])
            except TypeError:
                raise TypeError(
                    'Cannot evaluate expression %s,'
                    ' probably have undefined symbols' % self)
            # Drop a (numerically) zero imaginary part of the vector result.
            if np.allclose(response.imag, 0.0):
                response = response.real
            return response
        # Use doit to expand Sum, etc.
        expr = self.doit().expr
        if not hasattr(self, 'var') or self.var is None:
            # No domain variable: at most one free symbol may be used
            # as the substitution target.
            symbols = list(expr.free_symbols)
            if arg is None:
                if len(symbols) == 0:
                    return expr.evalf()
                raise ValueError('Undefined symbols %s in expression %s' % (tuple(symbols), self))
            if len(symbols) == 0:
                print('Ignoring arg %s' % arg)
                return expr.evalf()
            elif len(symbols) == 1:
                return evaluate_expr(expr, symbols[0], arg)
            else:
                raise ValueError('Undefined symbols %s in expression %s' % (tuple(symbols), self))
        var = self.var
        # Use symbol names to avoid problems with symbols of the same
        # name with different assumptions.
        varname = var.name
        free_symbols = set([symbol.name for symbol in expr.free_symbols])
        if varname in free_symbols:
            free_symbols -= set((varname, ))
        if free_symbols != set():
            raise ValueError('Undefined symbols %s in expression %s' % (tuple(free_symbols), self))
        if arg is None:
            if expr.has(var):
                raise ValueError('Need value to evaluate expression at')
            # The arg is irrelevant since the expression is a constant.
            arg = 0
        # Normalize a symbolic arg (e.g. sym.pi) to a numeric value.
        try:
            arg = arg.evalf()
        except:
            pass
        return evaluate_expr(expr, var, arg)
def has(self, *patterns):
"""Test whether any subexpressions matches any of the patterns. For example,
V.has(exp(t))
V.has(t)
"""
tweak_patterns = [pattern.expr if isinstance(pattern, (Expr, Function)) else pattern for pattern in patterns]
return self.expr.has(*tweak_patterns)
def has_symbol(self, sym):
"""Test if have symbol contained. For example,
V.has_symbol('a')
V.has_symbol(t)
"""
return self.has(expr(sym))
    def _subs1(self, old, new, **kwargs):
        """Substitute `new` for `old` in the expression and return a new
        expression.  If `old` is the domain variable (or there is none),
        the result takes the domain of `new`; otherwise the domain is
        unchanged."""
        # This will fail if a variable has different attributes,
        # such as positive or real.
        # Should check for bogus substitutions, such as t for s.
        # Probably should disallow domain changing. Will need to
        # iterate through the domain variables and check if new
        # contains one. Then disallow unless same as self.var. Will
        # also need to handle s-> jw in select() and transform().
        if new is old:
            return self
        expr = new
        if isinstance(new, Expr):
            # Replacing the domain variable changes the domain.
            if old == self.var or self.var is None:
                domain = new.domain
            else:
                domain = self.domain
            expr = new.expr
        else:
            domain = self.domain
            expr = sympify(expr)
        # Map strings/known names to the corresponding symbol.
        old = symbol_map(old)
        if isinstance(old, Expr):
            old = old.expr
        if isinstance(expr, list):
            # Get lists from solve. These stymie sympy's subs.
            if len(expr) == 1:
                expr = expr[0]
            else:
                warn('Substituting a list...')
        if isinstance(old, str):
            # symbol_map left it as a string: nothing to substitute.
            result = self.sympy
        else:
            # Optional diagnostics for substitutions that do nothing.
            if state.warn_subs and not self.sympy.has(old):
                warn('Expression %s does not have %s' % (self.sympy, old))
            if state.break_subs and not self.sympy.has(old):
                import pdb; pdb.set_trace()
            result = self.sympy.subs(old, expr, **kwargs)
        # If get empty Piecewise, then result unknowable. TODO: sympy
        # 1.2 requires Piecewise constructor to have at least one
        # pair.  (Disabled via the `False and` guard.)
        if False and result.is_Piecewise and result == sym.Piecewise():
            result = sym.nan
        return self.change(result, domain=domain, **self.assumptions)
def transform(self, arg, **assumptions):
"""Transform into a different domain.
If arg is f, s, t, omega, jomega perform domain transformation,
otherwise perform substitution.
Note (5 * s)(omega) will fail since 5 * s is assumed not to be
causal and so Fourier transform is unknown. However, Zs(5 *
s)(omega) will work since Zs is assumed to be causal."""
from .transform import transform
return transform(self, arg, **assumptions)
def __call__(self, arg, **assumptions):
"""Transform domain or substitute arg for variable.
Substitution is performed if arg is a tuple, list, numpy
array, or constant. If arg is a tuple or list return a list.
If arg is an numpy array, return numpy array.
Domain transformation is performed if arg is a domain variable
or an expression of a domain variable.
See also evaluate.
"""
from numpy import ndarray, array
if isinstance(arg, (tuple, list)):
return [self._subs1(self.var, arg1) for arg1 in arg]
if isinstance(arg, ndarray):
return array([self._subs1(self.var, arg1) for arg1 in arg])
from .transform import call
return call(self, arg, **assumptions)
def _select(self, kind):
from .transform import select
return select(self, kind)
def limit(self, var, value, dir='+'):
"""Determine limit of expression(var) at var = value.
If `dir == '+'` search from right else if `dir == '-'`
search from left."""
# Need to use lcapy sympify otherwise could use
# getattr to call sym.limit.
var = sympify(var)
value = sympify(value)
# Experimental. Compare symbols by names.
symbols = list(self.expr.free_symbols)
symbolnames = [str(symbol) for symbol in symbols]
if str(var) not in symbolnames:
return self
var = symbols[symbolnames.index(str(var))]
ret = sym.limit(self.expr, var, value, dir=dir)
return self.__class__(ret, **self.assumptions)
def simplify(self, **kwargs):
"""Simplify expression.
This throws the kitchen sink at the problem but can be slow.
See also simplify_terms and simplify_factors."""
# This might be dodgy...
if self.has(AppliedUndef) and not self.has(sym.Integral):
new, defs = self.remove_undefs(return_mappings=True)
return new.simplify(**kwargs).subs(defs)
ret = symsimplify(self.expr, **kwargs)
return self.__class__(ret, **self.assumptions)
def simplify_units(self):
"""Simplify units into canonical form."""
ret = self.__class__(self, **self.assumptions)
ret.units = units.simplify_units(self.units)
return ret
def simplify_conjugates(self, **kwargs):
"""Combine complex conjugate terms."""
result = simplify_conjugates(self.expr)
return self.__class__(result, **self.assumptions)
def simplify_factors(self, **kwargs):
"""Simplify factors in expression individually."""
result = 0
for factor in self.expr.as_ordered_factors():
result *= symsimplify(factor, **kwargs)
return self.__class__(result, **self.assumptions)
def simplify_terms(self, **kwargs):
"""Simplify terms in expression individually."""
result = 0
for term in self.expr.as_ordered_terms():
result += symsimplify(term, **kwargs)
return self.__class__(result, **self.assumptions)
def simplify_sin_cos(self, as_cos=False, as_sin=False):
"""Simplify c * cos(theta) - s * sin(theta) as A * cos(theta - phi)."""
result = simplify_sin_cos(self.expr, as_cos, as_sin)
return self.__class__(result, **self.assumptions)
def simplify_dirac_delta(self):
"""Simplify DiracDelta(4 * t + 2) to DiracDelta(t + 0.5) / 4
and DiracDelta(t) * x(t) to DiracDelta * x(0)."""
result = simplify_dirac_delta(self.expr, self.var)
return self.__class__(result, **self.assumptions)
def simplify_heaviside(self):
"""Simplify Heaviside(4 * t + 2) to Heaviside(t + 0.5)
and Heaviside(t)**2 to Heaviside(t), etc."""
result = simplify_heaviside(self.expr, self.var)
return self.__class__(result, **self.assumptions)
def simplify_unit_impulse(self):
"""Simplify UnitImpulse(4 * k + 8) to UnitImpulse(k + 2), etc."""
result = simplify_unit_impulse(self.expr, self.var)
return self.__class__(result, **self.assumptions)
def simplify_rect(self):
"""Simplify rect(4 * t + 2) to rect(t + 0.5)
and rect(t)**2 to rect(t), etc."""
result = simplify_rect(self.expr, self.var)
return self.__class__(result, **self.assumptions)
def expand_hyperbolic_trig(self):
"""Convert cosh(x) to exp(x) + exp(-x), etc."""
result = expand_hyperbolic_trig(self.expr)
return self.__class__(result, **self.assumptions)
def replace(self, query, value, map=False, simultaneous=True, exact=None):
try:
query = query.expr
except:
pass
try:
value = value.expr
except:
pass
ret = self.expr.replace(query, value, map, simultaneous, exact)
return self.__class__(ret, **self.assumptions)
def subs(self, *args, **kwargs):
"""Substitute variables in expression, see sympy.subs for usage."""
if len(args) > 2:
raise ValueError('Too many arguments')
if len(args) == 0:
raise ValueError('No arguments')
if len(args) == 2:
return self._subs1(args[0], args[1], **kwargs)
if isinstance(args[0], dict):
dst = self
for key, val in args[0].items():
dst = dst._subs1(key, val, **kwargs)
return dst
return self._subs1(self.var, args[0], **kwargs)
@property
def label(self):
label = ''
if hasattr(self, 'quantity_label'):
label += self.quantity_label
if self.part != '':
label += ' ' + self.part
else:
if self.part != '':
label += capitalize_name(self.part)
return label
@property
def label_with_units(self):
label = self.label
if hasattr(self, 'units') and self.units != '' and self.units != 1:
label += ' (%s)' % self.units
return label
@property
def domain_label_with_units(self):
label = ''
if hasattr(self, 'domain_label'):
label += '%s' % self.domain_label
if hasattr(self, 'domain_units'):
if self.domain_units != 1:
label += ' (%s)' % self.domain_units
return label
def differentiate(self, arg=None):
"""Differentiate expression."""
if arg is None:
arg = self.var
arg = self._tweak_arg(arg)
return self.__class__(sym.diff(self.expr, arg), **self.assumptions)
def diff(self, arg=None):
return self.differentiate(arg)
def doit(self, **hints):
"""Evaluate unevaluated functions such as integrals and sums."""
result = self.__class__(self.expr.doit(**hints), **self.assumptions)
result.part = self.part
return result
def _tweak_arg(self, arg):
if isinstance(arg, (Expr, Function)):
return arg.expr
if isinstance(arg, tuple):
return tuple([self._tweak_arg(arg1) for arg1 in arg])
if isinstance(arg, list):
return [self._tweak_arg(arg1) for arg1 in arg]
return arg
def integrate(self, arg=None, **kwargs):
"""Integrate expression.
For example `exp(-3 * t).integrate((t, 0, oo))` gives `1 / 3`.
"""
if arg is None:
arg = self.var
arg = self._tweak_arg(arg)
return self.__class__(sym.integrate(self.expr, arg, **kwargs),
**self.assumptions)
def rewrite(self, *args, **hints):
"""Rewrite expression.
For example, `exp(j*a*t).rewrite(cos)` gives `ⅉ⋅sin(4⋅t) +
cos(4⋅t)`. Similarly, `cos(2 * t).rewrite(exp)` will expand
the cosine as two complex exponentials."""
args = self._tweak_arg(args)
return self.__class__(self.sympy.rewrite(*args, **hints),
**self.assumptions)
def solve(self, *symbols, **flags):
"""Solve expression. This returns a list of solutions. An empty list
is returned if no solutions are found. Note, by default,
Lcapy assumes symbols are positive so an innocuous expression may fail
to give a result if the solution is negative.
For example:
`>>> x = symbols('x')
>>> y = x + 3
>>> y.solve(x)
[]
>>> x = symbols('x', positive=False)
>>> y = x + 3
>>> y.solve(x)
[-3]`"""
if self.has(AppliedUndef):
new, defs = self.remove_undefs(return_mappings=True)
return new.solve(*symbols, **flags).subs(defs)
symbols = [symbol_map(symbol) for symbol in symbols]
return expr(sym.solve(self.expr, *symbols, **flags))
def split_dirac_delta(self):
"""Return expression as a list of terms. The first term has no
DiracDeltas, the second term collates the DiracDeltas, the
third term collates derivatives of DiracDeltas, etc.
For example, u(t) + DiractDelta(t, 1) returns
[u(t), 0, DiracDelta(t, 1)]
"""
# TODO: wrap return value as ExprList
return split_dirac_delta(self)
@property
def symbols(self):
"""Return dictionary of symbols in the expression keyed by name."""
expr = self.sympy
symdict = {sym.name:sym for sym in expr.free_symbols}
# Look for V(s), etc.
funcdict = {atom.func.__name__:atom for atom in expr.atoms(AppliedUndef)}
symdict.update(funcdict)
return symdict
def _fmt_roots(self, roots, aslist=False, pairs=False):
def _wrap_dict(roots):
rootsdict = {}
for root, n in roots.items():
rootsdict[expr(root)] = n
return expr(rootsdict)
def _wrap_list(roots):
rootslist = []
for root, n in roots.items():
rootslist += [expr(root)] * n
return expr(rootslist)
if pairs:
pairs, singles = pair_conjugates(roots)
if aslist:
return _wrap_list(pairs), _wrap_list(singles)
else:
return _wrap_dict(pairs), _wrap_dict(singles)
if aslist:
return _wrap_list(roots)
else:
return _wrap_dict(roots)
def roots(self, aslist=False, pairs=False):
"""Return roots of expression as a dictionary. Note this may not find
them all. In particular, if the rational function has a
degree of five or higher.
If `pairs` is True, return two dictionaries. The first
contains the conjugate pairs and the second contains the
others
If `aslist` is True, return roots as list.
"""
if self._ratfun is None:
roots = {}
else:
roots = self._ratfun.roots()
return self._fmt_roots(roots, aslist, pairs)
def zeros(self, aslist=False, pairs=False):
"""Return zeros of expression as a dictionary Note this may not find
them all. In particular, if the denominator of the rational
function has a degree of five or higher.
If `pairs` is True, return two dictionaries. The first
contains the conjugate pairs and the second contains the
others
If `aslist` is True, return zeros as list.
"""
if self._ratfun is None:
return self.N.roots(aslist, pairs)
zeros = self._ratfun.zeros()
return self._fmt_roots(zeros, aslist, pairs)
def poles(self, aslist=False, damping=None, pairs=False):
"""Return poles of expression as a dictionary. Note this may not find
them all. In particular, if the denominator of the rational
function has a degree of five or higher.
If `pairs` is True, return two dictionaries. The first
contains the conjugate pairs and the second contains the
others.
If `aslist` is True, return poles as list.
"""
if self._ratfun is None:
# Handle expressions such as A(s) * (1 - exp(-s * T)) / B(s).
return self.D.roots(aslist, pairs)
poles = self._ratfun.poles(damping=damping)
polesdict = {}
for pole in poles:
key = pole.expr
if key in polesdict:
polesdict[key] += pole.n
else:
polesdict[key] = pole.n
return self._fmt_roots(polesdict, aslist, pairs)
    def parameterize_ZPK(self, zeta=None, ZPK=None):
        """Parameterize rational function in zero-pole-gain (ZPK) form
        as K * (var - z1) * ... / ((var - p1) * ...).

        Returns (parameterized expression, defs) where defs maps the
        introduced parameter names ('K', 'z1', ..., 'p1', ...) to their
        values.  The `zeta` and `ZPK` arguments are unused; they exist
        for signature compatibility with `parameterize`."""
        def def1(defs, symbolname, value):
            # Create a parameter symbol and record its value in defs.
            from .cexpr import cexpr
            sym1 = symbol(symbolname, override=False)
            defs[symbolname] = cexpr(value)
            return sym1
        if self._ratfun is None:
            # Not a rational function: nothing to parameterize.
            return self, {}
        zeros, poles, K, undef = self._ratfun.as_ZPK()
        defs = ExprDict()
        K = def1(defs, 'K', K * undef)
        N = 1
        D = 1
        for m, zero in enumerate(zeros):
            z = def1(defs, 'z%d' % (m + 1), zero)
            # Unevaluated Add/Mul preserve the factored ZPK structure.
            N *= sym.Add(self.var, -z.sympy, evaluate=False)
        for m, pole in enumerate(poles):
            p = def1(defs, 'p%d' % (m + 1), pole)
            D *= sym.Add(self.var, -p.sympy, evaluate=False)
        result = sym.Mul(K.sympy, sym.Mul(N, sym.Pow(D, -1)), evaluate=False)
        return self.__class__(result, **self.assumptions), defs
    def parameterize(self, zeta=None, ZPK=None):
        """Parameterize first and second-order expressions.
        For example, pexpr, defs = expr.parameterize()
        If parameterization is successful, defs is a dictionary
        of the parameters. The original expression can be obtained
        with pexpr.subs(defs)
        For first order systems, parameterize as:
        K * (s + beta) / (s + alpha)
        K / (s + alpha)
        K (s + beta)
        where appropriate.
        If `zeta` is True, parameterize second-order expression in
        standard form using damping factor and natural frequency
        representation, i.e.
        N(s) / (s**2 + 2 * zeta * omega_0 * s + omega_0**2)
        otherwise parameterize as
        N(s) / (s**2 + 2 * sigma_1 * s + omega_1**2 + sigma_1**2)

        If `ZPK` is True, delegate to `parameterize_ZPK` instead.
        Expressions of unsupported order are returned unchanged with
        empty defs.
        """
        def def1(defs, symbolname, value):
            # Create a parameter symbol and record its value in defs.
            from .cexpr import cexpr
            sym1 = symbol(symbolname, override=False)
            defs[symbolname] = cexpr(value)
            return sym1
        if zeta is None and ZPK is None:
            # Default to the damping-factor representation.
            zeta = True
            ZPK = False
        if ZPK:
            return self.parameterize_ZPK()
        # Factor out pure powers of s and parameterize the remainder.
        factors = self.as_ordered_factors()
        spowers = [s**-4, s**-3, s**-2, s**-1, s, s**2, s**3, s**4]
        for spower in spowers:
            if spower in factors:
                result, defs = (self / spower).parameterize(zeta)
                return result * spower, defs
        N = self.N
        D = self.D
        ndegree = N.degree
        ddegree = D.degree
        # Normalized coefficients (leading coefficient 1).
        ncoeffs = N.coeffs(norm=True)
        dcoeffs = D.coeffs(norm=True)
        result = None
        defs = ExprDict()
        K = self.K
        if ndegree < 1 and ddegree < 1:
            # Constant: nothing to parameterize.
            result = self
        elif ndegree == 1 and ddegree == 1:
            K = def1(defs, 'K', K)
            alpha = def1(defs, 'alpha', dcoeffs[1])
            beta = def1(defs, 'beta', ncoeffs[1])
            result = K * (s + beta) / (s + alpha)
        elif ndegree == 1 and ddegree == 0:
            K = def1(defs, 'K', K)
            beta = def1(defs, 'beta', ncoeffs[1])
            result = K * (s + beta)
        elif ndegree == 0 and ddegree == 1:
            K = def1(defs, 'K', K)
            alpha = def1(defs, 'alpha', dcoeffs[1])
            result = K / (s + alpha)
        elif ddegree == 2:
            K = def1(defs, 'K', K)
            coeffs = self.N.coeffs()
            if not zeta:
                # sigma_1/omega_1 (real/imaginary pole part) form.
                sigma1 = def1(defs, 'sigma_1', dcoeffs[1] / 2)
                omega1 = def1(defs, 'omega_1',
                              sqrt(dcoeffs[2] - (dcoeffs[1] / 2)**2).simplify())
                result = K * (self.N / coeffs[0]) / (s**2 + 2 * sigma1 * s + sigma1**2 + omega1**2)
            else:
                # Damping factor / natural frequency form.
                omega0 = def1(defs, 'omega_0', sqrt(dcoeffs[2]))
                zeta = def1(defs, 'zeta', dcoeffs[1] / (2 * sqrt(dcoeffs[2])))
                result = K * (self.N / coeffs[0]) / (s**2 + 2 * zeta * omega0 * s + omega0**2)
        if result is None:
            # Unsupported order: return the expression unchanged.  Copy?
            result = self
        return self.__class__(result, **self.assumptions), defs
def canonical(self, factor_const=False):
"""Convert rational function to canonical form (aka polynomial form);
this is like general form but with a unity highest power of
denominator. For example,
(5 * s**2 + 5 * s + 5) / (s**2 + 4)
If factor_const is True, factor constants from numerator, for example,
5 * (s**2 + s + 1) / (s**2 + 4)
This is also called gain-polynomial form.
See also general, partfrac, standard, timeconst, and ZPK
"""
if self.is_Equality:
return equation(self.lhs.canonical(), self.rhs.canonical())
if not self.expr.has(self.var):
return self
if self._ratfun is None:
return self.copy()
return self.__class__(self._ratfun.canonical(factor_const),
**self.assumptions)
def general(self):
"""Convert rational function to general form. For example,
(5 * s**2 + 10 * s + 5) / (s**2 + 4)
See also canonical, partfrac, standard, timeconst, and ZPK."""
if self.is_Equality:
return equation(self.lhs.general(), self.rhs.general())
if self._ratfun is None:
return self.copy()
return self.__class__(self._ratfun.general(), **self.assumptions)
def partfrac(self, combine_conjugates=False, pairs=False, damping=None,
method=None):
"""Convert rational function into partial fraction form. For example,
5 + (5 - 15 * j / 4) / (s + 2 * j) + (5 + 15 * j / 4) / (s - 2 * j)
If `combine_conjugates` or `pairs` is True then the pair of partial
fractions for complex conjugate poles are combined. This creates
a sum of biquad sections.
`method` can be 'sub' (substitution method, the default) or
'ec' (equating cofficients method).
See also canonical, standard, general, timeconst, and ZPK."""
pairs = pairs or combine_conjugates
if self.is_Equality:
return equation(self.lhs.partfrac(pairs, damping, method),
self.rhs.partfrac(pairs, damping, method))
try:
if self._ratfun is None:
return self.copy()
return self.__class__(self._ratfun.partfrac(pairs, damping, method),
**self.assumptions)
except ValueError:
return self.as_sum().partfrac(pairs, damping, method)
def recippartfrac(self, combine_conjugates=False, pairs=False,
                  damping=None, method=None):
    """Convert rational function into partial fraction form
    using the reciprocal of the variable.

    For example, if H = 5 * (s**2 + 1) / (s**2 + 5*s + 4)
    then H.recippartfrac() gives
    5/4 - 10/(3*(1 + 1/s)) + 85/(48*(1/4 + 1/s))

    If `combine_conjugates` or `pairs` is True then the pair of partial
    fractions for complex conjugate poles are combined.

    `method` can be 'sub' (substitution method, the default) or
    'ec' (equating coefficients method).

    See also canonical, standard, general, partfrac, timeconst, and ZPK."""

    # `combine_conjugates` is a legacy alias for `pairs`.
    pairs = pairs or combine_conjugates

    if self.is_Equality:
        # BUG FIX: use keyword arguments; the previous positional calls
        # shifted damping/method into the pairs/damping parameters.
        return equation(self.lhs.recippartfrac(pairs=pairs, damping=damping,
                                               method=method),
                        self.rhs.recippartfrac(pairs=pairs, damping=damping,
                                               method=method))

    if self._ratfun is None:
        return self.copy()

    # Substitute var -> 1 / qtmp, do the partial fraction expansion in
    # terms of qtmp, then map back qtmp -> 1 / var.
    tmpsym = miscsymbol('qtmp')
    expr = self.subs(1 / tmpsym)
    ratfun = Ratfun(expr.expr, tmpsym)
    # BUG FIX: use the merged `pairs` flag; previously only
    # combine_conjugates was forwarded, silently ignoring pairs=True.
    nexpr = ratfun.partfrac(pairs, damping, method)
    nexpr = nexpr.subs(tmpsym, 1 / self.var)
    return self.__class__(nexpr, **self.assumptions)
def standard(self):
    """Convert rational function into mixed fraction form.  For example,

        (5 * s - 5) / (s**2 + 4) + 5

    This is the sum of a strictly proper rational function and a
    polynomial.

    See also canonical, general, partfrac, timeconst, and ZPK."""

    # Equations are handled side-by-side.
    if self.is_Equality:
        return equation(self.lhs.standard(), self.rhs.standard())

    # Not a rational function; copy as-is.
    if self._ratfun is None:
        return self.copy()

    return self.__class__(self._ratfun.standard(), **self.assumptions)
def mixedfrac(self):
    """This is an alias for `standard` and may be deprecated."""

    return self.standard()
def timeconst(self):
    """Convert rational function into time constant form.  For example,

        5 * (s**2 + 2 * s + 1) / (4 * (s**2 / 4 + 1))

    See also timeconst_terms, canonical, general, standard,
    partfrac and ZPK."""

    # Equations are handled side-by-side.
    if self.is_Equality:
        return equation(self.lhs.timeconst(), self.rhs.timeconst())

    # Not a rational function; copy as-is.
    if self._ratfun is None:
        return self.copy()

    return self.__class__(self._ratfun.timeconst(), **self.assumptions)
def timeconst_terms(self):
    """Convert each term of the expression into time constant form,
    term by term.  See also timeconst."""

    if self.is_Equality:
        return equation(self.lhs.timeconst_terms(),
                        self.rhs.timeconst_terms())

    total = 0
    for term in self.expr.as_ordered_terms():
        total += self.__class__(term).timeconst()

    return self.__class__(total, **self.assumptions)
def ZPK(self, pairs=False, combine_conjugates=False):
    """Convert to zero-pole-gain (ZPK) form (factored form).  For example,

        5 * (s + 1)**2 / ((s - 2 * j) * (s + 2 * j))

    If `combine_conjugates` or `pairs` is True, then conjugate pairs are
    combined to create a product of biquad sections.  For example,

        5 * (s + 1)**2 / (s**2 + 4)

    Note, both the numerator and denominator are expressed as
    products of monic factors, i.e., (s + 1 / 3) rather than (3 * s + 1).

    See also canonical, general, standard, partfrac, and timeconst."""

    if self.is_Equality:
        # BUG FIX: propagate the pairs/combine_conjugates flags; they
        # were previously dropped for equations.
        return equation(self.lhs.ZPK(pairs, combine_conjugates),
                        self.rhs.ZPK(pairs, combine_conjugates))

    if self._ratfun is None:
        return self.copy()

    return self.__class__(self._ratfun.ZPK(combine_conjugates or pairs),
                          **self.assumptions)
def factored(self, pairs=False):
    """Convert to factored form.  For example,

        5 * (s + 1)**2 / ((s - 2 * j) * (s + 2 * j))

    If `pairs` is True, then conjugate pairs are combined.  For example,

        5 * (s + 1)**2 / (s**2 + 4)

    This is an alias for ZPK.  See also canonical, general,
    standard, partfrac, and timeconst."""

    if self.is_Equality:
        # BUG FIX: propagate the pairs flag; it was previously dropped
        # for equations.
        return equation(self.lhs.factored(pairs), self.rhs.factored(pairs))

    if self._ratfun is None:
        return self.copy()

    return self.__class__(self._ratfun.ZPK(pairs), **self.assumptions)
def expandcanonical(self):
    """Expand in terms for different powers with each term
    expressed in canonical form.  For example,

        s / (s**2 + 4) + 5 / (s**2 + 4)

    See also canonical, general, partfrac, timeconst, and ZPK."""

    if self.is_Equality:
        # BUG FIX: the lhs call was misspelt `expandcanonoical`,
        # raising AttributeError for any equation.
        return equation(self.lhs.expandcanonical(),
                        self.rhs.expandcanonical())

    if self._ratfun is None:
        return self.copy()

    return self.__class__(self._ratfun.expandcanonical(),
                          **self.assumptions)
def expand_functions(self):
    """Expand functions in the expression.

    For example `rect(t)` -> `u(t + 1 / 2) - u(t - 1 / 2)`."""

    expanded = expand_functions(self.sympy, self.var)
    return self.__class__(expanded, **self.assumptions)
def expand_response(self):
    """Expand expression into a sum of Lcapy responses, where each
    response has the form B(var) * exp(-var * T) / A(var).

    This helps with inverse Laplace transforms."""

    denom = self.D

    result = sym.S.Zero
    # Expand the numerator and divide each resulting term by the
    # common denominator.
    for numterm in self.N.expand().as_ordered_terms():
        result += numterm / denom

    return self.__class__(result, **self.assumptions)
def coeffs(self, var=None, norm=False):
    """Return list of coefficients assuming the expression is a
    polynomial in terms of `var`.  If `var` is None, the default
    variable is used.  The highest powers come first.

    This will fail for a rational function.  Instead use
    `expr.N.coeffs` or `expr.D.coeffs` for the numerator or
    denominator respectively.

    If `norm` is True, normalize coefficients so the highest power
    has a unity coefficient."""

    if self._ratfun is None:
        # Variable-free expression: a single constant coefficient.
        return expr([self])

    if var is None:
        var = self.var
    try:
        z = sym.Poly(self.expr, var)
    except Exception as e:
        # FIX: previously a bare `except:` which also swallowed
        # KeyboardInterrupt etc.; chain the cause for debuggability.
        raise ValueError('Use .N or .D attribute to specify numerator or denominator of rational function') from e

    c = z.all_coeffs()
    if norm:
        # Divide through by the leading coefficient.
        return expr([sym.simplify(c1 / c[0]) for c1 in c])

    return expr(c)
def normcoeffs(self, var=None):
    """Return list of coefficients (normalized so the highest power is 1)
    assuming the expression is a polynomial in s.  The highest powers
    come first.  This will fail for a rational function; instead
    use `expr.N.normcoeffs` or `expr.D.normcoeffs` for the numerator or
    denominator respectively."""

    return self.coeffs(var=var, norm=True)
@property
def degree(self):
    """Return the degree (order) of the rational function.

    This is the maximum of the numerator and denominator degrees.
    Note zero has a degree of -inf."""

    # Non-rational expressions report a degree of 1.
    if self._ratfun is None:
        return 1
    return self._ratfun.degree
@property
def Ndegree(self):
    """Return the degree (order) of the numerator of a rational function.

    This will throw an exception if the expression is not a
    rational function.

    Note zero has a degree of -inf."""

    # Non-rational expressions report a degree of 1.
    if self._ratfun is None:
        return 1
    return self._ratfun.Ndegree
@property
def Ddegree(self):
    """Return the degree (order) of the denominator of a rational function.

    This will throw an exception if the expression is not a
    rational function.

    Note zero has a degree of -inf."""

    # Non-rational expressions report a degree of 1.
    if self._ratfun is None:
        return 1
    return self._ratfun.Ddegree
def prune_HOT(self, degree):
    """Prune higher order terms if the expression is a polynomial
    so that the resulting approximate expression has the desired
    degree."""

    # BUG FIX: `coeffs` is a method, not a property, so it must be
    # called; `len(self.coeffs)` raised TypeError.
    coeffs = self.coeffs()
    # BUG FIX: a polynomial of degree d has d + 1 coefficients, so
    # nothing needs pruning when len(coeffs) <= degree; the previous
    # `<` comparison let coeffs[degree] index out of range when
    # len(coeffs) == degree.
    if len(coeffs) <= degree:
        return self

    # Reverse so coeffs[m] multiplies var**m.
    coeffs = coeffs[::-1]

    expr = sym.S.Zero
    var = self.var
    for m in range(degree + 1):
        term = coeffs[m].expr * var ** m
        expr += term

    return self.__class__(expr, **self.assumptions)
def ratfloat(self):
    """Convert rational numbers in the expression to floats.
    See also floatrat.

    For example, t / 5 -> 0.2 * t"""

    result = self.copy()
    result.expr = self.expr.replace(lambda e: e.is_Rational,
                                    lambda e: sym.Float(e))
    return result
def floatrat(self):
    """Convert floating point numbers in the expression to rational
    numbers.  See also ratfloat.

    For example, 0.2 * t -> t / 5"""

    # Round-trip through a string so SymPy rationalizes the value.
    converted = self.expr.replace(
        lambda e: e.is_Float,
        lambda e: sym.sympify(str(e), rational=True))
    return self.__class__(converted, **self.assumptions)
def approximate_fractional_power(self, method='pade', order=2):
    """Experimental method to approximate s**a, where a is fractional,
    with a rational function using a Pade approximant."""

    approximated = approximate_fractional_power(self, method, order)
    return self.__class__(approximated, **self.assumptions)
def approximate_exp(self, method='pade', order=1, numer_order=None):
    """Approximate exp(a).  The best time-domain response (without a
    jump) is achieved with `numer_order == order - 1`.  The best
    frequency-domain response is achieved with `numer_order == order`."""

    approximated = approximate_exp(self, method, order, numer_order)
    return self.__class__(approximated, **self.assumptions)
def approximate_hyperbolic_trig(self, method='pade', order=1,
                                numer_order=None):
    """Approximate cosh(a), sinh(a), tanh(a)."""

    approximated = approximate_hyperbolic_trig(self, method, order,
                                               numer_order)
    return self.__class__(approximated, **self.assumptions)
def approximate(self, method='pade', order=1, numer_order=None):
    """Apply all approximations in turn: fractional powers, exponentials,
    and hyperbolic trigonometric functions."""

    result = self.approximate_fractional_power(method=method, order=order)
    result = result.approximate_exp(method=method, order=order,
                                    numer_order=numer_order)
    return result.approximate_hyperbolic_trig(method=method, order=order,
                                              numer_order=numer_order)
def as_value_unit(self):
    """Return tuple of value and unit.  For example,

    >>> v = voltage(5)
    >>> v.as_value_unit
    (5, volts)
    """

    # Deferred import to avoid a circular dependency.
    from .units import units

    return units.as_value_unit(self.expr)
def as_N_D(self, monic_denominator=False, use_sympy=False):
    """Responses due to a sum of delayed transient responses
    cannot be factored into ZPK form with a constant delay.
    For example, sometimes SymPy gives:

            ⎛    s⋅τ    ⎞  -s⋅τ
            ⎝V₁⋅ℯ    - V₂⎠⋅ℯ
       I = ────────────────────
               s⋅(L⋅s + R)

    This method tries to extract the numerator and denominator
    where the denominator is a polynomial.

       N, D = I.as_N_D()

                     -s⋅τ
       N = V₁ - V₂⋅ℯ
       D = s⋅(L⋅s + R)"""

    N, D = as_N_D(self.expr, self.var, monic_denominator, use_sympy)

    # Strip quantity and assumptions: N and D in isolation are not
    # meaningful quantities (only their ratio is).
    cls = self._class_by_quantity('undefined')
    return cls(N), cls(D)
def as_sum(self):
    """Responses due to a sum of delayed transient responses
    cannot be factored into ZPK form with a constant delay.
    For example, sometimes SymPy gives:

            ⎛    s⋅τ    ⎞  -s⋅τ
            ⎝V₁⋅ℯ    - V₂⎠⋅ℯ
       I = ────────────────────
               s⋅(L⋅s + R)

    While this cannot be factored into ZPK form, it can be
    expressed as a sum of ZPK forms or as a partial fraction
    expansion.  However, SymPy does not play ball if trying to
    express as a sum of terms:

       I.as_ordered_terms()

       ⎡⎛    s⋅τ    ⎞  -s⋅τ⎤
       ⎢⎝V₁⋅ℯ    - V₂⎠⋅ℯ    ⎥
       ⎢────────────────────⎥
       ⎣     s⋅(L⋅s + R)    ⎦

    Instead, it appears necessary to split into N / D where
    D is a polynomial.  Then N can be split."""

    # Delegate the heavy lifting to the module-level helper.
    result = as_sum(self.expr, self.var)
    return self.__class__(result, **self.assumptions)
def as_monic_terms(self):
    """Rewrite terms so that each denominator is monic.

    This does not expand the expression first; use `.expand()`."""

    total = 0
    for term in self.expr.as_ordered_terms():
        num, den = as_N_D(term, self.var, monic_denominator=True)
        total += num / den
    return self.__class__(total, **self.assumptions)
def as_nonmonic_terms(self):
    """Rewrite terms so that each denominator is not monic.

    This does not expand the expression first; use `.expand()`."""

    total = 0
    for term in self.expr.as_ordered_terms():
        num, den = as_N_D(term, self.var, monic_denominator=False)
        total += num / den
    return self.__class__(total, **self.assumptions)
def continued_fraction_coeffs(self):
    """Return the quotients of the continued fraction expansion of the
    rational function, obtained by repeated polynomial division
    starting from the highest-degree terms."""

    coeffs = []
    var = self.var

    def foo(Npoly, Dpoly):
        # One step of Euclidean-style division: divide the leading
        # term of N by the leading term of D, record the quotient,
        # then recurse with the roles swapped on the remainder.
        # This seems rather complicated to extract the leading terms.
        NLM, NLC = Npoly.LT()
        DLM, DLC = Dpoly.LT()
        NLT = sym.Poly(NLM.as_expr() * NLC, var)
        DLT = sym.Poly(DLM.as_expr() * DLC, var)

        Q = NLT / DLT
        coeffs.append(Q)
        # Remainder after subtracting Q * D from N.
        Npoly2 = sym.Poly(Npoly.as_expr() - Q * Dpoly.as_expr(), var)
        # NOTE(review): comparing a sym.Poly against 0 with != —
        # presumably intended to test for a zero remainder; confirm
        # this matches SymPy's Poly equality semantics.
        if Npoly2 != 0:
            foo(Dpoly, Npoly2)

    N, D = self.expr.as_numer_denom()
    Npoly = sym.Poly(N, var)
    Dpoly = sym.Poly(D, var)

    # If strictly proper, start with a zero quotient and invert.
    if Dpoly.degree() > Npoly.degree():
        coeffs.append(0)
        Npoly, Dpoly = Dpoly, Npoly

    foo(Npoly, Dpoly)

    return expr(coeffs)
def as_continued_fraction(self):
    """Convert expression into a continued fraction."""

    # Fold the quotients from the innermost term outwards:
    # c0 + 1 / (c1 + 1 / (c2 + ...)).
    coeffs = self.continued_fraction_coeffs()

    result = coeffs[-1]
    for quotient in reversed(coeffs[:-1]):
        result = quotient + 1 / result

    return self.__class__(result, **self.assumptions)
def as_ratfun_delay(self):
    """Split the expression into a rational function B / A and a
    delay factor.  Raises ValueError if the expression contains an
    undefined function."""

    B, A, delay, undef = self._ratfun.as_B_A_delay_undef()
    if undef != 1:
        raise ValueError('Have undefined expression %s' % undef)

    return self.__class__(B / A, **self.assumptions), delay
def _as_B_A_delay_undef(self):
    # Thin wrapper around the Ratfun decomposition returning
    # (numerator, denominator, delay, undefined-function factor).
    return self._ratfun.as_B_A_delay_undef()
def continued_fraction_inverse_coeffs(self):
    """Convert expression into a continued fraction with inverse
    coefficients, obtained by repeated division starting from the
    lowest-degree (trailing) terms."""

    coeffs = []
    var = self.var

    def foo(Npoly, Dpoly):
        # One division step based on the trailing (lowest-degree)
        # terms rather than the leading ones.
        # This seems rather complicated to extract the last non-zero terms.
        NEM, NEC = Npoly.ET()
        DEM, DEC = Dpoly.ET()
        NET = NEM.as_expr() * NEC
        DET = DEM.as_expr() * DEC

        if sym.Poly(NET, var).degree() > sym.Poly(DET, var).degree():
            # N's trailing term has higher degree; record a zero
            # quotient and swap roles.
            coeffs.append(0)
            foo(Dpoly, Npoly)
            return

        Q = NET / DET
        coeffs.append(Q)
        # Remainder after subtracting Q * D from N.
        Npoly2 = sym.Poly(Npoly.as_expr() - Q * Dpoly.as_expr(), var)
        # NOTE(review): Poly compared against 0 with != — presumably a
        # zero-remainder test; confirm against SymPy Poly semantics.
        if Npoly2 != 0:
            foo(Dpoly, Npoly2)

    N, D = self.expr.as_numer_denom()
    Npoly = sym.Poly(N, var)
    Dpoly = sym.Poly(D, var)

    foo(Npoly, Dpoly)

    return expr(coeffs)
def as_continued_fraction_inverse(self):
    """Convert expression into a continued fraction using inverse
    coefficients."""

    # Fold the quotients from the innermost term outwards:
    # c0 + 1 / (c1 + 1 / (c2 + ...)).
    coeffs = self.continued_fraction_inverse_coeffs()

    result = coeffs[-1]
    for quotient in reversed(coeffs[:-1]):
        result = quotient + 1 / result

    return self.__class__(result, **self.assumptions)
def force_time(self):
    # Convert to the time domain if the global state requests it,
    # otherwise return the expression unchanged.
    if state.force_time:
        return self.time()
    return self
def remove_undefs(self, return_mappings=False):
    """Replace undefined functions with symbols, for example,
    replace x(t) with x, etc.

    This is useful for simplifying and solving equations.

    This method gives up if it finds an expression such as
    x(n - 1) + 2 * x(n) since the arguments are different.

    If `return_mappings` is True, then a dictionary of substitutions
    is returned as well as the modified expression.  For example,

        new, defs = expr.remove_undefs(return_mappings=True)

    The original expression can be obtained using:

        new.subs(defs)"""

    mappings = {}

    e = self.expr
    for item in sym.preorder_traversal(e):
        if isinstance(item, AppliedUndef):
            # Take the function name, e.g. 'x' from 'x(t)'.
            name = str(item)
            parts = name.split('(')
            name = parts[0]
            if name in mappings and mappings[name] != item:
                # Have found something like x(n) + x(n - 1),
                # so give up...  We could create different
                # named symbols but these cannot conflict
                # with other symbols in the expression.
                break
            mappings[name] = item
            # Need to propagate complex assumption, etc.
            e = e.subs(item, expr(name).expr)

    ret = self.__class__(e, **self.assumptions)
    if return_mappings:
        return ret, mappings
    else:
        return ret
def remove_condition(self):
    """Remove the piecewise condition from the expression.
    See also force_causal."""

    if not self.is_conditional:
        return self

    # Take the expression part of the first (expr, cond) piece.
    unconditional = self.expr.args[0].args[0]
    return self.__class__(unconditional)
def remove_images(self, m1=0, m2=0):
    """Remove all spectral images resulting from a DTFT.

    For example,
    >>> x = Sum(DiracDelta(f - m/Delta_t), (m, -oo, oo))
    >>> x.remove_images()
    DiracDelta(f)

    Alternatively, the number of images can be changed,
    for example,
    >>> x = Sum(DiracDelta(f - m/Delta_t), (m, -1, 1))
    >>> x.remove_images()
    Sum(DiracDelta(f - m/Delta_t), (f, -1, 1))
    """

    from .sym import dt

    # The amplitude scaling of the images depends on the spectral
    # domain variable in use.
    var = self.var
    if var is fsym:
        scale = dt
    elif var is Fsym:
        scale = 1
    elif var is omegasym:
        scale = dt / (2 * pi)
    elif var is Omegasym:
        scale = 1 / (2 * pi)
    else:
        # Only spectral-domain expressions have images to remove.
        raise RuntimeError('Mystery var %s' % var)

    result = remove_images(self.expr, var, scale, m1, m2)
    return self.__class__(result, **self.assumptions)
def as_QRD(self):
    # Delegate to the Ratfun quotient/remainder/denominator split.
    return self._ratfun.as_QRD()
def as_QRPO(self):
    # Delegate to the Ratfun quotient/residues/poles/orders split.
    return self._ratfun.as_QRPO()
def exprcontainer(arg, **assumptions):
    """Wrap a Python container (list, tuple, dict, 1-D ndarray) in the
    corresponding Lcapy container class."""

    from numpy import ndarray

    if isinstance(arg, (ExprList, ExprTuple, ExprDict)):
        # Already an Lcapy container.
        return arg
    if isinstance(arg, list):
        return ExprList(arg, **assumptions)
    if isinstance(arg, (tuple, sym.Tuple)):
        return ExprTuple(arg, **assumptions)
    if isinstance(arg, dict):
        return ExprDict(arg)
    if isinstance(arg, ndarray):
        from .vector import Vector
        if arg.ndim > 1:
            raise ValueError('Multidimensional arrays unsupported; convert to Matrix')
        return Vector(arg, **assumptions)

    raise ValueError('Unsupported exprcontainer %s' % arg.__class__.__name__)
def _make_domain(expr, **assumptions):
    """Create a domain expression for `expr` based on which domain
    symbol appears in its free symbols.  The order of the checks
    establishes a priority when several domain symbols are present;
    a symbol-free expression becomes a constant expression."""

    symbols = expr.free_symbols

    if tsym in symbols:
        return texpr(expr, **assumptions)
    elif ssym in symbols:
        return sexpr(expr, **assumptions)
    elif fsym in symbols:
        return fexpr(expr, **assumptions)
    elif omegasym in symbols:
        return omegaexpr(expr, **assumptions)
    elif nsym in symbols:
        return nexpr(expr, **assumptions)
    elif ksym in symbols:
        return kexpr(expr, **assumptions)
    elif zsym in symbols:
        return zexpr(expr, **assumptions)
    elif Fsym in symbols:
        return Fexpr(expr, **assumptions)
    elif Omegasym in symbols:
        return Omegaexpr(expr, **assumptions)
    else:
        return cexpr(expr, **assumptions)
def expr(arg, override=False, **assumptions):
    """Create Lcapy expression from arg.

    If `arg` is an `Expr` it is returned, unless `assumptions` is
    specified.

    If `arg` is a string:
       If a t symbol is found in the string a TimeDomainExpression object is created.
       If a s symbol is found in the string a LaplaceDomainExpression object is created.
       If a f symbol is found in the string an FourierDomainExpression object is created.
       If an omega symbol is found in the string an AngularFourierDomainExpression object is created.

    For example, v = expr('3 * exp(-t / tau) * u(t)')
                 V = expr('5 * s', causal=True)

    If `override` is True, then create new symbol(s) even if
    previously defined by SymPy."""

    # Deferred import to avoid a circular dependency.
    from .sequence import Sequence

    if arg is None:
        return arg

    if isinstance(arg, Expr):
        # Return as-is unless new assumptions were requested.
        if assumptions == {}:
            return arg
        return arg.__class__(arg, **assumptions)

    if isinstance(arg, Sequence):
        return arg

    # Containers (but not strings, which also iterate) are wrapped in
    # the corresponding Lcapy container class.
    if not isinstance(arg, str) and hasattr(arg, '__iter__'):
        return exprcontainer(arg, **assumptions)

    # Don't set rational=True since this will set rational
    # assumption for symbols.
    expr = sympify(arg, override=override, **assumptions)
    lexpr = _make_domain(expr, **assumptions)

    # No SymPy physical units attached; nothing more to do.
    if not lexpr.has(uu.Quantity):
        return lexpr

    from .units import units
    cls = lexpr.__class__
    # Strip the units from the value and convert to the matching
    # Lcapy quantity.
    expr, units = units.as_value_unit(lexpr.expr)

    # 5 * t * u.volts -> V
    # 5 * cos(t) * u.volts -> V
    # 5 * s * u.volts -> V / Hz
    warn('This may be deprecated since the units may not be what you expect')
    if units == uu.volts:
        return cls(expr, **assumptions).as_voltage()
    elif units == uu.amperes:
        return cls(expr, **assumptions).as_current()
    elif units == uu.ohms:
        return cls(expr, **assumptions).as_impedance()
    elif units == uu.siemens:
        return cls(expr, **assumptions).as_admittance()
    elif units == uu.watts:
        return cls(expr, **assumptions).as_power()

    # Unknown unit: warn and fall back to the unit-bearing expression.
    warn('Unhandled units: %s' % units)
    return lexpr
def expr_class(domain, arg, **assumptions):
    """Return the expression class for `domain` matching the quantity
    of `arg` (or the 'undefined' quantity if `arg` has none)."""

    try:
        quantity = arg.quantity
    except AttributeError:
        # FIX: previously a bare `except:` which also hid unrelated
        # errors (including KeyboardInterrupt); only the missing
        # attribute is the expected case.
        quantity = 'undefined'

    return expressionclasses.get_quantity(domain, quantity)
def expr_make(domain, arg, **assumptions):
    """Instantiate the appropriate expression class for `domain` and
    the quantity of `arg`."""

    return expr_class(domain, arg)(arg, **assumptions)
def equation(lhs, rhs, inputsym='x', outputsym='y', **assumptions):
    """Create an Lcapy equation.

    This is an Lcapy expression of the form Eq(lhs, rhs).

    For example,

        e = equation('Y(s)', 'X(s) * 2 * s')

    The left hand side (lhs) and right hand side subexpressions
    can be obtained with the `lhs` and `rhs` attributes."""

    # Deferred import to avoid a circular dependency.
    from .diffeq import DifferenceEquation

    lhs = expr(lhs)
    rhs = expr(rhs)

    # Check if lhs and rhs compatible.  The subtraction raises if the
    # two sides are from incompatible domains.
    diff = lhs - rhs

    # Discrete-time equations become difference equations.
    if diff.is_discrete_time_domain:
        return DifferenceEquation(lhs, rhs, inputsym, outputsym, **assumptions)

    cls = lhs.__class__
    return cls(sym.Eq(lhs.expr, rhs.expr, evaluate=False), **assumptions)
def difference_equation(lhs, rhs, inputsym='x', outputsym='y', **assumptions):
    """Create an Lcapy difference equation.

    This is an Lcapy expression of the form Eq(lhs, rhs).

    For example,

        e = difference_equation('y(n)', 'x(n) + 2 * y(n - 1)')

    The left hand side (lhs) and right hand side subexpressions
    can be obtained with the `lhs` and `rhs` attributes."""

    # Deferred import to avoid a circular dependency.
    from .diffeq import DifferenceEquation

    lhs = expr(lhs)
    rhs = expr(rhs)

    # Check if lhs and rhs compatible.  The subtraction raises if the
    # two sides are from incompatible domains.
    diff = lhs - rhs

    return DifferenceEquation(lhs, rhs, inputsym, outputsym, **assumptions)
def symbol(name, **assumptions):
    """Create an Lcapy symbol.

    By default, symbols are assumed to be positive unless `real` is
    defined or `positive` is defined as False.

    If `name` is already a symbol, it is overridden unless it is
    a domain symbol in which case `force` must be `True`."""

    sympy_symbol = usersymbol(name, **assumptions)

    # Wrap the SymPy symbol as an Lcapy expression.
    return expr(sympy_symbol, **assumptions)
def symbols(names, **assumptions):
    """Create Lcapy symbols from a whitespace or comma delimited string
    of symbol names.  See also symbol."""

    from .parser import split

    created = [symbol(name, **assumptions)
               for name in split(names, ", ")]

    # A single name yields a single symbol rather than a list.
    if len(created) == 1:
        return created[0]
    return created
def radians(arg, **assumptions):
    """Convert degrees to radians and set the units to radians.  See
    also rad() which only sets the units."""

    result = expr(sym.pi * arg / 180, **assumptions)
    result.units = uu.rad
    return result
def degrees(arg, **assumptions):
    """Convert radians to degrees and set the units to degrees.  See
    also deg() which only sets the units."""

    result = expr(180 * arg / sym.pi, **assumptions)
    result.units = uu.deg
    return result
def rad(arg, **assumptions):
    """Set the units to radians.  See also radians() which converts
    degrees to radians."""

    result = expr(arg, **assumptions)
    result.units = uu.rad
    return result
def deg(arg, **assumptions):
    """Set the units to degrees.  See also degrees() which converts
    radians to degrees."""

    result = expr(arg, **assumptions)
    result.units = uu.deg
    return result
def delcapify(expr):
    """De-lcapify an expression to create a pure SymPy expression.

    Recursively descends into tuples, lists, and dicts, replacing
    anything with an `expr` attribute by that attribute's value;
    other objects are returned unchanged."""

    if isinstance(expr, tuple):
        return tuple(delcapify(item) for item in expr)
    if isinstance(expr, list):
        return [delcapify(item) for item in expr]
    if isinstance(expr, dict):
        return {delcapify(key): delcapify(value)
                for key, value in expr.items()}
    if hasattr(expr, 'expr'):
        # Duck-typed Lcapy object; unwrap it.
        return expr.expr
    return expr
def check(expr):
    """Debugging aid: recursively print every Expr found in the
    argument tree of `expr`."""

    args = getattr(expr, 'args', None)
    if args is not None:
        for arg in args:
            if isinstance(arg, Expr):
                print(arg)
                check(arg)
from .cexpr import cexpr, ConstantDomainExpression
from .fexpr import f, fexpr, FourierDomainExpression
from .omegaexpr import omega, omegaexpr, AngularFourierDomainExpression
from .normfexpr import Fexpr
from .normomegaexpr import Omegaexpr
from .texpr import t, texpr, TimeDomainExpression
from .sexpr import s, sexpr, LaplaceDomainExpression
from .nexpr import nexpr
from .kexpr import kexpr
from .zexpr import zexpr, ZDomainExpression
from .expressionclasses import expressionclasses
# Horrible hack to work with IPython around Sympy's back for LaTeX
# formatting.  The problem is that Sympy does not check for the
# _repr_latex method and instead relies on a predefined list of known
# types.  See _can_print_latex method in sympy/interactive/printing.py
import sys
try:
    from .printing import latex
    # Register Expr._repr_latex_ as the text/latex printer for the
    # Lcapy container classes so IPython renders them with LaTeX.
    formatter = sys.displayhook.shell.display_formatter.formatters['text/latex']

    for cls in (ExprList, ExprTuple, ExprDict):
        formatter.type_printers[cls] = Expr._repr_latex_
except:
    # Best-effort: silently skip when not running under IPython (or
    # when the display machinery is unavailable).
    pass
|
mph-/lcapy
|
lcapy/expr.py
|
Python
|
lgpl-2.1
| 117,484
|
[
"DIRAC"
] |
10f7be83ec2e016a5fa7cd038b147cf5287db018eaaf605902787041e9a3ab4e
|
import numpy as np

from gpaw.xc.functional import XCFunctional
from gpaw.mpi import world
class NonLocalFunctional(XCFunctional):
type = 'GLLB'
def __init__(self, xcname):
self.contributions = []
self.xcs = {}
XCFunctional.__init__(self, xcname)
def initialize(self, density, hamiltonian, wfs, occupations):
self.gd = density.gd # smooth grid describtor
self.finegd = density.finegd # fine grid describtor
self.nt_sg = density.nt_sg # smooth density
self.setups = wfs.setups # All the setups
self.nspins = wfs.nspins # number of spins
self.wfs = wfs
self.occupations = occupations
self.density = density
self.hamiltonian = hamiltonian
self.nvalence = wfs.nvalence
#self.vt_sg = paw.vt_sg # smooth potential
#self.kpt_u = kpt_u # kpoints object
#self.interpolate = interpolate # interpolation function
#self.nuclei = nuclei
# Is this OK place?
self.initialize0()
def pass_stuff_1d(self, ae):
self.ae = ae
def initialize0(self):
for contribution in self.contributions:
contribution.initialize()
def initialize_1d(self):
for contribution in self.contributions:
contribution.initialize_1d()
def calculate(self, gd, n_sg, v_sg=None, e_g=None):
#if gd is not self.gd:
# self.set_grid_descriptor(gd)
if e_g is None:
e_g = gd.empty()
if v_sg is None:
v_sg = np.zeros_like(n_sg)
if self.nspins == 1:
self.calculate_spinpaired(e_g, n_sg[0], v_sg[0])
else:
self.calculate_spinpolarized(e_g, n_sg, v_sg)
return gd.integrate(e_g)
def calculate_paw_correction(self, setup, D_sp, dEdD_sp, a=None, addcoredensity=True):
return self.calculate_energy_and_derivatives(setup, D_sp, dEdD_sp, a, addcoredensity)
def calculate_spinpaired(self, e_g, n_g, v_g):
e_g[:] = 0.0
for contribution in self.contributions:
contribution.calculate_spinpaired(e_g, n_g, v_g)
def calculate_spinpolarized(self, e_g, n_sg, v_sg):
e_g[:] = 0.0
for contribution in self.contributions:
contribution.calculate_spinpolarized(e_g, n_sg, v_sg)
def calculate_energy_and_derivatives(self, setup, D_sp, H_sp, a, addcoredensity=True):
Exc = 0.0
H_sp[:] = 0.0
for contribution in self.contributions:
Exc += contribution.calculate_energy_and_derivatives(setup,
D_sp, H_sp, a, addcoredensity)
Exc -= setup.xc_correction.Exc0
return Exc
def get_xc_potential_and_energy_1d(self, v_g):
Exc = 0.0
for contribution in self.contributions:
Exc += contribution.add_xc_potential_and_energy_1d(v_g)
return Exc
def get_smooth_xc_potential_and_energy_1d(self, vt_g):
Exc = 0.0
for contribution in self.contributions:
Exc += contribution.add_smooth_xc_potential_and_energy_1d(vt_g)
return Exc
def initialize_from_atomic_orbitals(self, basis_functions):
for contribution in self.contributions:
contribution.initialize_from_atomic_orbitals(basis_functions)
def get_extra_setup_data(self, dict):
for contribution in self.contributions:
contribution.add_extra_setup_data(dict)
def add_contribution(self, contribution):
self.contributions.append(contribution)
self.xcs[contribution.get_name()] = contribution
def print_functional(self):
if world.rank is not 0:
return
print
print "Functional being used consists of"
print "---------------------------------------------------"
print "| Weight | Module | Description |"
print "---------------------------------------------------"
for contribution in self.contributions:
print "|%9.3f | %-17s| %-17s|" % (contribution.weight, contribution.get_name(), contribution.get_desc())
print "---------------------------------------------------"
print
def read(self, reader):
for contribution in self.contributions:
contribution.read(reader)
def write(self, writer, natoms):
for contribution in self.contributions:
contribution.write(writer, natoms)
|
robwarm/gpaw-symm
|
gpaw/xc/gllb/nonlocalfunctional.py
|
Python
|
gpl-3.0
| 4,509
|
[
"GPAW"
] |
a4fe6fc63f86b6a9edb0671378128b3f3c8beabd0b488362aaabf4320f7e73f4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.